Compare commits

...

17 Commits

Author SHA1 Message Date
Nikita Gubarkov
02d36310f5 Lower SPIR-V target version to match Vulkan 1.2 2023-09-06 15:19:44 +02:00
Nikita Gubarkov
afdfd15c92 Make dynamic rendering optional. 2023-09-06 15:19:44 +02:00
Nikita Gubarkov
bd7e216e4f Lower Vulkan version to 1.2 and use dynamic rendering extension. 2023-09-06 15:19:44 +02:00
Nikita Gubarkov
5ac65d371f Make synchronization2 optional. 2023-09-06 15:19:43 +02:00
Nikita Gubarkov
001e63aa13 Separate method for attaching native surface data to Java object. 2023-09-06 15:19:43 +02:00
Nikita Gubarkov
86bb3a5441 Memory management via VMA, vertex buffer pool, shader push constants. 2023-09-06 15:19:43 +02:00
Nikita Gubarkov
85811e97dd Enable logicOp feature for XOR painting mode. 2023-09-06 15:19:43 +02:00
Nikita Gubarkov
4618419f78 Shader compilation, test pipeline.
Shaders are compiled with glslc or glslangValidator, and the bytecode is inlined directly into libawt_wlawt
2023-09-06 15:19:43 +02:00
Nikita Gubarkov
634910f86c Split command recording into primary and secondary command buffers.
This allows us to record commands "in the past", before the current render pass has started, which enables some heavy optimizations (see the sketch below):
1. When we suddenly need some texture in the middle of the render pass, there is no need to stop the render pass to insert the necessary synchronization - we can record it as if we had known about it beforehand.
2. When we draw something and then clear the surface, we can simply erase all the commands recorded inside the current render pass, so the actual drawing never happens.
2023-09-06 15:19:43 +02:00
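A minimal sketch of the idea using the vulkan-hpp RAII wrappers (an assumed helper, not the actual VKRecorder code; compare VKRecorder::record in the diff below):

    // Draws are recorded into a secondary buffer, so the primary buffer can
    // still receive barriers "in the past", before the render pass begins.
    void flushPass(VKDevice& device, vk::raii::CommandBuffer& primary,
                   const vk::RenderPassBeginInfo& beginInfo) {
        auto secondary = device.getCommandBuffer(vk::CommandBufferLevel::eSecondary);
        vk::CommandBufferInheritanceInfo inheritance {}; // render pass details elided
        secondary.begin({vk::CommandBufferUsageFlagBits::eOneTimeSubmit
                       | vk::CommandBufferUsageFlagBits::eRenderPassContinue, &inheritance});
        secondary.draw(3, 1, 0, 0);          // drawing commands accumulate here
        secondary.end();
        // Everything recorded on `primary` up to this point happened "in the past".
        primary.beginRenderPass(beginInfo, vk::SubpassContents::eSecondaryCommandBuffers);
        primary.executeCommands(*secondary); // splice the deferred draws in
        primary.endRenderPass();
    }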
Nikita Gubarkov
544a6771c6 Each device has a single timeline semaphore (basically a 64-bit counter) that increases monotonically as the device executes our commands, allowing us to track the state of submitted batches and reuse resources that are no longer in use. 2023-09-06 15:19:43 +02:00
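A sketch of the tracking scheme (mirroring VKDevice::submitCommandBuffer and popPending in the diff below; the Pending struct and local names here are illustrative):

    // Every submit signals the timeline semaphore with an incremented counter;
    // a resource queued at submit time is tagged with that counter and may be
    // reused once the semaphore's completed value catches up to the tag.
    struct Pending { VKBuffer resource; uint64_t counter; };

    timelineCounter++;
    vk::TimelineSemaphoreSubmitInfo timelineInfo {0, nullptr, 1, &timelineCounter};
    // ...chain timelineInfo into the vk::SubmitInfo pNext and submit...

    bool reusable = timelineSemaphore.getCounterValue() >= pending.counter;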
Nikita Gubarkov
a554ca9888 State management, synchronization & layout transition.
Now using dynamic rendering and synchronization2 from Vulkan 1.3
2023-09-06 15:19:43 +02:00
Nikita Gubarkov
71224a9c95 Refactored native surface data hierarchy.
There was a C-style "inheritance" model, with VKSDOps having a SurfaceDataOps as its first member and conversions back and forth between them, plus privOps - a pointer to the platform-specific part (WLVK).

This was refactored into plain inheritance:
SurfaceDataOps -> VKSurfaceData -> VKSwapchainSurfaceData -> WLVKSurfaceData

Also replaced pthread mutexes with std::recursive_mutex to move this abstraction up the hierarchy.
2023-09-06 15:19:43 +02:00
Nikita Gubarkov
dc03522baa Moved platform-independent part of WLVKSurfaceData into VKSwapchainSurfaceData 2023-09-06 15:19:43 +02:00
Nikita Gubarkov
57d46f4f82 Make debug messenger part of graphics configuration 2023-09-06 15:19:43 +02:00
Nikita Gubarkov
0e7602d8e1 Report which device was created 2023-09-06 15:19:43 +02:00
Nikita Gubarkov
ebba8f1aba Fixed validation errors (only). Added basic synchronization.
Merged physical and logical device into a single entity.
Other refactoring.
2023-09-06 15:19:42 +02:00
Nikita Gubarkov
aa1d84f9e1 Get rid of maxTextureSize in Vulkan code.
This concept was introduced to fix macOS-specific bugs and doesn't map well to the Vulkan implementation, as this value is tied to a specific device and texture format; get rid of it for now and see whether we need it at all.
2023-09-06 15:19:42 +02:00
32 changed files with 25668 additions and 582 deletions

View File

@@ -99,6 +99,9 @@ AC_DEFUN_ONCE([LIB_SETUP_WAYLAND],
AC_ARG_WITH(vulkan-include, [AS_HELP_STRING([--with-vulkan-include],
[specify directory for the vulkan include files])])
AC_ARG_WITH(vulkan-shader-compiler, [AS_HELP_STRING([--with-vulkan-shader-compiler],
[specify which shader compiler to use: glslc/glslangValidator])])
if test "x$SUPPORTS_LIB_VULKAN" = xfalse; then
if (test "x${with_vulkan}" != x && test "x${with_vulkan}" != xno) || \
@@ -120,7 +123,6 @@ AC_DEFUN_ONCE([LIB_SETUP_WAYLAND],
AC_CHECK_HEADERS([${with_vulkan_include}/include/vulkan/vulkan.h],
[ VULKAN_FOUND=yes
VULKAN_FLAGS="-DVK_USE_PLATFORM_WAYLAND_KHR -I${with_vulkan_include}/include -DVULKAN_ENABLED"
VULKAN_ENABLED=true
],
[ AC_MSG_ERROR([Can't find 'vulkan/vulkan.h' under '${with_vulkan_include}']) ]
)
@@ -131,7 +133,6 @@ AC_DEFUN_ONCE([LIB_SETUP_WAYLAND],
AC_CHECK_HEADERS([$VULKAN_SDK/include/vulkan/vulkan.h],
[ VULKAN_FOUND=yes
VULKAN_FLAGS="-DVK_USE_PLATFORM_WAYLAND_KHR -I${VULKAN_SDK}/include -DVULKAN_ENABLED"
VULKAN_ENABLED=true
],
[ VULKAN_FOUND=no; break ]
)
@@ -142,7 +143,6 @@ AC_DEFUN_ONCE([LIB_SETUP_WAYLAND],
AC_CHECK_HEADERS([vulkan/vulkan.h],
[ VULKAN_FOUND=yes
VULKAN_FLAGS="-DVK_USE_PLATFORM_WAYLAND_KHR -DVULKAN_ENABLED"
VULKAN_ENABLED=true
],
[ VULKAN_FOUND=no; break ]
)
@@ -151,11 +151,32 @@ AC_DEFUN_ONCE([LIB_SETUP_WAYLAND],
if test "x$VULKAN_FOUND" = xno; then
HELP_MSG_MISSING_DEPENDENCY([vulkan])
AC_MSG_ERROR([Could not find vulkan! $HELP_MSG ])
else
# Find shader compiler - glslc or glslangValidator
if (test "x${with_vulkan_shader_compiler}" = x || test "x${with_vulkan_shader_compiler}" = xglslc); then
UTIL_LOOKUP_PROGS(GLSLC, glslc)
SHADER_COMPILER="$GLSLC"
VULKAN_SHADER_COMPILER="glslc --target-env=vulkan1.2 -mfmt=num -o"
fi
if (test "x${with_vulkan_shader_compiler}" = x || test "x${with_vulkan_shader_compiler}" = xglslangValidator) && \
test "x$SHADER_COMPILER" = x; then
UTIL_LOOKUP_PROGS(GLSLANG, glslangValidator)
SHADER_COMPILER="$GLSLANG"
VULKAN_SHADER_COMPILER="glslangValidator --target-env vulkan1.2 -x -o"
fi
if test "x$SHADER_COMPILER" != x; then
VULKAN_ENABLED=true
else
AC_MSG_ERROR([Can't find shader compiler])
fi
fi
fi
fi
fi
AC_SUBST(VULKAN_FLAGS)
AC_SUBST(VULKAN_SHADER_COMPILER)
AC_SUBST(VULKAN_ENABLED)
AC_SUBST(WAYLAND_CFLAGS)
AC_SUBST(WAYLAND_LIBS)
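With these options configure prefers glslc and falls back to glslangValidator; the resulting VULKAN_SHADER_COMPILER command takes the output header first and the shader source second, so a build step expands to something like (file names illustrative):

    glslc --target-env=vulkan1.2 -mfmt=num -o test.frag.h test.frag

The -mfmt=num flag emits the SPIR-V words as comma-separated numbers, which is what allows the bytecode to be #included straight into libawt_wlawt.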

View File

@@ -482,6 +482,7 @@ A11Y_JAWS_ANNOUNCING_ENABLED:=@A11Y_JAWS_ANNOUNCING_ENABLED@
WAYLAND_CFLAGS:=@WAYLAND_CFLAGS@
WAYLAND_LIBS:=@WAYLAND_LIBS@
VULKAN_FLAGS:=@VULKAN_FLAGS@
VULKAN_SHADER_COMPILER:=@VULKAN_SHADER_COMPILER@
VULKAN_ENABLED:=@VULKAN_ENABLED@
# The lowest required version of macosx

View File

@@ -219,6 +219,34 @@ ifeq ($(call isTargetOs, windows)+$(ENABLE_HEADLESS_ONLY)+$(A11Y_NVDA_ANNOUNCING
TARGETS += $(COPY_NVDA_DEPENDENCIES)
endif
# Compile Vulkan shaders
define compile-spirv
$(call MakeTargetDir)
$(VULKAN_SHADER_COMPILER) '$(call DecodeSpace, $@)' '$(call DecodeSpace, $<)'
endef
spirv-name = $(strip $1).h
ifeq ($(VULKAN_ENABLED), true)
$(eval $(call SetupCopyFiles, COMPILE_VULKAN_SHADERS, \
SRC := $(TOPDIR)/src/$(MODULE)/share/glsl/vulkan, \
FILES := $(call FindFiles, $(TOPDIR)/src/$(MODULE)/share/glsl/vulkan), \
DEST := $(SUPPORT_OUTPUTDIR)/headers/java.desktop/vulkan/spirv, \
MACRO := compile-spirv, \
NAME_MACRO := spirv-name, \
))
VULKAN_SHADER_LIST = $(SUPPORT_OUTPUTDIR)/headers/java.desktop/vulkan/shader_list.h
$(VULKAN_SHADER_LIST): $(COMPILE_VULKAN_SHADERS)
> $(VULKAN_SHADER_LIST) $(NEWLINE) \
$(foreach f, $(patsubst $(TOPDIR)/src/$(MODULE)/share/glsl/vulkan/%,%,$(call FindFiles, $(TOPDIR)/src/$(MODULE)/share/glsl/vulkan)), \
$(ECHO) SHADER_ENTRY\($(subst .,$(COMMA),$(subst /,_,$f))\) >> $(VULKAN_SHADER_LIST) $(NEWLINE) \
$(ECHO) '#ifdef INCLUDE_BYTECODE' >> $(VULKAN_SHADER_LIST) $(NEWLINE) \
$(ECHO) '#include "spirv/$f.h"' >> $(VULKAN_SHADER_LIST) $(NEWLINE) \
$(ECHO) BYTECODE_END >> $(VULKAN_SHADER_LIST) $(NEWLINE) \
$(ECHO) '#endif' >> $(VULKAN_SHADER_LIST) $(NEWLINE) \
)
$(BUILD_LIBAWT): $(VULKAN_SHADER_LIST)
endif
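Assuming a shader source named test.frag (the test pipeline below uses test vertex/fragment stages), each shader contributes one block of this shape to the generated shader_list.h:

    SHADER_ENTRY(test,frag)
    #ifdef INCLUDE_BYTECODE
    #include "spirv/test.frag.h"
    BYTECODE_END
    #endif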
TARGETS += $(BUILD_LIBAWT)
################################################################################
@@ -354,6 +382,7 @@ ifeq ($(call isTargetOs, windows macosx), false)
common/font \
common/java2d/wl \
common/java2d/vulkan \
libvmahpp \
#
# Enable 'wakefield' extension for java.awt.Robot support
@@ -401,6 +430,8 @@ ifeq ($(call isTargetOs, windows macosx), false)
DISABLED_WARNINGS_CXX_gcc := undef, \
DISABLED_WARNINGS_clang := parentheses format undef \
logical-op-parentheses format-nonliteral int-conversion, \
DISABLED_WARNINGS_gcc_VKMemory.cpp := missing-field-initializers implicit-fallthrough parentheses, \
DISABLED_WARNINGS_clang_VKMemory.cpp := missing-field-initializers implicit-fallthrough parentheses, \
LDFLAGS := $(LDFLAGS_JDKLIB) \
$(call SET_SHARED_LIBRARY_ORIGIN) \
-L$(INSTALL_LIBRARIES_HERE), \

View File

@@ -0,0 +1,8 @@
#version 450
layout(location = 0) in vec4 inColor;
layout(location = 0) out vec4 outColor;
void main() {
outColor = inColor;
}

View File

@@ -0,0 +1,21 @@
#version 450
layout(push_constant) uniform Push {
vec2 invViewport2; // 2.0/viewport
} push;
vec4 colors[3] = vec4[](
vec4(1,0,0,1),
vec4(0,1,0,1),
vec4(0,0,1,1)
);
layout(location = 0) in vec2 inPosition;
layout(location = 0) out vec4 outColor;
void main() {
outColor = colors[gl_VertexIndex % 3];
gl_Position = vec4(inPosition * push.invViewport2 - 1.0, 0.0, 1.0);
gl_PointSize = 1.0f;
}
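Note how the push constant turns the pixel-to-NDC mapping into a single multiply-add: for a pixel coordinate x in [0, W], x * (2.0/W) - 1.0 lands in [-1, 1] (likewise for y), so the shader only needs the precomputed 2.0/viewport vector, never the viewport size itself.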

View File

@@ -27,16 +27,18 @@
#include "VKBase.h"
#include <Trace.h>
#include <set>
#if defined(DEBUG)
#include <csignal>
#endif
#define VALIDATION_LAYER_NAME "VK_LAYER_KHRONOS_validation"
static const uint32_t REQUIRED_VULKAN_VERSION = VK_MAKE_API_VERSION(0, 1, 0, 0);
static const uint32_t REQUIRED_VULKAN_VERSION = VK_MAKE_API_VERSION(0, 1, 2, 0);
std::unique_ptr<VKGraphicsEnvironment> VKGraphicsEnvironment::_ge_instance = nullptr;
// ========== Vulkan instance ==========
// ========== Graphics environment ==========
#if defined(DEBUG)
static vk::raii::DebugUtilsMessengerEXT debugMessenger = nullptr;
static VkBool32 debugCallback(
VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
@@ -57,127 +59,13 @@ static VkBool32 debugCallback(
J2dRlsTraceLn(level, data->pMessage);
// TODO if (messageSeverity == VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT) ABORT?
if (messageSeverity == VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT) {
raise(SIGABRT);
}
return 0;
}
#endif
// ========== Vulkan device ==========
#if defined(VK_USE_PLATFORM_WAYLAND_KHR)
extern struct wl_display *wl_display;
#endif
class PhysicalDevice : vk::raii::PhysicalDevice {
friend class VKDevice;
bool _supported = false;
std::vector<const char*> _enabled_layers, _enabled_extensions;
int _queue_family = -1;
public:
int queue_family() const {
return _queue_family;
}
std::vector<const char *> & enabled_layers() {
return _enabled_layers;
}
std::vector<const char *> & enabled_extensions() {
return _enabled_extensions;
}
explicit PhysicalDevice(vk::raii::PhysicalDevice&& handle) : vk::raii::PhysicalDevice(std::move(handle)) {
const auto& properties = getProperties();
const auto& queueFamilies = getQueueFamilyProperties();
J2dRlsTrace5(J2D_TRACE_INFO, "Vulkan: Found device %s (%d.%d.%d, %s)\n",
(const char*) properties.deviceName,
VK_API_VERSION_MAJOR(properties.apiVersion),
VK_API_VERSION_MINOR(properties.apiVersion),
VK_API_VERSION_PATCH(properties.apiVersion),
vk::to_string(properties.deviceType).c_str());
if (properties.apiVersion < REQUIRED_VULKAN_VERSION) {
J2dRlsTrace(J2D_TRACE_INFO, " Unsupported Vulkan version\n");
return;
}
// Check supported queue families.
for (unsigned int i = 0; i < queueFamilies.size(); i++) {
const auto& family = queueFamilies[i];
#if defined(VK_USE_PLATFORM_WAYLAND_KHR)
bool presentationSupported = getWaylandPresentationSupportKHR(i, *wl_display);
#endif
char logFlags[5] {
family.queueFlags & vk::QueueFlagBits::eGraphics ? 'G' : '-',
family.queueFlags & vk::QueueFlagBits::eCompute ? 'C' : '-',
family.queueFlags & vk::QueueFlagBits::eTransfer ? 'T' : '-',
family.queueFlags & vk::QueueFlagBits::eSparseBinding ? 'S' : '-',
presentationSupported ? 'P' : '-'
};
J2dRlsTrace3(J2D_TRACE_INFO, " %d queues in family (%.*s)\n", family.queueCount, 5, logFlags);
// TODO use compute workloads? Separate transfer-only DMA queue?
if (_queue_family == -1 && (family.queueFlags & vk::QueueFlagBits::eGraphics) && presentationSupported) {
_queue_family = i;
}
}
if (_queue_family == -1) {
J2dRlsTrace(J2D_TRACE_INFO, " No suitable queue\n");
return;
}
// Populate maps and log supported layers & extensions.
std::set<std::string> layers, extensions;
J2dRlsTrace(J2D_TRACE_VERBOSE, " Supported device layers:\n");
for (auto& l : enumerateDeviceLayerProperties()) {
J2dRlsTrace1(J2D_TRACE_VERBOSE, " %s\n", (char*) l.layerName);
layers.emplace((char*) l.layerName);
}
J2dRlsTrace(J2D_TRACE_VERBOSE, " Supported device extensions:\n");
for (auto& e : enumerateDeviceExtensionProperties(nullptr)) {
J2dRlsTrace1(J2D_TRACE_VERBOSE, " %s\n", (char*) e.extensionName);
extensions.emplace((char*) e.extensionName);
}
// Check required layers & extensions.
_enabled_extensions.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME);
bool requiredNotFound = false;
for (auto e : _enabled_extensions) {
if (extensions.find(e) == extensions.end()) {
J2dRlsTrace1(J2D_TRACE_INFO, " Required device extension not supported: %s\n", (char*) e);
requiredNotFound = true;
}
}
if (requiredNotFound) return;
// Validation layer
#ifdef DEBUG
if (layers.find(VALIDATION_LAYER_NAME) != layers.end()) {
_enabled_layers.push_back(VALIDATION_LAYER_NAME);
} else {
J2dRlsTrace1(J2D_TRACE_INFO, " %s device layer is not supported\n", VALIDATION_LAYER_NAME);
}
#endif
// This device is supported
_supported = true;
}
operator bool() const {
vk::PhysicalDevice handle = **this;
return handle && _supported;
}
uint32_t getMaxImageDimension2D() {
const auto& properties = getProperties();
return properties.limits.maxImageDimension2D;
}
};
VKGraphicsEnvironment *VKGraphicsEnvironment::graphics_environment() {
if (!_ge_instance) {
try {
@@ -192,7 +80,7 @@ VKGraphicsEnvironment *VKGraphicsEnvironment::graphics_environment() {
}
VKGraphicsEnvironment::VKGraphicsEnvironment() :
_vk_context(), _vk_instance(nullptr), _default_device(-1) {
_vk_context(), _vk_instance(nullptr), _default_device(nullptr) {
// Load library.
uint32_t version = _vk_context.enumerateInstanceVersion();
J2dRlsTrace3(J2D_TRACE_INFO, "Vulkan: Available (%d.%d.%d)\n",
@@ -231,9 +119,9 @@ VKGraphicsEnvironment::VKGraphicsEnvironment() :
}
// Configure validation
#ifdef DEBUG
std::array<vk::ValidationFeatureEnableEXT, 4> enabledValidationFeatures = {
vk::ValidationFeatureEnableEXT::eGpuAssisted,
vk::ValidationFeatureEnableEXT::eGpuAssistedReserveBindingSlot,
std::array<vk::ValidationFeatureEnableEXT, 2> enabledValidationFeatures = {
// vk::ValidationFeatureEnableEXT::eGpuAssisted, // TODO GPU assisted validation is available only from Vulkan 1.1
// vk::ValidationFeatureEnableEXT::eGpuAssistedReserveBindingSlot,
vk::ValidationFeatureEnableEXT::eBestPractices,
vk::ValidationFeatureEnableEXT::eSynchronizationValidation
};
@@ -272,7 +160,7 @@ VKGraphicsEnvironment::VKGraphicsEnvironment() :
// Create debug messenger
#if defined(DEBUG)
if (pNext) {
debugMessenger = vk::raii::DebugUtilsMessengerEXT(_vk_instance, vk::DebugUtilsMessengerCreateInfoEXT {
_debugMessenger = vk::raii::DebugUtilsMessengerEXT(_vk_instance, vk::DebugUtilsMessengerCreateInfoEXT {
/*flags*/ {},
/*messageSeverity*/ vk::DebugUtilsMessageSeverityFlagBitsEXT::eError |
vk::DebugUtilsMessageSeverityFlagBitsEXT::eWarning |
@@ -289,26 +177,21 @@ VKGraphicsEnvironment::VKGraphicsEnvironment() :
// Find suitable devices.
for (auto &handle: _vk_instance.enumeratePhysicalDevices()) {
PhysicalDevice physicalDevice{std::move(handle)};
if (physicalDevice) { // Supported.
_physical_devices.push_back(std::move(physicalDevice));
VKDevice device {*_vk_instance, std::move(handle)};
if (device.supported()) {
_devices.push_back(std::make_unique<VKDevice>(std::move(device)));
}
}
if (_physical_devices.empty()) {
if (_devices.empty()) {
throw std::runtime_error("Vulkan: No suitable device found");
}
// Create virtual device for a physical device.
// TODO system property for manual choice of GPU
// TODO integrated/discrete presets
// TODO performance/power saving mode switch on the fly?
_default_physical_device = 0; // TODO pick first just to check that virtual device creation works
_devices.push_back(std::move(VKDevice{_physical_devices[_default_physical_device]}));
_default_device = 0;
}
uint32_t VKGraphicsEnvironment::max_texture_size() {
return _physical_devices[_default_physical_device].getMaxImageDimension2D();
_default_device = &*_devices[0]; // TODO pick first just to check that virtual device creation works
_default_device->init();
}
vk::raii::Instance& VKGraphicsEnvironment::vk_instance() {
@@ -320,31 +203,215 @@ void VKGraphicsEnvironment::dispose() {
}
VKDevice& VKGraphicsEnvironment::default_device() {
return _devices[_default_device];
return *_default_device;
}
// ========== Vulkan device ==========
VKDevice::VKDevice(PhysicalDevice& physicalDevice) : vk::raii::Device(nullptr), _command_pool(nullptr) {
#if defined(VK_USE_PLATFORM_WAYLAND_KHR)
extern struct wl_display *wl_display;
#endif
VKDevice::VKDevice(vk::Instance instance, vk::raii::PhysicalDevice&& handle) :
vk::raii::Device(nullptr), vk::raii::PhysicalDevice(nullptr), _instance(instance) {
auto featuresChain = handle.getFeatures2<vk::PhysicalDeviceFeatures2,
vk::PhysicalDeviceVulkan11Features,
vk::PhysicalDeviceVulkan12Features>();
const auto& features10 = featuresChain.get<vk::PhysicalDeviceFeatures2>().features;
const auto& features11 = featuresChain.get<vk::PhysicalDeviceVulkan11Features>();
const auto& features12 = featuresChain.get<vk::PhysicalDeviceVulkan12Features>();
auto propertiesChain = handle.getProperties2<vk::PhysicalDeviceProperties2,
vk::PhysicalDeviceVulkan11Properties,
vk::PhysicalDeviceVulkan12Properties>();
const auto& properties10 = propertiesChain.get<vk::PhysicalDeviceProperties2>().properties;
const auto& properties11 = propertiesChain.get<vk::PhysicalDeviceVulkan11Properties>();
const auto& properties12 = propertiesChain.get<vk::PhysicalDeviceVulkan12Properties>();
const auto& queueFamilies = handle.getQueueFamilyProperties();
_name = (const char*) properties10.deviceName;
J2dRlsTrace5(J2D_TRACE_INFO, "Vulkan: Found device %s (%d.%d.%d, %s)\n",
(const char*) properties10.deviceName,
VK_API_VERSION_MAJOR(properties10.apiVersion),
VK_API_VERSION_MINOR(properties10.apiVersion),
VK_API_VERSION_PATCH(properties10.apiVersion),
vk::to_string(properties10.deviceType).c_str());
// Check API version.
if (properties10.apiVersion < REQUIRED_VULKAN_VERSION) {
J2dRlsTrace(J2D_TRACE_INFO, " Unsupported Vulkan version\n");
return;
}
// Check supported features.
if (!features10.logicOp) {
J2dRlsTrace(J2D_TRACE_INFO, " Logic op not supported\n");
return;
}
if (!features12.timelineSemaphore) {
J2dRlsTrace(J2D_TRACE_INFO, " Timeline semaphore not supported\n");
return;
}
// Check supported queue families.
for (unsigned int i = 0; i < queueFamilies.size(); i++) {
const auto& family = queueFamilies[i];
#if defined(VK_USE_PLATFORM_WAYLAND_KHR)
bool presentationSupported = handle.getWaylandPresentationSupportKHR(i, *wl_display);
#endif
char logFlags[5] {
family.queueFlags & vk::QueueFlagBits::eGraphics ? 'G' : '-',
family.queueFlags & vk::QueueFlagBits::eCompute ? 'C' : '-',
family.queueFlags & vk::QueueFlagBits::eTransfer ? 'T' : '-',
family.queueFlags & vk::QueueFlagBits::eSparseBinding ? 'S' : '-',
presentationSupported ? 'P' : '-'
};
J2dRlsTrace3(J2D_TRACE_INFO, " %d queues in family (%.*s)\n", family.queueCount, 5, logFlags);
// TODO use compute workloads? Separate transfer-only DMA queue?
if (_queue_family == -1 && (family.queueFlags & vk::QueueFlagBits::eGraphics) && presentationSupported) {
_queue_family = i;
}
}
if (_queue_family == -1) {
J2dRlsTrace(J2D_TRACE_INFO, " No suitable queue\n");
return;
}
// Populate maps and log supported layers & extensions.
std::set<std::string> layers, extensions;
J2dRlsTrace(J2D_TRACE_VERBOSE, " Supported device layers:\n");
for (auto& l : handle.enumerateDeviceLayerProperties()) {
J2dRlsTrace1(J2D_TRACE_VERBOSE, " %s\n", (char*) l.layerName);
layers.emplace((char*) l.layerName);
}
J2dRlsTrace(J2D_TRACE_VERBOSE, " Supported device extensions:\n");
for (auto& e : handle.enumerateDeviceExtensionProperties(nullptr)) {
J2dRlsTrace1(J2D_TRACE_VERBOSE, " %s\n", (char*) e.extensionName);
extensions.emplace((char*) e.extensionName);
}
// Check required layers & extensions.
_enabled_extensions.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME);
bool requiredNotFound = false;
for (auto e : _enabled_extensions) {
if (extensions.find(e) == extensions.end()) {
J2dRlsTrace1(J2D_TRACE_INFO, " Required device extension not supported: %s\n", (char*) e);
requiredNotFound = true;
}
}
if (requiredNotFound) return;
_ext_memory_budget = extensions.find(VK_EXT_MEMORY_BUDGET_EXTENSION_NAME) != extensions.end();
if (_ext_memory_budget) _enabled_extensions.push_back(VK_EXT_MEMORY_BUDGET_EXTENSION_NAME);
_khr_synchronization2 = extensions.find(VK_KHR_SYNCHRONIZATION_2_EXTENSION_NAME) != extensions.end();
if (_khr_synchronization2) _enabled_extensions.push_back(VK_KHR_SYNCHRONIZATION_2_EXTENSION_NAME);
_khr_dynamic_rendering = extensions.find(VK_KHR_DYNAMIC_RENDERING_EXTENSION_NAME) != extensions.end();
if (_khr_dynamic_rendering) _enabled_extensions.push_back(VK_KHR_DYNAMIC_RENDERING_EXTENSION_NAME);
// Validation layer
#ifdef DEBUG
if (layers.find(VALIDATION_LAYER_NAME) != layers.end()) {
_enabled_layers.push_back(VALIDATION_LAYER_NAME);
} else {
J2dRlsTrace1(J2D_TRACE_INFO, " %s device layer is not supported\n", VALIDATION_LAYER_NAME);
}
#endif
// This device is supported
((vk::raii::PhysicalDevice&) *this) = std::move(handle);
}
void VKDevice::init() {
float queuePriorities[1] {1.0f}; // We only use one queue for now
std::vector<vk::DeviceQueueCreateInfo> queueCreateInfos;
queueCreateInfos.push_back(vk::DeviceQueueCreateInfo {
{}, (uint32_t) physicalDevice.queue_family(), 1, &queuePriorities[0]
{}, queue_family(), 1, &queuePriorities[0]
});
vk::PhysicalDeviceFeatures features10;
features10.logicOp = true;
vk::PhysicalDeviceVulkan12Features features12;
features12.timelineSemaphore = true;
void *pNext = &features12;
vk::PhysicalDeviceSynchronization2FeaturesKHR synchronization2Features;
if (_khr_synchronization2) {
synchronization2Features.synchronization2 = true;
synchronization2Features.pNext = pNext;
pNext = &synchronization2Features;
}
vk::PhysicalDeviceDynamicRenderingFeaturesKHR dynamicRenderingFeatures;
if (_khr_dynamic_rendering) {
dynamicRenderingFeatures.dynamicRendering = true;
dynamicRenderingFeatures.pNext = pNext;
pNext = &dynamicRenderingFeatures;
}
vk::DeviceCreateInfo deviceCreateInfo {
/*flags*/ {},
/*pQueueCreateInfos*/ queueCreateInfos,
/*ppEnabledLayerNames*/ physicalDevice.enabled_layers(),
/*ppEnabledExtensionNames*/ physicalDevice.enabled_extensions(),
/*pEnabledFeatures*/ nullptr
/*ppEnabledLayerNames*/ _enabled_layers,
/*ppEnabledExtensionNames*/ _enabled_extensions,
/*pEnabledFeatures*/ &features10,
/*pNext*/ pNext
};
*((vk::raii::Device*) this) = {physicalDevice, deviceCreateInfo};
this->_queue_family = physicalDevice.queue_family();
J2dRlsTrace(J2D_TRACE_INFO, "Vulkan: Device created\n"); // TODO which one?
((vk::raii::Device&) *this) = {*this, deviceCreateInfo};
_memory.init(_instance, *this, *this, REQUIRED_VULKAN_VERSION, _ext_memory_budget);
_pipelines.init((vk::raii::Device&) *this, _khr_dynamic_rendering);
_queue = getQueue(queue_family(), 0);
_commandPool = createCommandPool(vk::CommandPoolCreateInfo {
vk::CommandPoolCreateFlagBits::eTransient | vk::CommandPoolCreateFlagBits::eResetCommandBuffer,
queue_family()
});
vk::SemaphoreTypeCreateInfo semaphoreTypeCreateInfo { vk::SemaphoreType::eTimeline, 0 };
_timelineSemaphore = createSemaphore(vk::SemaphoreCreateInfo {{}, &semaphoreTypeCreateInfo});
_timelineCounter = 0;
J2dRlsTrace1(J2D_TRACE_INFO, "Vulkan: Device created %s\n", _name.c_str());
}
extern "C" jint VK_MaxTextureSize() {
return (jint)VKGraphicsEnvironment::graphics_environment()->max_texture_size();
VKBuffer VKDevice::getVertexBuffer() {
auto b = popPending<VKBuffer>(_pendingVertexBuffers);
if (*b) {
b.position() = 0;
return b;
} else {
return _memory.allocateBuffer(64 * 1024, vk::BufferUsageFlagBits::eVertexBuffer,
vma::AllocationCreateFlagBits::eMapped | vma::AllocationCreateFlagBits::eHostAccessSequentialWrite,
vma::MemoryUsage::eAutoPreferHost);
}
}
vk::raii::CommandBuffer VKDevice::getCommandBuffer(vk::CommandBufferLevel level) {
auto b = popPending<vk::raii::CommandBuffer>(level == vk::CommandBufferLevel::ePrimary ?
_pendingPrimaryBuffers : _pendingSecondaryBuffers);
if (*b) {
b.reset({});
return b;
} else {
return std::move(allocateCommandBuffers({*_commandPool, level, 1})[0]);
}
}
void VKDevice::submitCommandBuffer(vk::raii::CommandBuffer&& primary,
std::vector<vk::raii::CommandBuffer>& secondary,
std::vector<VKBuffer>& vertexBuffers,
std::vector<vk::Semaphore>& waitSemaphores,
std::vector<vk::PipelineStageFlags>& waitStages,
std::vector<vk::Semaphore>& signalSemaphores) {
_timelineCounter++;
signalSemaphores.insert(signalSemaphores.begin(), *_timelineSemaphore);
vk::TimelineSemaphoreSubmitInfo timelineInfo { 0, nullptr, (uint32_t) signalSemaphores.size(), &_timelineCounter };
queue().submit(vk::SubmitInfo {
waitSemaphores, waitStages, *primary, signalSemaphores, &timelineInfo
}, nullptr);
pushPending(_pendingPrimaryBuffers, std::move(primary));
pushPending(_pendingSecondaryBuffers, secondary);
pushPending(_pendingVertexBuffers, vertexBuffers);
signalSemaphores.clear();
waitSemaphores.clear();
waitStages.clear();
}
extern "C" jboolean VK_Init() {
@@ -353,8 +420,9 @@ extern "C" jboolean VK_Init() {
return true;
}
#if defined(DEBUG)
debugMessenger = nullptr;
#endif
return false;
}
}
extern "C" JNIEXPORT void JNICALL JNI_OnUnload(JavaVM *vm, void *reserved) {
VKGraphicsEnvironment::dispose();
}

View File

@@ -31,70 +31,111 @@
#define VK_NO_PROTOTYPES
#define VULKAN_HPP_NO_DEFAULT_DISPATCHER
#include <queue>
#include <vulkan/vulkan_raii.hpp>
#include "jni.h"
#include "VKMemory.h"
#include "VKPipeline.h"
class PhysicalDevice;
class VKGraphicsEnvironment;
class VKDevice : public vk::raii::Device {
class VKDevice : public vk::raii::Device, public vk::raii::PhysicalDevice {
friend class VKGraphicsEnvironment;
vk::raii::CommandPool _command_pool;
int _queue_family = -1;
VKDevice(PhysicalDevice& physicalDevice);
vk::Instance _instance;
std::string _name;
std::vector<const char*> _enabled_layers, _enabled_extensions;
bool _ext_memory_budget, _khr_synchronization2, _khr_dynamic_rendering;
int _queue_family = -1;
// Logical device state
VKMemory _memory;
VKPipelines _pipelines;
vk::raii::Queue _queue = nullptr;
vk::raii::CommandPool _commandPool = nullptr;
vk::raii::Semaphore _timelineSemaphore = nullptr;
uint64_t _timelineCounter = 0;
uint64_t _lastReadTimelineCounter = 0;
template <typename T> struct Pending {
T resource;
uint64_t counter;
using Queue = std::queue<Pending<T>>;
};
Pending<vk::raii::CommandBuffer>::Queue _pendingPrimaryBuffers, _pendingSecondaryBuffers;
Pending<VKBuffer>::Queue _pendingVertexBuffers;
template <typename T> T popPending(typename Pending<T>::Queue& queue) {
if (!queue.empty()) {
auto& f = queue.front();
if (_lastReadTimelineCounter >= f.counter ||
(_lastReadTimelineCounter = _timelineSemaphore.getCounterValue()) >= f.counter) {
T resource = std::move(f.resource);
queue.pop();
return resource;
}
}
return T(nullptr);
}
template <typename T> void pushPending(typename Pending<T>::Queue& queue, T&& resource) {
queue.push({std::move(resource), _timelineCounter});
}
template <typename T> void pushPending(typename Pending<T>::Queue& queue, std::vector<T>& resources) {
for (T& r : resources) {
pushPending(queue, std::move(r));
}
resources.clear();
}
explicit VKDevice(vk::Instance instance, vk::raii::PhysicalDevice&& handle);
public:
int queue_family() const {
return _queue_family;
bool synchronization2() {
return _khr_synchronization2;
}
bool dynamicRendering() {
return _khr_dynamic_rendering;
}
VKPipelines& pipelines() {
return _pipelines;
}
uint32_t queue_family() const {
return (uint32_t) _queue_family;
}
const vk::raii::Queue& queue() const {
return _queue;
}
void init(); // Creates actual logical device
VKBuffer getVertexBuffer();
vk::raii::CommandBuffer getCommandBuffer(vk::CommandBufferLevel level);
void submitCommandBuffer(vk::raii::CommandBuffer&& primary,
std::vector<vk::raii::CommandBuffer>& secondary,
std::vector<VKBuffer>& vertexBuffers,
std::vector<vk::Semaphore>& waitSemaphores,
std::vector<vk::PipelineStageFlags>& waitStages,
std::vector<vk::Semaphore>& signalSemaphores);
bool supported() const { // Supported or not
return *((const vk::raii::PhysicalDevice&) *this);
}
explicit operator bool() const { // Initialized or not
return *((const vk::raii::Device&) *this);
}
};
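A sketch of one full submit cycle through this API (semaphore and secondary-buffer setup elided; the local names are illustrative):

    VKDevice& dev = VKGraphicsEnvironment::graphics_environment()->default_device();
    auto primary = dev.getCommandBuffer(vk::CommandBufferLevel::ePrimary);
    primary.begin({vk::CommandBufferUsageFlagBits::eOneTimeSubmit});
    // ...beginRenderPass / executeCommands(secondaries) / endRenderPass...
    primary.end();
    dev.submitCommandBuffer(std::move(primary), secondaries, vertexBuffers,
                            waitSemaphores, waitStages, signalSemaphores);
    // The submitted buffers land in the pending queues and are recycled by
    // getCommandBuffer()/getVertexBuffer() once the timeline semaphore catches up.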
class VKSurfaceData {
uint32_t _width;
uint32_t _height;
uint32_t _scale;
uint32_t _bg_color;
public:
VKSurfaceData(uint32_t w, uint32_t h, uint32_t s, uint32_t bgc)
: _width(w), _height(h), _scale(s), _bg_color(bgc) {};
uint32_t width() const {
return _width;
}
uint32_t height() const {
return _height;
}
uint32_t scale() const {
return _scale;
}
uint32_t bg_color() const {
return _bg_color;
}
virtual void set_bg_color(uint32_t bg_color) {
_bg_color = bg_color;
}
virtual ~VKSurfaceData() = default;
virtual void revalidate(uint32_t w, uint32_t h, uint32_t s)
{
_width = w;
_height = h;
_scale = s;
}
};
class VKGraphicsEnvironment {
vk::raii::Context _vk_context;
vk::raii::Instance _vk_instance;
std::vector<PhysicalDevice> _physical_devices;
std::vector<VKDevice> _devices;
int _default_physical_device;
int _default_device;
vk::raii::Context _vk_context;
vk::raii::Instance _vk_instance;
#if defined(DEBUG)
vk::raii::DebugUtilsMessengerEXT _debugMessenger = nullptr;
#endif
std::vector<std::unique_ptr<VKDevice>> _devices;
VKDevice* _default_device;
static std::unique_ptr<VKGraphicsEnvironment> _ge_instance;
VKGraphicsEnvironment();
public:
@@ -102,7 +143,6 @@ public:
static void dispose();
VKDevice& default_device();
vk::raii::Instance& vk_instance();
uint32_t max_texture_size();
};
extern "C" {
@@ -110,8 +150,6 @@ extern "C" {
jboolean VK_Init();
jint VK_MaxTextureSize();
#ifdef __cplusplus
}
#endif //__cplusplus

View File

@@ -0,0 +1,59 @@
/*
* Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023, JetBrains s.r.o.. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#define VMA_IMPLEMENTATION
#include "VKMemory.h"
void VKMemory::init(vk::Instance instance, const vk::raii::PhysicalDevice& physicalDevice,
const vk::raii::Device& device, uint32_t apiVersion, bool extMemoryBudget) {
vma::VulkanFunctions functions = vma::functionsFromDispatcher(physicalDevice.getDispatcher(), device.getDispatcher());
vma::AllocatorCreateInfo createInfo {};
createInfo.instance = instance;
createInfo.physicalDevice = *physicalDevice;
createInfo.device = *device;
createInfo.pVulkanFunctions = &functions;
createInfo.vulkanApiVersion = apiVersion;
if (extMemoryBudget) {
createInfo.flags |= vma::AllocatorCreateFlagBits::eExtMemoryBudget;
}
_allocator = vma::createAllocatorUnique(createInfo);
*((vma::Allocator*) this) = *_allocator;
}
VKBuffer VKMemory::allocateBuffer(uint32_t size, vk::BufferUsageFlags usage,
vma::AllocationCreateFlags flags, vma::MemoryUsage memoryUsage) {
VKBuffer b = nullptr;
auto pair = createBufferUnique(vk::BufferCreateInfo {
{}, size, usage, vk::SharingMode::eExclusive, {}
}, vma::AllocationCreateInfo {
flags,
memoryUsage, {}, {}, (uint32_t) -1
}, b._allocationInfo);
b._buffer = std::move(pair.first);
b._allocation = std::move(pair.second);
b._size = size;
return b;
}
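A minimal usage sketch (assumed variable names): allocations are created persistently mapped, so vertex data is written straight through VKBuffer::data() at the current position:

    VKBuffer buf = memory.allocateBuffer(64 * 1024, vk::BufferUsageFlagBits::eVertexBuffer,
            vma::AllocationCreateFlagBits::eMapped | vma::AllocationCreateFlagBits::eHostAccessSequentialWrite,
            vma::MemoryUsage::eAutoPreferHost);
    auto* v = (float*) ((uintptr_t) buf.data() + buf.position()); // next free slot
    v[0] = 10.0f; v[1] = 20.0f;                                   // one vec2 vertex
    buf.position() += 2 * sizeof(float);                          // advance the write cursor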

View File

@@ -0,0 +1,69 @@
/*
* Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023, JetBrains s.r.o.. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef VKMemory_h_Included
#define VKMemory_h_Included
#define VK_NO_PROTOTYPES
#define VULKAN_HPP_NO_DEFAULT_DISPATCHER
#include <vulkan/vulkan_raii.hpp>
#include <vk_mem_alloc.hpp>
class VKBuffer {
friend class VKMemory;
vma::UniqueBuffer _buffer;
vma::UniqueAllocation _allocation;
vma::AllocationInfo _allocationInfo;
uint32_t _size = 0;
uint32_t _position = 0;
public:
VKBuffer(nullptr_t) {}
vk::Buffer operator*() const {
return *_buffer;
}
uint32_t size() const {
return _size;
}
uint32_t& position() {
return _position;
}
void* data() {
return _allocationInfo.pMappedData;
}
};
class VKMemory : vma::Allocator {
vma::UniqueAllocator _allocator;
public:
void init(vk::Instance instance, const vk::raii::PhysicalDevice& physicalDevice,
const vk::raii::Device& device, uint32_t apiVersion, bool extMemoryBudget);
VKBuffer allocateBuffer(uint32_t size, vk::BufferUsageFlags usage,
vma::AllocationCreateFlags flags, vma::MemoryUsage memoryUsage);
};
#endif //VKMemory_h_Included

View File

@@ -0,0 +1,127 @@
/*
* Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023, JetBrains s.r.o.. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "VKPipeline.h"
void VKPipelines::init(const vk::raii::Device& device, bool dynamicRendering) {
shaders.init(device);
vk::Format format = vk::Format::eB8G8R8A8Unorm; // TODO
if (!dynamicRendering) {
vk::AttachmentDescription attachmentDescription {
/*flags*/ {},
/*format*/ format,
/*samples*/ vk::SampleCountFlagBits::e1,
/*loadOp*/ vk::AttachmentLoadOp::eLoad,
/*storeOp*/ vk::AttachmentStoreOp::eStore,
/*stencilLoadOp*/ vk::AttachmentLoadOp::eDontCare,
/*stencilStoreOp*/ vk::AttachmentStoreOp::eDontCare,
/*initialLayout*/ vk::ImageLayout::eColorAttachmentOptimal,
/*finalLayout*/ vk::ImageLayout::eColorAttachmentOptimal
};
vk::AttachmentReference attachmentReference { 0, vk::ImageLayout::eColorAttachmentOptimal };
vk::SubpassDescription subpassDescription {
/*flags*/ {},
/*pipelineBindPoint*/ vk::PipelineBindPoint::eGraphics,
/*inputAttachmentCount*/ 0,
/*pInputAttachments*/ nullptr,
/*colorAttachmentCount*/ 1,
/*pColorAttachments*/ &attachmentReference,
/*pResolveAttachments*/ nullptr,
/*pDepthStencilAttachment*/ nullptr,
/*preserveAttachmentCount*/ 0,
/*pPreserveAttachments*/ nullptr,
};
// We don't know in advance which operations to synchronize
// with before and after the render pass, so do a full sync.
std::array<vk::SubpassDependency, 2> subpassDependencies {vk::SubpassDependency{
/*srcSubpass*/ VK_SUBPASS_EXTERNAL,
/*dstSubpass*/ 0,
/*srcStageMask*/ vk::PipelineStageFlagBits::eBottomOfPipe,
/*dstStageMask*/ vk::PipelineStageFlagBits::eColorAttachmentOutput,
/*srcAccessMask*/ {},
/*dstAccessMask*/ vk::AccessFlagBits::eColorAttachmentRead | vk::AccessFlagBits::eColorAttachmentWrite,
/*dependencyFlags*/ {},
}, vk::SubpassDependency{
/*srcSubpass*/ 0,
/*dstSubpass*/ VK_SUBPASS_EXTERNAL,
/*srcStageMask*/ vk::PipelineStageFlagBits::eColorAttachmentOutput,
/*dstStageMask*/ vk::PipelineStageFlagBits::eTopOfPipe,
/*srcAccessMask*/ vk::AccessFlagBits::eColorAttachmentRead | vk::AccessFlagBits::eColorAttachmentWrite,
/*dstAccessMask*/ {},
/*dependencyFlags*/ {},
}};
renderPass = device.createRenderPass(vk::RenderPassCreateInfo{
/*flags*/ {},
/*pAttachments*/ attachmentDescription,
/*pSubpasses*/ subpassDescription,
/*pDependencies*/ subpassDependencies
});
}
vk::PushConstantRange pushConstantRange {vk::ShaderStageFlagBits::eVertex, 0, sizeof(float) * 2};
testLayout = device.createPipelineLayout(vk::PipelineLayoutCreateInfo {{}, {}, pushConstantRange});
std::array<vk::PipelineShaderStageCreateInfo, 2> testStages {shaders.test_vert.stage(), shaders.test_frag.stage()};
vk::VertexInputBindingDescription vertexInputBindingDescription {0, 8, vk::VertexInputRate::eVertex};
vk::VertexInputAttributeDescription vertexInputAttributeDescription {0, 0, vk::Format::eR32G32Sfloat, 0};
vk::PipelineVertexInputStateCreateInfo vertexInputStateCreateInfo {{}, vertexInputBindingDescription, vertexInputAttributeDescription};
vk::PipelineInputAssemblyStateCreateInfo inputAssemblyStateCreateInfo {{}, vk::PrimitiveTopology::eTriangleFan, false};
vk::Viewport viewport;
vk::Rect2D scissor;
vk::PipelineViewportStateCreateInfo viewportStateCreateInfo {{}, viewport, scissor};
vk::PipelineRasterizationStateCreateInfo rasterizationStateCreateInfo {
{}, false, false, vk::PolygonMode::eFill, vk::CullModeFlagBits::eNone,
vk::FrontFace::eClockwise, false, 0, 0, 0, 1
};
vk::PipelineMultisampleStateCreateInfo multisampleStateCreateInfo {};
vk::PipelineColorBlendAttachmentState colorBlendAttachmentState {false}; // TODO No blending yet
colorBlendAttachmentState.colorWriteMask = vk::ColorComponentFlagBits::eR | vk::ColorComponentFlagBits::eG | vk::ColorComponentFlagBits::eB | vk::ColorComponentFlagBits::eA;
vk::PipelineColorBlendStateCreateInfo colorBlendStateCreateInfo {{}, false, vk::LogicOp::eXor, colorBlendAttachmentState};
std::array<vk::DynamicState, 2> dynamicStates {vk::DynamicState::eViewport, vk::DynamicState::eScissor};
vk::PipelineDynamicStateCreateInfo dynamicStateCreateInfo {{}, dynamicStates};
vk::PipelineRenderingCreateInfoKHR renderingCreateInfo {0, format};
auto pipelines = device.createGraphicsPipelines(nullptr, {
vk::GraphicsPipelineCreateInfo {
{}, testStages,
&vertexInputStateCreateInfo,
&inputAssemblyStateCreateInfo,
nullptr,
&viewportStateCreateInfo,
&rasterizationStateCreateInfo,
&multisampleStateCreateInfo,
nullptr,
&colorBlendStateCreateInfo,
&dynamicStateCreateInfo,
*testLayout,
*renderPass, 0, nullptr, 0,
dynamicRendering ? &renderingCreateInfo : nullptr
}
});
// TODO pipeline cache
test = std::move(pipelines[0]);
}

View File

@@ -0,0 +1,42 @@
/*
* Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023, JetBrains s.r.o.. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef VKPipeline_h_Included
#define VKPipeline_h_Included
#include "VKShader.h"
struct VKPipelines {
VKShaders shaders;
// TODO we need a pool of pipelines and (optionally) render passes for different formats.
vk::raii::RenderPass renderPass = nullptr; // Render pass is only needed if dynamic rendering is off.
vk::raii::PipelineLayout testLayout = nullptr;
vk::raii::Pipeline test = nullptr;
void init(const vk::raii::Device& device, bool dynamicRendering);
};
#endif //VKPipeline_h_Included

View File

@@ -26,8 +26,6 @@
#ifndef HEADLESS
#include <stdlib.h>
#include "sun_java2d_pipe_BufferedOpCodes.h"
#include "sun_java2d_pipe_BufferedRenderPipe.h"
#include "sun_java2d_pipe_BufferedTextPipe.h"
@@ -35,6 +33,7 @@
#include "Trace.h"
#include "jlong.h"
#include "VKRenderQueue.h"
#include "VKRenderer.h"
#define BYTES_PER_POLY_POINT \
sun_java2d_pipe_BufferedRenderPipe_BYTES_PER_POLY_POINT
@@ -62,12 +61,13 @@
#define OFFSET_XFORM sun_java2d_vulkan_VKBlitLoops_OFFSET_XFORM
#define OFFSET_ISOBLIT sun_java2d_vulkan_VKBlitLoops_OFFSET_ISOBLIT
static VKRenderer renderer;
extern "C" JNIEXPORT void JNICALL
Java_sun_java2d_vulkan_VKRenderQueue_flushBuffer
(JNIEnv *env, jobject oglrq,
jlong buf, jint limit)
{
jboolean sync = JNI_FALSE;
unsigned char *b, *end;
J2dTraceLn1(J2D_TRACE_INFO,
@@ -87,7 +87,7 @@ Java_sun_java2d_vulkan_VKRenderQueue_flushBuffer
while (b < end) {
jint opcode = NEXT_INT(b);
J2dRlsTraceLn2(J2D_TRACE_VERBOSE,
J2dRlsTraceLn2(J2D_TRACE_VERBOSE2,
"VKRenderQueue_flushBuffer: opcode=%d, rem=%d",
opcode, (end-b));
@@ -103,6 +103,7 @@ Java_sun_java2d_vulkan_VKRenderQueue_flushBuffer
J2dRlsTraceLn4(J2D_TRACE_VERBOSE,
"VKRenderQueue_flushBuffer: DRAW_LINE(%d, %d, %d, %d)",
x1, y1, x2, y2);
renderer.drawLine(x1, y1, x2, y2);
}
break;
case sun_java2d_pipe_BufferedOpCodes_DRAW_RECT:
@@ -114,6 +115,7 @@ Java_sun_java2d_vulkan_VKRenderQueue_flushBuffer
J2dRlsTraceLn4(J2D_TRACE_VERBOSE,
"VKRenderQueue_flushBuffer: DRAW_RECT(%d, %d, %d, %d)",
x, y, w, h);
renderer.drawRect(x, y, w, h);
}
break;
case sun_java2d_pipe_BufferedOpCodes_DRAW_POLY:
@@ -125,8 +127,8 @@ Java_sun_java2d_vulkan_VKRenderQueue_flushBuffer
jint *xPoints = (jint *)b;
jint *yPoints = ((jint *)b) + nPoints;
J2dRlsTraceLn(J2D_TRACE_VERBOSE, "VKRenderQueue_flushBuffer: DRAW_POLY");
SKIP_BYTES(b, nPoints * BYTES_PER_POLY_POINT);
renderer.drawPoly();
}
break;
case sun_java2d_pipe_BufferedOpCodes_DRAW_PIXEL:
@@ -134,6 +136,7 @@ Java_sun_java2d_vulkan_VKRenderQueue_flushBuffer
jint x = NEXT_INT(b);
jint y = NEXT_INT(b);
J2dRlsTraceLn(J2D_TRACE_VERBOSE, "VKRenderQueue_flushBuffer: DRAW_PIXEL");
renderer.drawPixel(x, y);
}
break;
case sun_java2d_pipe_BufferedOpCodes_DRAW_SCANLINES:
@@ -141,6 +144,7 @@ Java_sun_java2d_vulkan_VKRenderQueue_flushBuffer
jint count = NEXT_INT(b);
J2dRlsTraceLn(J2D_TRACE_VERBOSE, "VKRenderQueue_flushBuffer: DRAW_SCANLINES");
SKIP_BYTES(b, count * BYTES_PER_SCANLINE);
renderer.drawScanlines();
}
break;
case sun_java2d_pipe_BufferedOpCodes_DRAW_PARALLELOGRAM:
@@ -156,6 +160,7 @@ Java_sun_java2d_vulkan_VKRenderQueue_flushBuffer
J2dRlsTraceLn8(J2D_TRACE_VERBOSE,
"VKRenderQueue_flushBuffer: DRAW_PARALLELOGRAM(%f, %f, %f, %f, %f, %f, %f, %f)",
x11, y11, dx21, dy21, dx12, dy12, lwr21, lwr12);
renderer.drawParallelogram(x11, y11, dx21, dy21, dx12, dy12, lwr21, lwr12);
}
break;
case sun_java2d_pipe_BufferedOpCodes_DRAW_AAPARALLELOGRAM:
@@ -183,6 +188,7 @@ Java_sun_java2d_vulkan_VKRenderQueue_flushBuffer
jint h = NEXT_INT(b);
J2dRlsTraceLn4(J2D_TRACE_VERBOSE,
"VKRenderQueue_flushBuffer: FILL_RECT(%d, %d, %d, %d)", x, y, w, h);
renderer.fillRect(x, y, w, h);
}
break;
case sun_java2d_pipe_BufferedOpCodes_FILL_SPANS:
@@ -191,6 +197,7 @@ Java_sun_java2d_vulkan_VKRenderQueue_flushBuffer
J2dRlsTraceLn(J2D_TRACE_VERBOSE,
"VKRenderQueue_flushBuffer: FILL_SPANS");
SKIP_BYTES(b, count * BYTES_PER_SPAN);
renderer.fillSpans();
}
break;
case sun_java2d_pipe_BufferedOpCodes_FILL_PARALLELOGRAM:
@@ -204,6 +211,7 @@ Java_sun_java2d_vulkan_VKRenderQueue_flushBuffer
J2dRlsTraceLn6(J2D_TRACE_VERBOSE,
"VKRenderQueue_flushBuffer: FILL_PARALLELOGRAM(%f, %f, %f, %f, %f, %f)",
x11, y11, dx21, dy21, dx12, dy12);
renderer.fillParallelogram(x11, y11, dx21, dy21, dx12, dy12);
}
break;
case sun_java2d_pipe_BufferedOpCodes_FILL_AAPARALLELOGRAM:
@@ -217,6 +225,7 @@ Java_sun_java2d_vulkan_VKRenderQueue_flushBuffer
J2dRlsTraceLn6(J2D_TRACE_VERBOSE,
"VKRenderQueue_flushBuffer: FILL_AAPARALLELOGRAM(%f, %f, %f, %f, %f, %f)",
x11, y11, dx21, dy21, dx12, dy12);
renderer.fillAAParallelogram(x11, y11, dx21, dy21, dx12, dy12);
}
break;
@@ -247,6 +256,7 @@ Java_sun_java2d_vulkan_VKRenderQueue_flushBuffer
}
J2dRlsTraceLn(J2D_TRACE_VERBOSE, "VKRenderQueue_flushBuffer: DRAW_GLYPH_LIST");
SKIP_BYTES(b, numGlyphs * bytesPerGlyph);
renderer.drawGlyphList();
}
break;
@@ -262,6 +272,7 @@ Java_sun_java2d_vulkan_VKRenderQueue_flushBuffer
J2dRlsTraceLn6(J2D_TRACE_VERBOSE,
"VKRenderQueue_flushBuffer: COPY_AREA(%d, %d, %d, %d, %d, %d)",
x, y, w, h, dx, dy);
renderer.copyArea(x, y, w, h, dx, dy);
}
break;
case sun_java2d_pipe_BufferedOpCodes_BLIT:
@@ -287,6 +298,7 @@ Java_sun_java2d_vulkan_VKRenderQueue_flushBuffer
jboolean isoblit = EXTRACT_BOOLEAN(packedParams,
OFFSET_ISOBLIT);
J2dRlsTraceLn(J2D_TRACE_VERBOSE, "VKRenderQueue_flushBuffer: BLIT");
renderer.blit();
}
break;
case sun_java2d_pipe_BufferedOpCodes_SURFACE_TO_SW_BLIT:
@@ -301,6 +313,7 @@ Java_sun_java2d_vulkan_VKRenderQueue_flushBuffer
jlong pSrc = NEXT_LONG(b);
jlong pDst = NEXT_LONG(b);
J2dRlsTraceLn(J2D_TRACE_VERBOSE, "VKRenderQueue_flushBuffer: SURFACE_TO_SW_BLIT");
renderer.surfaceToSwBlit();
}
break;
case sun_java2d_pipe_BufferedOpCodes_MASK_FILL:
@@ -315,6 +328,7 @@ Java_sun_java2d_vulkan_VKRenderQueue_flushBuffer
unsigned char *pMask = (masklen > 0) ? b : NULL;
J2dRlsTraceLn(J2D_TRACE_VERBOSE, "VKRenderQueue_flushBuffer: MASK_FILL");
SKIP_BYTES(b, masklen);
renderer.maskFill();
}
break;
case sun_java2d_pipe_BufferedOpCodes_MASK_BLIT:
@@ -326,6 +340,7 @@ Java_sun_java2d_vulkan_VKRenderQueue_flushBuffer
jint masklen = width * height * sizeof(jint);
J2dRlsTraceLn(J2D_TRACE_VERBOSE, "VKRenderQueue_flushBuffer: MASK_BLIT");
SKIP_BYTES(b, masklen);
renderer.maskBlit();
}
break;
@@ -339,12 +354,14 @@ Java_sun_java2d_vulkan_VKRenderQueue_flushBuffer
J2dRlsTraceLn4(J2D_TRACE_VERBOSE,
"VKRenderQueue_flushBuffer: SET_RECT_CLIP(%d, %d, %d, %d)",
x1, y1, x2, y2);
renderer.setRectClip(x1, y1, x2, y2);
}
break;
case sun_java2d_pipe_BufferedOpCodes_BEGIN_SHAPE_CLIP:
{
J2dRlsTraceLn(J2D_TRACE_VERBOSE,
"VKRenderQueue_flushBuffer: BEGIN_SHAPE_CLIP");
renderer.beginShapeClip();
}
break;
case sun_java2d_pipe_BufferedOpCodes_SET_SHAPE_CLIP_SPANS:
@@ -353,18 +370,21 @@ Java_sun_java2d_vulkan_VKRenderQueue_flushBuffer
J2dRlsTraceLn(J2D_TRACE_VERBOSE,
"VKRenderQueue_flushBuffer: SET_SHAPE_CLIP_SPANS");
SKIP_BYTES(b, count * BYTES_PER_SPAN);
renderer.setShapeClipSpans();
}
break;
case sun_java2d_pipe_BufferedOpCodes_END_SHAPE_CLIP:
{
J2dRlsTraceLn(J2D_TRACE_VERBOSE,
"VKRenderQueue_flushBuffer: END_SHAPE_CLIP");
renderer.endShapeClip();
}
break;
case sun_java2d_pipe_BufferedOpCodes_RESET_CLIP:
{
J2dRlsTraceLn(J2D_TRACE_VERBOSE,
"VKRenderQueue_flushBuffer: RESET_CLIP");
renderer.resetClip();
}
break;
case sun_java2d_pipe_BufferedOpCodes_SET_ALPHA_COMPOSITE:
@@ -374,6 +394,7 @@ Java_sun_java2d_vulkan_VKRenderQueue_flushBuffer
jint flags = NEXT_INT(b);
J2dRlsTraceLn(J2D_TRACE_VERBOSE,
"VKRenderQueue_flushBuffer: SET_ALPHA_COMPOSITE");
renderer.setAlphaComposite();
}
break;
case sun_java2d_pipe_BufferedOpCodes_SET_XOR_COMPOSITE:
@@ -381,12 +402,14 @@ Java_sun_java2d_vulkan_VKRenderQueue_flushBuffer
jint xorPixel = NEXT_INT(b);
J2dRlsTraceLn(J2D_TRACE_VERBOSE,
"VKRenderQueue_flushBuffer: SET_XOR_COMPOSITE");
renderer.setXorComposite();
}
break;
case sun_java2d_pipe_BufferedOpCodes_RESET_COMPOSITE:
{
J2dRlsTraceLn(J2D_TRACE_VERBOSE,
"VKRenderQueue_flushBuffer: RESET_COMPOSITE");
renderer.resetComposite();
}
break;
case sun_java2d_pipe_BufferedOpCodes_SET_TRANSFORM:
@@ -399,22 +422,25 @@ Java_sun_java2d_vulkan_VKRenderQueue_flushBuffer
jdouble m12 = NEXT_DOUBLE(b);
J2dRlsTraceLn(J2D_TRACE_VERBOSE,
"VKRenderQueue_flushBuffer: SET_TRANSFORM");
renderer.setTransform(m00, m10, m01, m11, m02, m12);
}
break;
case sun_java2d_pipe_BufferedOpCodes_RESET_TRANSFORM:
{
J2dRlsTraceLn(J2D_TRACE_VERBOSE,
"VKRenderQueue_flushBuffer: RESET_TRANSFORM");
renderer.resetTransform();
}
break;
// context-related ops
case sun_java2d_pipe_BufferedOpCodes_SET_SURFACES:
{
jlong pSrc = NEXT_LONG(b);
jlong pDst = NEXT_LONG(b);
VKSurfaceData* src = NEXT_SURFACE(b);
VKSurfaceData* dst = NEXT_SURFACE(b);
J2dRlsTraceLn(J2D_TRACE_VERBOSE,
"VKRenderQueue_flushBuffer: SET_SURFACES");
renderer.setSurfaces(*src, *dst);
}
break;
case sun_java2d_pipe_BufferedOpCodes_SET_SCRATCH_SURFACE:
@@ -422,20 +448,23 @@ Java_sun_java2d_vulkan_VKRenderQueue_flushBuffer
jlong pConfigInfo = NEXT_LONG(b);
J2dRlsTraceLn(J2D_TRACE_VERBOSE,
"VKRenderQueue_flushBuffer: SET_SCRATCH_SURFACE");
renderer.setScratchSurface();
}
break;
case sun_java2d_pipe_BufferedOpCodes_FLUSH_SURFACE:
{
jlong pData = NEXT_LONG(b);
VKSurfaceData* surface = NEXT_SURFACE(b);
J2dRlsTraceLn(J2D_TRACE_VERBOSE,
"VKRenderQueue_flushBuffer: FLUSH_SURFACE");
renderer.flushSurface(*surface);
}
break;
case sun_java2d_pipe_BufferedOpCodes_DISPOSE_SURFACE:
{
jlong pData = NEXT_LONG(b);
J2dRlsTraceLn(J2D_TRACE_VERBOSE,
"VKRenderQueue_flushBuffer: FLUSH_SURFACE");
"VKRenderQueue_flushBuffer: DISPOSE_SURFACE");
renderer.disposeSurface();
}
break;
case sun_java2d_pipe_BufferedOpCodes_DISPOSE_CONFIG:
@@ -443,18 +472,21 @@ Java_sun_java2d_vulkan_VKRenderQueue_flushBuffer
jlong pConfigInfo = NEXT_LONG(b);
J2dRlsTraceLn(J2D_TRACE_VERBOSE,
"VKRenderQueue_flushBuffer: DISPOSE_CONFIG");
renderer.disposeConfig();
}
break;
case sun_java2d_pipe_BufferedOpCodes_INVALIDATE_CONTEXT:
{
J2dRlsTraceLn(J2D_TRACE_VERBOSE,
"VKRenderQueue_flushBuffer: INVALIDATE_CONTEXT");
renderer.invalidateContext();
}
break;
case sun_java2d_pipe_BufferedOpCodes_SYNC:
{
sync = JNI_TRUE;
J2dRlsTraceLn(J2D_TRACE_VERBOSE,
"VKRenderQueue_flushBuffer: SYNC");
renderer.sync();
}
break;
@@ -464,11 +496,16 @@ Java_sun_java2d_vulkan_VKRenderQueue_flushBuffer
jlong window = NEXT_LONG(b);
J2dRlsTraceLn(J2D_TRACE_VERBOSE,
"VKRenderQueue_flushBuffer: SWAP_BUFFERS");
renderer.swapBuffers();
}
break;
// special no-op (mainly used for achieving 8-byte alignment)
case sun_java2d_pipe_BufferedOpCodes_NOOP:
{
J2dRlsTraceLn(J2D_TRACE_VERBOSE,
"VKRenderQueue_flushBuffer: NOOP");
}
break;
// paint-related ops
@@ -476,6 +513,7 @@ Java_sun_java2d_vulkan_VKRenderQueue_flushBuffer
{
J2dRlsTraceLn(J2D_TRACE_VERBOSE,
"VKRenderQueue_flushBuffer: RESET_PAINT");
renderer.resetPaint();
}
break;
case sun_java2d_pipe_BufferedOpCodes_SET_COLOR:
@@ -483,6 +521,7 @@ Java_sun_java2d_vulkan_VKRenderQueue_flushBuffer
jint pixel = NEXT_INT(b);
J2dRlsTraceLn(J2D_TRACE_VERBOSE,
"VKRenderQueue_flushBuffer: SET_COLOR");
renderer.setColor((uint32_t) pixel);
}
break;
case sun_java2d_pipe_BufferedOpCodes_SET_GRADIENT_PAINT:
@@ -496,6 +535,7 @@ Java_sun_java2d_vulkan_VKRenderQueue_flushBuffer
jint pixel2 = NEXT_INT(b);
J2dRlsTraceLn(J2D_TRACE_VERBOSE,
"VKRenderQueue_flushBuffer: SET_GRADIENT_PAINT");
renderer.setGradientPaint();
}
break;
case sun_java2d_pipe_BufferedOpCodes_SET_LINEAR_GRADIENT_PAINT:
@@ -512,6 +552,7 @@ Java_sun_java2d_vulkan_VKRenderQueue_flushBuffer
pixels = b; SKIP_BYTES(b, numStops * sizeof(jint));
J2dRlsTraceLn(J2D_TRACE_VERBOSE,
"VKRenderQueue_flushBuffer: SET_LINEAR_GRADIENT_PAINT");
renderer.setLinearGradientPaint();
}
break;
case sun_java2d_pipe_BufferedOpCodes_SET_RADIAL_GRADIENT_PAINT:
@@ -532,6 +573,7 @@ Java_sun_java2d_vulkan_VKRenderQueue_flushBuffer
pixels = b; SKIP_BYTES(b, numStops * sizeof(jint));
J2dRlsTraceLn(J2D_TRACE_VERBOSE,
"VKRenderQueue_flushBuffer: SET_RADIAL_GRADIENT_PAINT");
renderer.setRadialGradientPaint();
}
break;
case sun_java2d_pipe_BufferedOpCodes_SET_TEXTURE_PAINT:
@@ -547,6 +589,7 @@ Java_sun_java2d_vulkan_VKRenderQueue_flushBuffer
jdouble yp3 = NEXT_DOUBLE(b);
J2dRlsTraceLn(J2D_TRACE_VERBOSE,
"VKRenderQueue_flushBuffer: SET_TEXTURE_PAINT");
renderer.setTexturePaint();
}
break;
@@ -560,12 +603,14 @@ Java_sun_java2d_vulkan_VKRenderQueue_flushBuffer
J2dRlsTraceLn(J2D_TRACE_VERBOSE,
"VKRenderQueue_flushBuffer: ENABLE_CONVOLVE_OP");
SKIP_BYTES(b, kernelWidth * kernelHeight * sizeof(jfloat));
renderer.enableConvolveOp();
}
break;
case sun_java2d_pipe_BufferedOpCodes_DISABLE_CONVOLVE_OP:
{
J2dRlsTraceLn(J2D_TRACE_VERBOSE,
"VKRenderQueue_flushBuffer: DISABLE_CONVOLVE_OP");
renderer.disableConvolveOp();
}
break;
case sun_java2d_pipe_BufferedOpCodes_ENABLE_RESCALE_OP:
@@ -578,12 +623,14 @@ Java_sun_java2d_vulkan_VKRenderQueue_flushBuffer
J2dRlsTraceLn(J2D_TRACE_VERBOSE,
"VKRenderQueue_flushBuffer: ENABLE_RESCALE_OP");
SKIP_BYTES(b, numFactors * sizeof(jfloat) * 2);
renderer.enableRescaleOp();
}
break;
case sun_java2d_pipe_BufferedOpCodes_DISABLE_RESCALE_OP:
{
J2dRlsTraceLn(J2D_TRACE_VERBOSE,
"VKRenderQueue_flushBuffer: DISABLE_RESCALE_OP");
renderer.disableRescaleOp();
}
break;
case sun_java2d_pipe_BufferedOpCodes_ENABLE_LOOKUP_OP:
@@ -599,12 +646,14 @@ Java_sun_java2d_vulkan_VKRenderQueue_flushBuffer
J2dRlsTraceLn(J2D_TRACE_VERBOSE,
"VKRenderQueue_flushBuffer: ENABLE_LOOKUP_OP");
SKIP_BYTES(b, numBands * bandLength * bytesPerElem);
renderer.enableLookupOp();
}
break;
case sun_java2d_pipe_BufferedOpCodes_DISABLE_LOOKUP_OP:
{
J2dRlsTraceLn(J2D_TRACE_VERBOSE,
"VKRenderQueue_flushBuffer: DISABLE_LOOKUP_OP");
renderer.disableLookupOp();
}
break;
@@ -614,6 +663,7 @@ Java_sun_java2d_vulkan_VKRenderQueue_flushBuffer
return;
}
}
renderer.flush();
}
#endif /* !HEADLESS */

View File

@@ -38,6 +38,7 @@
#define NEXT_BOOLEAN(buf) (jboolean)NEXT_INT(buf)
#define NEXT_LONG(buf) NEXT_VAL(buf, jlong)
#define NEXT_DOUBLE(buf) NEXT_VAL(buf, jdouble)
#define NEXT_SURFACE(buf) ((VKSurfaceData*) (SurfaceDataOps*) jlong_to_ptr(NEXT_LONG(buf)))
/*
* Increments a pointer (buf) by the given number of bytes.

View File

@@ -0,0 +1,282 @@
/*
* Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023, JetBrains s.r.o.. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "VKRenderer.h"
VKRecorder::Vertex* VKRecorder::draw(uint32_t numVertices) {
uint32_t bytes = numVertices * sizeof(VKRecorder::Vertex);
if (_renderPass.vertexBuffer == nullptr && !_vertexBuffers.empty()) {
_renderPass.vertexBuffer = &_vertexBuffers.back();
_renderPass.commandBuffer->bindVertexBuffers(0, **_renderPass.vertexBuffer, vk::DeviceSize(0));
}
if (_renderPass.vertexBuffer == nullptr ||
_renderPass.vertexBuffer->position() + bytes > _renderPass.vertexBuffer->size()) {
_vertexBuffers.push_back(device().getVertexBuffer());
        _renderPass.vertexBuffer = &_vertexBuffers.back(); // TODO check that the requested number of vertices fits into a single buffer at all
_renderPass.commandBuffer->bindVertexBuffers(0, **_renderPass.vertexBuffer, vk::DeviceSize(0));
}
auto data = (uintptr_t) _renderPass.vertexBuffer->data() + _renderPass.vertexBuffer->position();
uint32_t firstVertex = _renderPass.vertexBuffer->position() / sizeof(VKRecorder::Vertex);
_renderPass.vertexBuffer->position() += bytes;
_renderPass.commandBuffer->draw(numVertices, 1, firstVertex, 0);
return (VKRecorder::Vertex*) data;
}
VKDevice* VKRecorder::setDevice(VKDevice *device) {
if (device != _device) {
if (_device != nullptr) {
flush();
}
std::swap(_device, device);
}
return device;
}
void VKRecorder::waitSemaphore(vk::Semaphore semaphore, vk::PipelineStageFlags stage) {
_waitSemaphores.push_back(semaphore);
_waitSemaphoreStages.push_back(stage);
}
void VKRecorder::signalSemaphore(vk::Semaphore semaphore) {
_signalSemaphores.push_back(semaphore);
}
const vk::raii::CommandBuffer& VKRecorder::record(bool flushRenderPass) {
if (!*_commandBuffer) {
_commandBuffer = device().getCommandBuffer(vk::CommandBufferLevel::ePrimary);
_commandBuffer.begin({vk::CommandBufferUsageFlagBits::eOneTimeSubmit });
}
if (flushRenderPass && _renderPass.commandBuffer != nullptr) {
_renderPass.commandBuffer->end();
vk::Rect2D renderArea {{0, 0}, {_renderPass.surface->width(), _renderPass.surface->height()}};
if (device().dynamicRendering()) {
vk::RenderingAttachmentInfoKHR colorAttachmentInfo {
_renderPass.surfaceView, vk::ImageLayout::eColorAttachmentOptimal,
vk::ResolveModeFlagBits::eNone, {}, {},
_renderPass.attachmentLoadOp, vk::AttachmentStoreOp::eStore,
_renderPass.clearValue
};
_commandBuffer.beginRenderingKHR(vk::RenderingInfoKHR{
vk::RenderingFlagBitsKHR::eContentsSecondaryCommandBuffers,
renderArea, 1, 0, colorAttachmentInfo, {}, {}
});
} else {
_commandBuffer.beginRenderPass(vk::RenderPassBeginInfo{
/*renderPass*/ *device().pipelines().renderPass,
/*framebuffer*/ _renderPass.surfaceFramebuffer,
/*renderArea*/ renderArea,
/*clearValueCount*/ 0,
/*pClearValues*/ nullptr
}, vk::SubpassContents::eSecondaryCommandBuffers);
}
_commandBuffer.executeCommands(**_renderPass.commandBuffer);
if (device().dynamicRendering()) {
_commandBuffer.endRenderingKHR();
} else {
_commandBuffer.endRenderPass();
}
_renderPass = {};
}
return _commandBuffer;
}
const vk::raii::CommandBuffer& VKRecorder::render(VKSurfaceData& surface, vk::ClearColorValue* clear) {
if (_renderPass.surface != &surface) {
if (_renderPass.commandBuffer != nullptr) {
record(true); // Flush current render pass
}
VKSurfaceImage i = surface.access(*this,
vk::PipelineStageFlagBits::eColorAttachmentOutput,
vk::AccessFlagBits::eColorAttachmentWrite,
vk::ImageLayout::eColorAttachmentOptimal);
_renderPass.surface = &surface;
_renderPass.surfaceView = i.view;
_renderPass.surfaceFramebuffer = i.framebuffer;
_renderPass.attachmentLoadOp = vk::AttachmentLoadOp::eLoad;
}
if (clear != nullptr) {
_renderPass.clearValue = *clear;
_renderPass.attachmentLoadOp = vk::AttachmentLoadOp::eClear;
}
if (_renderPass.commandBuffer == nullptr || clear != nullptr) {
if (_renderPass.commandBuffer == nullptr) {
_secondaryBuffers.push_back(device().getCommandBuffer(vk::CommandBufferLevel::eSecondary));
_renderPass.commandBuffer = &_secondaryBuffers.back();
} else {
// We already recorded some rendering commands, but it doesn't matter, as we'll clear the surface anyway.
_renderPass.commandBuffer->reset({});
}
vk::Format format = surface.format();
vk::CommandBufferInheritanceRenderingInfoKHR inheritanceRenderingInfo {
vk::RenderingFlagBitsKHR::eContentsSecondaryCommandBuffers,
0, format
};
vk::CommandBufferInheritanceInfo inheritanceInfo;
if (device().dynamicRendering()) {
inheritanceInfo.pNext = &inheritanceRenderingInfo;
} else {
inheritanceInfo.renderPass = *device().pipelines().renderPass;
inheritanceInfo.subpass = 0;
inheritanceInfo.framebuffer = _renderPass.surfaceFramebuffer;
}
_renderPass.commandBuffer->begin({ vk::CommandBufferUsageFlagBits::eOneTimeSubmit |
vk::CommandBufferUsageFlagBits::eRenderPassContinue, &inheritanceInfo });
if (clear != nullptr && !device().dynamicRendering()) {
// Our static render pass uses loadOp=LOAD, so clear attachment manually.
_renderPass.commandBuffer->clearAttachments(vk::ClearAttachment {
vk::ImageAspectFlagBits::eColor, 0, _renderPass.clearValue
}, vk::ClearRect {vk::Rect2D{{0, 0}, {_renderPass.surface->width(), _renderPass.surface->height()}}, 0, 1});
}
}
return *_renderPass.commandBuffer;
}
void VKRecorder::flush() {
if (!*_commandBuffer && _renderPass.commandBuffer == nullptr) {
return;
}
record(true).end();
device().submitCommandBuffer(std::move(_commandBuffer), _secondaryBuffers, _vertexBuffers,
_waitSemaphores, _waitSemaphoreStages, _signalSemaphores);
}
// draw ops
void VKRenderer::drawLine(jint x1, jint y1, jint x2, jint y2) {/*TODO*/}
void VKRenderer::drawRect(jint x, jint y, jint w, jint h) {/*TODO*/}
void VKRenderer::drawPoly(/*TODO*/) {/*TODO*/}
void VKRenderer::drawPixel(jint x, jint y) {/*TODO*/}
void VKRenderer::drawScanlines(/*TODO*/) {/*TODO*/}
void VKRenderer::drawParallelogram(jfloat x11, jfloat y11,
jfloat dx21, jfloat dy21,
jfloat dx12, jfloat dy12,
jfloat lwr21, jfloat lwr12) {/*TODO*/}
void VKRenderer::drawAAParallelogram(jfloat x11, jfloat y11,
jfloat dx21, jfloat dy21,
jfloat dx12, jfloat dy12,
jfloat lwr21, jfloat lwr12) {/*TODO*/}
// fill ops
void VKRenderer::fillRect(jint xi, jint yi, jint wi, jint hi) {
// TODO
auto& cb = render(*_dstSurface);
// cb.clearAttachments(vk::ClearAttachment {vk::ImageAspectFlagBits::eColor, 0, _color},
// vk::ClearRect {vk::Rect2D {{x, y}, {(uint32_t) w, (uint32_t) h}}, 0, 1});
cb.bindPipeline(vk::PipelineBindPoint::eGraphics, *device().pipelines().test);
cb.pushConstants<float>(*device().pipelines().testLayout, vk::ShaderStageFlagBits::eVertex, 0, {
2.0f/(float)_dstSurface->width(), 2.0f/(float)_dstSurface->height()
});
vk::Viewport viewport {0, 0, (float) _dstSurface->width(), (float) _dstSurface->height(), 0, 1};
cb.setViewport(0, viewport);
vk::Rect2D scissor {{0, 0}, {_dstSurface->width(), _dstSurface->height()}};
cb.setScissor(0, scissor);
auto x = (float) xi, y = (float) yi, w = (float) wi, h = (float) hi;
auto v = draw(4);
v[0] = {x, y};
v[1] = {x+w, y};
v[2] = {x+w, y+h};
v[3] = {x, y+h};
}
void VKRenderer::fillSpans(/*TODO*/) {/*TODO*/}
void VKRenderer::fillParallelogram(jfloat x11, jfloat y11,
jfloat dx21, jfloat dy21,
jfloat dx12, jfloat dy12) {/*TODO*/}
void VKRenderer::fillAAParallelogram(jfloat x11, jfloat y11,
jfloat dx21, jfloat dy21,
jfloat dx12, jfloat dy12) {/*TODO*/}
// text-related ops
void VKRenderer::drawGlyphList(/*TODO*/) {/*TODO*/}
// copy-related ops
void VKRenderer::copyArea(jint x, jint y, jint w, jint h, jint dx, jint dy) {/*TODO*/}
void VKRenderer::blit(/*TODO*/) {/*TODO*/}
void VKRenderer::surfaceToSwBlit(/*TODO*/) {/*TODO*/}
void VKRenderer::maskFill(/*TODO*/) {/*TODO*/}
void VKRenderer::maskBlit(/*TODO*/) {/*TODO*/}
// state-related ops
void VKRenderer::setRectClip(jint x1, jint y1, jint x2, jint y2) {/*TODO*/}
void VKRenderer::beginShapeClip() {/*TODO*/}
void VKRenderer::setShapeClipSpans(/*TODO*/) {/*TODO*/}
void VKRenderer::endShapeClip() {/*TODO*/}
void VKRenderer::resetClip() {/*TODO*/}
void VKRenderer::setAlphaComposite(/*TODO*/) {/*TODO*/}
void VKRenderer::setXorComposite(/*TODO*/) {/*TODO*/}
void VKRenderer::resetComposite() {/*TODO*/}
void VKRenderer::setTransform(jdouble m00, jdouble m10,
jdouble m01, jdouble m11,
jdouble m02, jdouble m12) {/*TODO*/}
void VKRenderer::resetTransform() {/*TODO*/}
// context-related ops
void VKRenderer::setSurfaces(VKSurfaceData& src, VKSurfaceData& dst) {
if (&src.device() != &dst.device()) {
throw std::runtime_error("src and dst surfaces use different devices!");
}
setDevice(&dst.device());
_dstSurface = &dst;
_srcSurface = &src;
}
void VKRenderer::setScratchSurface(/*TODO*/) {/*TODO*/}
void VKRenderer::flushSurface(VKSurfaceData& surface) {
VKDevice* old = setDevice(&surface.device());
surface.flush(*this);
setDevice(old);
}
void VKRenderer::disposeSurface(/*TODO*/) {/*TODO*/}
void VKRenderer::disposeConfig(/*TODO*/) {/*TODO*/}
void VKRenderer::invalidateContext() {/*TODO*/}
void VKRenderer::sync() {/*TODO*/}
// multibuffering ops
void VKRenderer::swapBuffers(/*TODO*/) {/*TODO*/}
// paint-related ops
void VKRenderer::resetPaint() {/*TODO*/}
void VKRenderer::setColor(uint32_t pixel) {
_color = pixel;
}
void VKRenderer::setGradientPaint(/*TODO*/) {/*TODO*/}
void VKRenderer::setLinearGradientPaint(/*TODO*/) {/*TODO*/}
void VKRenderer::setRadialGradientPaint(/*TODO*/) {/*TODO*/}
void VKRenderer::setTexturePaint(/*TODO*/) {/*TODO*/}
// BufferedImageOp-related ops
void VKRenderer::enableConvolveOp(/*TODO*/) {/*TODO*/}
void VKRenderer::disableConvolveOp() {/*TODO*/}
void VKRenderer::enableRescaleOp(/*TODO*/) {/*TODO*/}
void VKRenderer::disableRescaleOp() {/*TODO*/}
void VKRenderer::enableLookupOp() {/*TODO*/}
void VKRenderer::disableLookupOp() {/*TODO*/}

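Putting the pieces together, a typical frame through this recorder/renderer pair looks roughly like the sketch below; it assumes renderer, src and dst are already created and attached to the same device:

// Sketch of the intended call sequence; error handling omitted.
renderer.setSurfaces(src, dst);      // selects the device, flushing if it changed
renderer.setColor(0xFF336699);       // packed ARGB, unpacked by the Color helper
renderer.fillRect(10, 10, 100, 50);  // begins/extends the render pass, appends 4 vertices
renderer.flushSurface(dst);          // ends the pass, submits, presents swapchain surfaces

Note the consequence of the secondary-buffer design: a clear resets the current secondary command buffer in render(), so draws recorded before a clear are never executed by the device.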

@@ -0,0 +1,174 @@
/*
* Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023, JetBrains s.r.o.. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef VKRenderer_h_Included
#define VKRenderer_h_Included
#ifdef __cplusplus
#include "VKBase.h"
#include "VKSurfaceData.h"
class VKRecorder{
VKDevice *_device;
vk::raii::CommandBuffer _commandBuffer = nullptr;
std::vector<vk::raii::CommandBuffer> _secondaryBuffers;
std::vector<vk::Semaphore> _waitSemaphores, _signalSemaphores;
std::vector<vk::PipelineStageFlags> _waitSemaphoreStages;
std::vector<VKBuffer> _vertexBuffers;
struct RenderPass {
vk::raii::CommandBuffer *commandBuffer = nullptr;
VKSurfaceData *surface = nullptr;
VKBuffer *vertexBuffer = nullptr;
vk::ImageView surfaceView;
vk::Framebuffer surfaceFramebuffer; // Only if dynamic rendering is off.
vk::AttachmentLoadOp attachmentLoadOp;
vk::ClearValue clearValue;
} _renderPass {};
protected:
struct Vertex {
float x, y;
};
Vertex* draw(uint32_t numVertices);
VKDevice& device() {
return *_device;
}
VKDevice* setDevice(VKDevice *device);
public:
void waitSemaphore(vk::Semaphore semaphore, vk::PipelineStageFlags stage);
void signalSemaphore(vk::Semaphore semaphore);
const vk::raii::CommandBuffer& record(bool flushRenderPass = true); // Prepare for ordinary commands
const vk::raii::CommandBuffer& render(VKSurfaceData& surface,
vk::ClearColorValue* clear = nullptr); // Prepare for render pass commands
void flush();
};
class VKRenderer : private VKRecorder{
VKSurfaceData *_srcSurface, *_dstSurface;
struct alignas(16) Color {
float r, g, b, a;
Color& operator=(uint32_t c) {
r = (float) ((c >> 16) & 0xff) / 255.0f;
g = (float) ((c >> 8) & 0xff) / 255.0f;
b = (float) (c & 0xff) / 255.0f;
a = (float) ((c >> 24) & 0xff) / 255.0f;
return *this;
}
operator vk::ClearValue() const {
return vk::ClearColorValue {r, g, b, a};
}
} _color;
public:
using VKRecorder::flush;
// draw ops
void drawLine(jint x1, jint y1, jint x2, jint y2);
void drawRect(jint x, jint y, jint w, jint h);
void drawPoly(/*TODO*/);
void drawPixel(jint x, jint y);
void drawScanlines(/*TODO*/);
void drawParallelogram(jfloat x11, jfloat y11,
jfloat dx21, jfloat dy21,
jfloat dx12, jfloat dy12,
jfloat lwr21, jfloat lwr12);
void drawAAParallelogram(jfloat x11, jfloat y11,
jfloat dx21, jfloat dy21,
jfloat dx12, jfloat dy12,
jfloat lwr21, jfloat lwr12);
// fill ops
void fillRect(jint x, jint y, jint w, jint h);
void fillSpans(/*TODO*/);
void fillParallelogram(jfloat x11, jfloat y11,
jfloat dx21, jfloat dy21,
jfloat dx12, jfloat dy12);
void fillAAParallelogram(jfloat x11, jfloat y11,
jfloat dx21, jfloat dy21,
jfloat dx12, jfloat dy12);
// text-related ops
void drawGlyphList(/*TODO*/);
// copy-related ops
void copyArea(jint x, jint y, jint w, jint h, jint dx, jint dy);
void blit(/*TODO*/);
void surfaceToSwBlit(/*TODO*/);
void maskFill(/*TODO*/);
void maskBlit(/*TODO*/);
// state-related ops
void setRectClip(jint x1, jint y1, jint x2, jint y2);
void beginShapeClip();
void setShapeClipSpans(/*TODO*/);
void endShapeClip();
void resetClip();
void setAlphaComposite(/*TODO*/);
void setXorComposite(/*TODO*/);
void resetComposite();
void setTransform(jdouble m00, jdouble m10,
jdouble m01, jdouble m11,
jdouble m02, jdouble m12);
void resetTransform();
// context-related ops
void setSurfaces(VKSurfaceData& src, VKSurfaceData& dst);
void setScratchSurface(/*TODO*/);
void flushSurface(VKSurfaceData& surface);
void disposeSurface(/*TODO*/);
void disposeConfig(/*TODO*/);
void invalidateContext();
void sync();
// multibuffering ops
void swapBuffers(/*TODO*/);
// paint-related ops
void resetPaint();
void setColor(uint32_t pixel);
void setGradientPaint(/*TODO*/);
void setLinearGradientPaint(/*TODO*/);
void setRadialGradientPaint(/*TODO*/);
void setTexturePaint(/*TODO*/);
// BufferedImageOp-related ops
void enableConvolveOp(/*TODO*/);
void disableConvolveOp();
void enableRescaleOp(/*TODO*/);
void disableRescaleOp();
void enableLookupOp();
void disableLookupOp();
};
#endif //__cplusplus
#endif //VKRenderer_h_Included

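As a worked example of the ARGB unpacking done by the Color helper above:

// setColor(0x80FF7F00): a=0x80, r=0xFF, g=0x7F, b=0x00
float r = float((0x80FF7F00u >> 16) & 0xffu) / 255.0f; // 1.000f
float g = float((0x80FF7F00u >>  8) & 0xffu) / 255.0f; // 127/255 ~ 0.498f
float b = float( 0x80FF7F00u        & 0xffu) / 255.0f; // 0.000f
float a = float((0x80FF7F00u >> 24) & 0xffu) / 255.0f; // 128/255 ~ 0.502f
// alignas(16) presumably matches the 16-byte alignment a vec4 color needs
// when handed to shaders (e.g. as a push constant).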

@@ -0,0 +1,47 @@
/*
* Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023, JetBrains s.r.o.. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "VKShader.h"
// Inline bytecode of all shaders
#define INCLUDE_BYTECODE
#define SHADER_ENTRY(NAME, TYPE) static uint32_t NAME ## _ ## TYPE ## _data[] = {
#define BYTECODE_END };
#include "vulkan/shader_list.h"
#undef INCLUDE_BYTECODE
#undef SHADER_ENTRY
#undef BYTECODE_END
void VKShaders::init(const vk::raii::Device& device) {
// Declare file extensions as stage flags
auto vert = vk::ShaderStageFlagBits::eVertex;
auto frag = vk::ShaderStageFlagBits::eFragment;
// Init all shader modules
# define SHADER_ENTRY(NAME, TYPE) \
NAME ## _ ## TYPE.init(device, sizeof NAME ## _ ## TYPE ## _data, NAME ## _ ## TYPE ## _data, TYPE);
# include "vulkan/shader_list.h"
# undef SHADER_ENTRY
}

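The X-macro include expands twice: once at file scope to declare the bytecode arrays, and once inside init() to initialize the modules (there SHADER_ENTRY is redefined and INCLUDE_BYTECODE is absent). A hypothetical generated vulkan/shader_list.h — illustrative names, the real file is produced by the build from shader source file names — would look roughly like:

// Hypothetical shader_list.h: one SHADER_ENTRY per compiled <name>.<stage> shader.
#ifdef INCLUDE_BYTECODE
SHADER_ENTRY(test, vert) 0x07230203, /* ...SPIR-V words... */ BYTECODE_END
SHADER_ENTRY(test, frag) 0x07230203, /* ...SPIR-V words... */ BYTECODE_END
#else
SHADER_ENTRY(test, vert)
SHADER_ENTRY(test, frag)
#endif

(0x07230203 is the SPIR-V magic number that leads every compiled module.)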

@@ -0,0 +1,57 @@
/*
* Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023, JetBrains s.r.o.. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef VKShader_h_Included
#define VKShader_h_Included
#define VK_NO_PROTOTYPES
#define VULKAN_HPP_NO_DEFAULT_DISPATCHER
#include <vulkan/vulkan_raii.hpp>
class VKShader : public vk::raii::ShaderModule {
friend struct VKShaders;
vk::ShaderStageFlagBits _stage;
void init(const vk::raii::Device& device, size_t size, const uint32_t* data, vk::ShaderStageFlagBits stage) {
*((vk::raii::ShaderModule*) this) = device.createShaderModule({{}, size, data});
_stage = stage;
}
public:
VKShader() : vk::raii::ShaderModule(nullptr), _stage() {}
vk::PipelineShaderStageCreateInfo stage(const vk::SpecializationInfo *specializationInfo = nullptr) {
return vk::PipelineShaderStageCreateInfo {{}, _stage, **this, "main", specializationInfo};
}
};
struct VKShaders {
// Actual list of shaders is autogenerated from source file names
# define SHADER_ENTRY(NAME, TYPE) VKShader NAME ## _ ## TYPE;
# include "vulkan/shader_list.h"
# undef SHADER_ENTRY
void init(const vk::raii::Device& device);
};
#endif //VKShader_h_Included

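With stage() in place, assembling the stage array for a graphics pipeline is one line per shader. A sketch, assuming the test_vert/test_frag entries from the hypothetical list above:

VKShaders shaders;
shaders.init(device); // device is a vk::raii::Device
vk::PipelineShaderStageCreateInfo stages[] {
    shaders.test_vert.stage(),
    shaders.test_frag.stage()
};
// 'stages' then feeds vk::GraphicsPipelineCreateInfo::setStages(...).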

@@ -32,8 +32,4 @@ jboolean VK_Init() {
return 0;
}
jint VK_MaxTextureSize() {
return 0;
}
#endif


@@ -0,0 +1,211 @@
/*
* Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023, JetBrains s.r.o.. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "jni_util.h"
#include "Disposer.h"
#include "Trace.h"
#include "VKSurfaceData.h"
#include "VKRenderer.h"
void VKSurfaceData::attachToJavaSurface(JNIEnv *env, jobject javaSurfaceData) {
// SurfaceData utility functions operate on C structures and malloc/free,
// But we are using C++ classes, so set up the disposer manually.
// This is a C++ analogue of SurfaceData_InitOps / SurfaceData_SetOps.
jboolean exception = false;
if (JNU_GetFieldByName(env, &exception, javaSurfaceData, "pData", "J").j == 0 && !exception) {
jlong ptr = ptr_to_jlong((SurfaceDataOps*) this);
JNU_SetFieldByName(env, &exception, javaSurfaceData, "pData", "J", ptr);
/* Register the data for disposal */
Disposer_AddRecord(env, javaSurfaceData, [](JNIEnv *env, jlong ops) {
if (ops != 0) {
auto sd = (SurfaceDataOps*)jlong_to_ptr(ops);
jobject sdObject = sd->sdObject;
sd->Dispose(env, sd);
if (sdObject != nullptr) {
env->DeleteWeakGlobalRef(sdObject);
}
}
}, ptr);
} else if (!exception) {
throw std::runtime_error("Attempting to set SurfaceData ops twice");
}
if (exception) {
throw std::runtime_error("VKSurfaceData::attachToJavaSurface error");
}
sdObject = env->NewWeakGlobalRef(javaSurfaceData);
}
VKSurfaceData::VKSurfaceData(uint32_t w, uint32_t h, uint32_t s, uint32_t bgc)
: SurfaceDataOps(), _width(w), _height(h), _scale(s), _bg_color(bgc), _device(nullptr) {
Lock = [](JNIEnv *env, SurfaceDataOps *ops, SurfaceDataRasInfo *rasInfo, jint lockFlags) {
((VKSurfaceData*) ops)->_mutex.lock();
return SD_SUCCESS;
};
Unlock = [](JNIEnv *env, SurfaceDataOps *ops, SurfaceDataRasInfo *rasInfo) {
((VKSurfaceData*) ops)->_mutex.unlock();
};
Dispose = [](JNIEnv *env, SurfaceDataOps *ops) {
delete (VKSurfaceData*) ops;
};
}
bool VKSurfaceData::barrier(VKRecorder& recorder, vk::Image image,
vk::PipelineStageFlags stage, vk::AccessFlags access, vk::ImageLayout layout) {
// TODO consider write/read access
if (_lastStage != stage || _lastAccess != access || _layout != layout) {
if (_device->synchronization2()) {
vk::ImageMemoryBarrier2KHR barrier {
(vk::PipelineStageFlags2KHR) (VkFlags) _lastStage,
(vk::AccessFlags2KHR) (VkFlags) _lastAccess,
(vk::PipelineStageFlags2KHR) (VkFlags) stage,
(vk::AccessFlags2KHR) (VkFlags) access,
_layout, layout,
VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_IGNORED,
image, vk::ImageSubresourceRange {vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1}
};
recorder.record(false).pipelineBarrier2KHR(vk::DependencyInfoKHR {{}, {}, {}, barrier});
} else {
vk::ImageMemoryBarrier barrier {
_lastAccess, access,
_layout, layout,
VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_IGNORED,
image, vk::ImageSubresourceRange {vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1}
};
recorder.record(false).pipelineBarrier(_lastStage, stage, {}, {}, {}, barrier);
}
_lastStage = stage;
_lastAccess = access;
_layout = layout;
// TODO check write access
return true;
} else return false;
}
void VKSwapchainSurfaceData::revalidate(uint32_t w, uint32_t h, uint32_t s) {
if (*_swapchain && s == scale() && w == width() && h == height() ) {
J2dTraceLn1(J2D_TRACE_INFO,
"VKSwapchainSurfaceData_revalidate is skipped: swapchain(%p)",
*_swapchain);
return;
}
VKSurfaceData::revalidate(w, h, s);
if (!_device || !*_surface) {
J2dTraceLn2(J2D_TRACE_ERROR,
"VKSwapchainSurfaceData_revalidate is skipped: device(%p) surface(%p)",
_device, *_surface);
return;
}
vk::SurfaceCapabilitiesKHR surfaceCapabilities = device().getSurfaceCapabilitiesKHR(*_surface);
_format = vk::Format::eB8G8R8A8Unorm; // TODO?
// TODO all these parameters must be checked against device & surface capabilities
vk::SwapchainCreateInfoKHR swapchainCreateInfo{
{},
*_surface,
surfaceCapabilities.minImageCount,
format(),
vk::ColorSpaceKHR::eVkColorspaceSrgbNonlinear,
{width(), height()}, // TODO According to spec we need to use surfaceCapabilities.currentExtent, which is not available at this point (it gives -1)... We'll figure this out later
1,
vk::ImageUsageFlagBits::eColorAttachment | vk::ImageUsageFlagBits::eTransferDst,
vk::SharingMode::eExclusive,
0,
nullptr,
vk::SurfaceTransformFlagBitsKHR::eIdentity,
vk::CompositeAlphaFlagBitsKHR::eOpaque,
vk::PresentModeKHR::eImmediate,
true, *_swapchain
};
device().waitIdle(); // TODO proper synchronization in case there are rendering operations for old swapchain in flight
_images.clear();
_swapchain = device().createSwapchainKHR(swapchainCreateInfo);
for (vk::Image image : _swapchain.getImages()) {
_images.push_back({image, device().createImageView(vk::ImageViewCreateInfo {
{}, image, vk::ImageViewType::e2D, format(), {},
vk::ImageSubresourceRange {vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1}
})});
if (!device().dynamicRendering()) {
_images.back().framebuffer = device().createFramebuffer(vk::FramebufferCreateInfo{
/*flags*/ {},
/*renderPass*/ *device().pipelines().renderPass,
/*attachmentCount*/ 1,
/*pAttachments*/ &*_images.back().view,
/*width*/ width(),
/*height*/ height(),
/*layers*/ 1
});
}
}
_currentImage = (uint32_t) -1;
_freeSemaphore = nullptr;
// TODO Now we need to repaint our surface... How is it done? No idea
}
VKSurfaceImage VKSwapchainSurfaceData::access(VKRecorder& recorder,
vk::PipelineStageFlags stage,
vk::AccessFlags access,
vk::ImageLayout layout) {
// Acquire image
bool semaphorePending = false;
if (_currentImage == (uint32_t) -1) {
if (!*_freeSemaphore) {
_freeSemaphore = device().createSemaphore({});
}
auto img = _swapchain.acquireNextImage(-1, *_freeSemaphore, nullptr);
vk::resultCheck(img.first, "vk::SwapchainKHR::acquireNextImage");
_layout = vk::ImageLayout::eUndefined;
_lastStage = _lastWriteStage = vk::PipelineStageFlagBits::eTopOfPipe;
_lastAccess = _lastWriteAccess = {};
_currentImage = (int) img.second;
std::swap(_images[_currentImage].semaphore, _freeSemaphore);
semaphorePending = true;
}
// Insert barrier & semaphore
auto& current = _images[_currentImage];
barrier(recorder, current.image, stage, access, layout);
if (semaphorePending) {
recorder.waitSemaphore(*current.semaphore,
vk::PipelineStageFlagBits::eColorAttachmentOutput | vk::PipelineStageFlagBits::eTransfer);
}
return {current.image, *current.view, *current.framebuffer};
}
void VKSwapchainSurfaceData::flush(VKRecorder& recorder) {
if (_currentImage == (uint32_t) -1) {
return; // Nothing to flush
}
recorder.record(true); // Force flush current render pass before accessing image with present layout.
access(recorder, vk::PipelineStageFlagBits::eTopOfPipe, {}, vk::ImageLayout::ePresentSrcKHR);
auto& current = _images[_currentImage];
recorder.signalSemaphore(*current.semaphore);
recorder.flush();
device().queue().presentKHR(vk::PresentInfoKHR {
*current.semaphore, *_swapchain, _currentImage
});
_currentImage = (uint32_t) -1;
}

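The per-image semaphore juggling above is compact enough to misread, so spelled out:

// Lifecycle of a swapchain image's semaphore, as implemented above:
// 1. access(): acquireNextImage signals _freeSemaphore once the image is ready;
//    the semaphore is then swapped into _images[i], and the old per-image one
//    becomes the next _freeSemaphore.
// 2. The first queue submission touching the image waits on that semaphore
//    (recorder.waitSemaphore), so rendering starts only after acquisition.
// 3. flush(): the final submission re-signals the same semaphore, and
//    presentKHR waits on it, so presentation starts only after rendering.
// Reusing one binary semaphore this way is valid because its signals and
// waits strictly alternate.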

@@ -27,62 +27,10 @@
#ifndef VKSurfaceData_h_Included
#define VKSurfaceData_h_Included
#include "java_awt_image_AffineTransformOp.h"
#include "sun_java2d_pipe_hw_AccelSurface.h"
#include <mutex>
#include "jni.h"
#include "SurfaceData.h"
#include "Trace.h"
/**
* The VKSDOps structure describes a native Vulkan surface and contains all
* information pertaining to the native surface. Some information about
* the more important/different fields:
*
* void *privOps;
* Pointer to native-specific SurfaceData info, such as the
* native Drawable handle and GraphicsConfig data.
*
* jobject graphicsConfig;
* Strong reference to the VKGraphicsConfig used by this VKSurfaceData.
*
* jint drawableType;
* The surface type; can be any one of the surface type constants defined
* below (VK_WINDOW, VK_TEXTURE, etc).
*
* jboolean isOpaque;
* If true, the surface should be treated as being fully opaque.
*
* jboolean needsInit;
* If true, the surface requires some one-time initialization, which should
* be performed after a context has been made current to the surface for
* the first time.
*
* jint x/yOffset
* The offset in pixels of the Vulkan viewport origin from the lower-left
* corner of the heavyweight drawable.
*
* jint width/height;
* The cached surface bounds. For offscreen surface types (VK_RT_TEXTURE,
* VK_TEXTURE, etc.) these values must remain constant. Onscreen window
* surfaces (VK_WINDOW, etc.) may have their
* bounds changed in response to a programmatic or user-initiated event, so
* these values represent the last known dimensions. To determine the true
* current bounds of this surface, query the native Drawable through the
* privOps field.
*
*/
typedef struct _VKSDOps {
SurfaceDataOps sdOps;
void *privOps;
jobject graphicsConfig;
jint drawableType;
jboolean isOpaque;
jboolean needsInit;
jint xOffset;
jint yOffset;
jint width;
jint height;
} VKSDOps;
#include "VKBase.h"
/**
* These are shorthand names for the surface type constants defined in
@@ -92,4 +40,123 @@ typedef struct _VKSDOps {
#define VKSD_WINDOW sun_java2d_pipe_hw_AccelSurface_WINDOW
#define VKSD_TEXTURE sun_java2d_pipe_hw_AccelSurface_TEXTURE
#define VKSD_RT_TEXTURE sun_java2d_pipe_hw_AccelSurface_RT_TEXTURE
class VKRecorder;
struct VKSurfaceImage {
vk::Image image;
vk::ImageView view;
vk::Framebuffer framebuffer; // Only if dynamic rendering is off.
};
class VKSurfaceData : private SurfaceDataOps {
std::recursive_mutex _mutex;
uint32_t _width;
uint32_t _height;
    uint32_t _scale; // TODO Is it needed here at all?
uint32_t _bg_color;
protected:
VKDevice* _device;
vk::Format _format;
vk::ImageLayout _layout = vk::ImageLayout::eUndefined;
// We track any access and write access separately, as read-read access does not need synchronization.
vk::PipelineStageFlags _lastStage = vk::PipelineStageFlagBits::eTopOfPipe;
vk::PipelineStageFlags _lastWriteStage = vk::PipelineStageFlagBits::eTopOfPipe;
vk::AccessFlags _lastAccess = {};
vk::AccessFlags _lastWriteAccess = {};
/// Insert barrier if needed for given access and layout.
bool barrier(VKRecorder& recorder, vk::Image image,
vk::PipelineStageFlags stage, vk::AccessFlags access, vk::ImageLayout layout);
public:
VKSurfaceData(uint32_t w, uint32_t h, uint32_t s, uint32_t bgc);
    // No need to move, as objects must only be created with "new".
VKSurfaceData(VKSurfaceData&&) = delete;
VKSurfaceData& operator=(VKSurfaceData&&) = delete;
void attachToJavaSurface(JNIEnv *env, jobject javaSurfaceData);
VKDevice& device() const {
return *_device;
}
vk::Format format() const {
return _format;
}
uint32_t width() const {
return _width;
}
uint32_t height() const {
return _height;
}
uint32_t scale() const {
return _scale;
}
uint32_t bg_color() const {
return _bg_color;
}
void set_bg_color(uint32_t bg_color) {
if (_bg_color != bg_color) {
_bg_color = bg_color;
// TODO now we need to repaint the surface???
}
}
virtual ~VKSurfaceData() = default;
virtual void revalidate(uint32_t w, uint32_t h, uint32_t s) {
_width = w;
_height = h;
_scale = s;
}
/// Prepare image for access (necessary barriers & layout transitions).
virtual VKSurfaceImage access(VKRecorder& recorder,
vk::PipelineStageFlags stage,
vk::AccessFlags access,
vk::ImageLayout layout) = 0;
/// Flush all pending changes to the surface, including screen presentation for on-screen surfaces.
virtual void flush(VKRecorder& recorder) = 0;
};
class VKSwapchainSurfaceData : public VKSurfaceData {
struct Image {
vk::Image image;
vk::raii::ImageView view;
vk::raii::Framebuffer framebuffer = nullptr; // Only if dynamic rendering is off.
vk::raii::Semaphore semaphore = nullptr;
};
vk::raii::SurfaceKHR _surface = nullptr;
vk::raii::SwapchainKHR _swapchain = nullptr;
std::vector<Image> _images;
uint32_t _currentImage = (uint32_t) -1;
vk::raii::Semaphore _freeSemaphore = nullptr;
protected:
void reset(VKDevice& device, vk::raii::SurfaceKHR surface) {
_images.clear();
_swapchain = nullptr;
_surface = std::move(surface);
_device = &device;
}
public:
VKSwapchainSurfaceData(uint32_t w, uint32_t h, uint32_t s, uint32_t bgc)
        : VKSurfaceData(w, h, s, bgc) {}
virtual void revalidate(uint32_t w, uint32_t h, uint32_t s);
virtual VKSurfaceImage access(VKRecorder& recorder,
vk::PipelineStageFlags stage,
vk::AccessFlags access,
vk::ImageLayout layout);
virtual void flush(VKRecorder& recorder);
};
#endif /* VKSurfaceData_h_Included */

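As the tracking comment in VKSurfaceData implies, barrier() elides redundant transitions: repeated accesses with identical stage, access mask and layout cost nothing, and any change emits exactly one barrier. The _lastWrite* fields are groundwork for also eliding read-after-read across different stages (still marked TODO). A sketch of the current behavior:

// surface.access(rec, eFragmentShader, eShaderRead, eShaderReadOnlyOptimal);
//   -> barrier (layout transition)
// surface.access(rec, eFragmentShader, eShaderRead, eShaderReadOnlyOptimal);
//   -> elided, state unchanged
// surface.access(rec, eColorAttachmentOutput, eColorAttachmentWrite, eColorAttachmentOptimal);
//   -> barrier (stage, access and layout all changed)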
File diff suppressed because it is too large


@@ -0,0 +1,135 @@
#ifndef VULKAN_MEMORY_ALLOCATOR_HPP
#define VULKAN_MEMORY_ALLOCATOR_HPP
#if !defined(AMD_VULKAN_MEMORY_ALLOCATOR_H)
#include <vk_mem_alloc.h>
#endif
#include <vulkan/vulkan.hpp>
#if !defined(VMA_HPP_NAMESPACE)
#define VMA_HPP_NAMESPACE vma
#endif
#define VMA_HPP_NAMESPACE_STRING VULKAN_HPP_STRINGIFY(VMA_HPP_NAMESPACE)
#ifndef VULKAN_HPP_NO_SMART_HANDLE
namespace VMA_HPP_NAMESPACE {
struct Dispatcher {}; // VMA uses function pointers from VmaAllocator instead
class Allocator;
template<class T>
VULKAN_HPP_NAMESPACE::UniqueHandle<T, Dispatcher> createUniqueHandle(const T& t) VULKAN_HPP_NOEXCEPT {
return VULKAN_HPP_NAMESPACE::UniqueHandle<T, Dispatcher>(t);
}
template<class T, class O>
VULKAN_HPP_NAMESPACE::UniqueHandle<T, Dispatcher> createUniqueHandle(const T& t, const O* o) VULKAN_HPP_NOEXCEPT {
return VULKAN_HPP_NAMESPACE::UniqueHandle<T, Dispatcher>(t, o);
}
template<class F, class S, class O>
std::pair<VULKAN_HPP_NAMESPACE::UniqueHandle<F, Dispatcher>, VULKAN_HPP_NAMESPACE::UniqueHandle<S, Dispatcher>>
createUniqueHandle(const std::pair<F, S>& t, const O* o) VULKAN_HPP_NOEXCEPT {
return {
VULKAN_HPP_NAMESPACE::UniqueHandle<F, Dispatcher>(t.first, o),
VULKAN_HPP_NAMESPACE::UniqueHandle<S, Dispatcher>(t.second, o)
};
}
template<class T, class UniqueVectorAllocator, class VectorAllocator, class O>
std::vector<VULKAN_HPP_NAMESPACE::UniqueHandle<T, Dispatcher>, UniqueVectorAllocator>
createUniqueHandleVector(const std::vector<T, VectorAllocator>& vector, const O* o,
const UniqueVectorAllocator& vectorAllocator) VULKAN_HPP_NOEXCEPT {
std::vector<VULKAN_HPP_NAMESPACE::UniqueHandle<T, Dispatcher>, UniqueVectorAllocator> result(vectorAllocator);
result.reserve(vector.size());
for (const T& t : vector) result.emplace_back(t, o);
return result;
}
template<class T, class Owner> class Deleter {
const Owner* owner;
public:
Deleter() = default;
Deleter(const Owner* owner) VULKAN_HPP_NOEXCEPT : owner(owner) {}
protected:
void destroy(const T& t) VULKAN_HPP_NOEXCEPT; // Implemented manually for each handle type
};
template<class T> class Deleter<T, void> {
protected:
void destroy(const T& t) VULKAN_HPP_NOEXCEPT { t.destroy(); }
};
}
namespace VULKAN_HPP_NAMESPACE {
template<> struct UniqueHandleTraits<Buffer, VMA_HPP_NAMESPACE::Dispatcher> {
using deleter = VMA_HPP_NAMESPACE::Deleter<Buffer, VMA_HPP_NAMESPACE::Allocator>;
};
template<> struct UniqueHandleTraits<Image, VMA_HPP_NAMESPACE::Dispatcher> {
using deleter = VMA_HPP_NAMESPACE::Deleter<Image, VMA_HPP_NAMESPACE::Allocator>;
};
}
namespace VMA_HPP_NAMESPACE {
using UniqueBuffer = VULKAN_HPP_NAMESPACE::UniqueHandle<VULKAN_HPP_NAMESPACE::Buffer, Dispatcher>;
using UniqueImage = VULKAN_HPP_NAMESPACE::UniqueHandle<VULKAN_HPP_NAMESPACE::Image, Dispatcher>;
}
#endif
#include "vk_mem_alloc_enums.hpp"
#include "vk_mem_alloc_handles.hpp"
#include "vk_mem_alloc_structs.hpp"
#include "vk_mem_alloc_funcs.hpp"
namespace VMA_HPP_NAMESPACE {
#ifndef VULKAN_HPP_NO_SMART_HANDLE
# define VMA_HPP_DESTROY_IMPL(NAME) \
template<> VULKAN_HPP_INLINE void VULKAN_HPP_NAMESPACE::UniqueHandleTraits<NAME, Dispatcher>::deleter::destroy(const NAME& t) VULKAN_HPP_NOEXCEPT
VMA_HPP_DESTROY_IMPL(VULKAN_HPP_NAMESPACE::Buffer) { owner->destroyBuffer(t, nullptr); }
VMA_HPP_DESTROY_IMPL(VULKAN_HPP_NAMESPACE::Image) { owner->destroyImage(t, nullptr); }
VMA_HPP_DESTROY_IMPL(Pool) { owner->destroyPool(t); }
VMA_HPP_DESTROY_IMPL(Allocation) { owner->freeMemory(t); }
VMA_HPP_DESTROY_IMPL(VirtualAllocation) { owner->virtualFree(t); }
# undef VMA_HPP_DESTROY_IMPL
#endif
template<class InstanceDispatcher, class DeviceDispatcher>
VULKAN_HPP_CONSTEXPR VulkanFunctions functionsFromDispatcher(InstanceDispatcher const * instance,
DeviceDispatcher const * device) VULKAN_HPP_NOEXCEPT {
return VulkanFunctions {
instance->vkGetInstanceProcAddr,
instance->vkGetDeviceProcAddr,
instance->vkGetPhysicalDeviceProperties,
instance->vkGetPhysicalDeviceMemoryProperties,
device->vkAllocateMemory,
device->vkFreeMemory,
device->vkMapMemory,
device->vkUnmapMemory,
device->vkFlushMappedMemoryRanges,
device->vkInvalidateMappedMemoryRanges,
device->vkBindBufferMemory,
device->vkBindImageMemory,
device->vkGetBufferMemoryRequirements,
device->vkGetImageMemoryRequirements,
device->vkCreateBuffer,
device->vkDestroyBuffer,
device->vkCreateImage,
device->vkDestroyImage,
device->vkCmdCopyBuffer,
device->vkGetBufferMemoryRequirements2KHR ? device->vkGetBufferMemoryRequirements2KHR : device->vkGetBufferMemoryRequirements2,
device->vkGetImageMemoryRequirements2KHR ? device->vkGetImageMemoryRequirements2KHR : device->vkGetImageMemoryRequirements2,
device->vkBindBufferMemory2KHR ? device->vkBindBufferMemory2KHR : device->vkBindBufferMemory2,
device->vkBindImageMemory2KHR ? device->vkBindImageMemory2KHR : device->vkBindImageMemory2,
instance->vkGetPhysicalDeviceMemoryProperties2KHR ? instance->vkGetPhysicalDeviceMemoryProperties2KHR : instance->vkGetPhysicalDeviceMemoryProperties2,
device->vkGetDeviceBufferMemoryRequirements,
device->vkGetDeviceImageMemoryRequirements
};
}
template<class Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
VULKAN_HPP_CONSTEXPR VulkanFunctions functionsFromDispatcher(Dispatch const & dispatch
VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) VULKAN_HPP_NOEXCEPT {
return functionsFromDispatcher(&dispatch, &dispatch);
}
}
#endif

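A minimal sketch of wiring functionsFromDispatcher into allocator creation, assuming the stock VMA-Hpp entry points (vma::createAllocator) and vulkan-hpp RAII handles named instance, physicalDevice and device:

// VMA pulls all of its Vulkan entry points from the vulkan.hpp dispatchers.
vma::VulkanFunctions functions = vma::functionsFromDispatcher(
    instance.getDispatcher(), device.getDispatcher());
vma::AllocatorCreateInfo allocatorInfo {};
allocatorInfo.physicalDevice   = *physicalDevice;
allocatorInfo.device           = *device;
allocatorInfo.instance         = *instance;
allocatorInfo.pVulkanFunctions = &functions;
vma::Allocator allocator = vma::createAllocator(allocatorInfo);
// ... create buffers/images via the allocator ...
allocator.destroy();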

@@ -0,0 +1,478 @@
#ifndef VULKAN_MEMORY_ALLOCATOR_ENUMS_HPP
#define VULKAN_MEMORY_ALLOCATOR_ENUMS_HPP
namespace VMA_HPP_NAMESPACE {
enum class AllocatorCreateFlagBits : VmaAllocatorCreateFlags {
eExternallySynchronized = VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT,
eKhrDedicatedAllocation = VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT,
eKhrBindMemory2 = VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT,
eExtMemoryBudget = VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT,
eAmdDeviceCoherentMemory = VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT,
eBufferDeviceAddress = VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT,
eExtMemoryPriority = VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT
};
# if !defined( VULKAN_HPP_NO_TO_STRING )
VULKAN_HPP_INLINE std::string to_string(AllocatorCreateFlagBits value) {
if (value == AllocatorCreateFlagBits::eExternallySynchronized) return "ExternallySynchronized";
if (value == AllocatorCreateFlagBits::eKhrDedicatedAllocation) return "KhrDedicatedAllocation";
if (value == AllocatorCreateFlagBits::eKhrBindMemory2) return "KhrBindMemory2";
if (value == AllocatorCreateFlagBits::eExtMemoryBudget) return "ExtMemoryBudget";
if (value == AllocatorCreateFlagBits::eAmdDeviceCoherentMemory) return "AmdDeviceCoherentMemory";
if (value == AllocatorCreateFlagBits::eBufferDeviceAddress) return "BufferDeviceAddress";
if (value == AllocatorCreateFlagBits::eExtMemoryPriority) return "ExtMemoryPriority";
return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString(static_cast<uint32_t>(value)) + " )";
}
# endif
}
namespace VULKAN_HPP_NAMESPACE {
template<> struct FlagTraits<VMA_HPP_NAMESPACE::AllocatorCreateFlagBits> {
static VULKAN_HPP_CONST_OR_CONSTEXPR bool isBitmask = true;
static VULKAN_HPP_CONST_OR_CONSTEXPR Flags<VMA_HPP_NAMESPACE::AllocatorCreateFlagBits> allFlags =
VMA_HPP_NAMESPACE::AllocatorCreateFlagBits::eExternallySynchronized
| VMA_HPP_NAMESPACE::AllocatorCreateFlagBits::eKhrDedicatedAllocation
| VMA_HPP_NAMESPACE::AllocatorCreateFlagBits::eKhrBindMemory2
| VMA_HPP_NAMESPACE::AllocatorCreateFlagBits::eExtMemoryBudget
| VMA_HPP_NAMESPACE::AllocatorCreateFlagBits::eAmdDeviceCoherentMemory
| VMA_HPP_NAMESPACE::AllocatorCreateFlagBits::eBufferDeviceAddress
| VMA_HPP_NAMESPACE::AllocatorCreateFlagBits::eExtMemoryPriority;
};
}
namespace VMA_HPP_NAMESPACE {
using AllocatorCreateFlags = VULKAN_HPP_NAMESPACE::Flags<AllocatorCreateFlagBits>;
VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR AllocatorCreateFlags operator|(AllocatorCreateFlagBits bit0, AllocatorCreateFlagBits bit1) VULKAN_HPP_NOEXCEPT {
return AllocatorCreateFlags(bit0) | bit1;
}
VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR AllocatorCreateFlags operator&(AllocatorCreateFlagBits bit0, AllocatorCreateFlagBits bit1) VULKAN_HPP_NOEXCEPT {
return AllocatorCreateFlags(bit0) & bit1;
}
VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR AllocatorCreateFlags operator^(AllocatorCreateFlagBits bit0, AllocatorCreateFlagBits bit1) VULKAN_HPP_NOEXCEPT {
return AllocatorCreateFlags(bit0) ^ bit1;
}
VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR AllocatorCreateFlags operator~(AllocatorCreateFlagBits bits) VULKAN_HPP_NOEXCEPT {
return ~(AllocatorCreateFlags(bits));
}
# if !defined( VULKAN_HPP_NO_TO_STRING )
VULKAN_HPP_INLINE std::string to_string(AllocatorCreateFlags value) {
if (!value) return "{}";
std::string result;
if (value & AllocatorCreateFlagBits::eExternallySynchronized) result += "ExternallySynchronized | ";
if (value & AllocatorCreateFlagBits::eKhrDedicatedAllocation) result += "KhrDedicatedAllocation | ";
if (value & AllocatorCreateFlagBits::eKhrBindMemory2) result += "KhrBindMemory2 | ";
if (value & AllocatorCreateFlagBits::eExtMemoryBudget) result += "ExtMemoryBudget | ";
if (value & AllocatorCreateFlagBits::eAmdDeviceCoherentMemory) result += "AmdDeviceCoherentMemory | ";
if (value & AllocatorCreateFlagBits::eBufferDeviceAddress) result += "BufferDeviceAddress | ";
if (value & AllocatorCreateFlagBits::eExtMemoryPriority) result += "ExtMemoryPriority | ";
return "{ " + result.substr( 0, result.size() - 3 ) + " }";
}
# endif
}
namespace VMA_HPP_NAMESPACE {
enum class MemoryUsage {
eUnknown = VMA_MEMORY_USAGE_UNKNOWN,
eGpuOnly = VMA_MEMORY_USAGE_GPU_ONLY,
eCpuOnly = VMA_MEMORY_USAGE_CPU_ONLY,
eCpuToGpu = VMA_MEMORY_USAGE_CPU_TO_GPU,
eGpuToCpu = VMA_MEMORY_USAGE_GPU_TO_CPU,
eCpuCopy = VMA_MEMORY_USAGE_CPU_COPY,
eGpuLazilyAllocated = VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED,
eAuto = VMA_MEMORY_USAGE_AUTO,
eAutoPreferDevice = VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE,
eAutoPreferHost = VMA_MEMORY_USAGE_AUTO_PREFER_HOST
};
# if !defined( VULKAN_HPP_NO_TO_STRING )
VULKAN_HPP_INLINE std::string to_string(MemoryUsage value) {
if (value == MemoryUsage::eUnknown) return "Unknown";
if (value == MemoryUsage::eGpuOnly) return "GpuOnly";
if (value == MemoryUsage::eCpuOnly) return "CpuOnly";
if (value == MemoryUsage::eCpuToGpu) return "CpuToGpu";
if (value == MemoryUsage::eGpuToCpu) return "GpuToCpu";
if (value == MemoryUsage::eCpuCopy) return "CpuCopy";
if (value == MemoryUsage::eGpuLazilyAllocated) return "GpuLazilyAllocated";
if (value == MemoryUsage::eAuto) return "Auto";
if (value == MemoryUsage::eAutoPreferDevice) return "AutoPreferDevice";
if (value == MemoryUsage::eAutoPreferHost) return "AutoPreferHost";
return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString(static_cast<uint32_t>(value)) + " )";
}
# endif
}
namespace VMA_HPP_NAMESPACE {
enum class AllocationCreateFlagBits : VmaAllocationCreateFlags {
eDedicatedMemory = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT,
eNeverAllocate = VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT,
eMapped = VMA_ALLOCATION_CREATE_MAPPED_BIT,
eUserDataCopyString = VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT,
eUpperAddress = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT,
eDontBind = VMA_ALLOCATION_CREATE_DONT_BIND_BIT,
eWithinBudget = VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT,
eCanAlias = VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT,
eHostAccessSequentialWrite = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT,
eHostAccessRandom = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT,
eHostAccessAllowTransferInstead = VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT,
eStrategyMinMemory = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT,
eStrategyMinTime = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT,
eStrategyMinOffset = VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,
eStrategyBestFit = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT,
eStrategyFirstFit = VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT
};
# if !defined( VULKAN_HPP_NO_TO_STRING )
VULKAN_HPP_INLINE std::string to_string(AllocationCreateFlagBits value) {
if (value == AllocationCreateFlagBits::eDedicatedMemory) return "DedicatedMemory";
if (value == AllocationCreateFlagBits::eNeverAllocate) return "NeverAllocate";
if (value == AllocationCreateFlagBits::eMapped) return "Mapped";
if (value == AllocationCreateFlagBits::eUserDataCopyString) return "UserDataCopyString";
if (value == AllocationCreateFlagBits::eUpperAddress) return "UpperAddress";
if (value == AllocationCreateFlagBits::eDontBind) return "DontBind";
if (value == AllocationCreateFlagBits::eWithinBudget) return "WithinBudget";
if (value == AllocationCreateFlagBits::eCanAlias) return "CanAlias";
if (value == AllocationCreateFlagBits::eHostAccessSequentialWrite) return "HostAccessSequentialWrite";
if (value == AllocationCreateFlagBits::eHostAccessRandom) return "HostAccessRandom";
if (value == AllocationCreateFlagBits::eHostAccessAllowTransferInstead) return "HostAccessAllowTransferInstead";
if (value == AllocationCreateFlagBits::eStrategyMinMemory) return "StrategyMinMemory";
if (value == AllocationCreateFlagBits::eStrategyMinTime) return "StrategyMinTime";
if (value == AllocationCreateFlagBits::eStrategyMinOffset) return "StrategyMinOffset";
if (value == AllocationCreateFlagBits::eStrategyBestFit) return "StrategyBestFit";
if (value == AllocationCreateFlagBits::eStrategyFirstFit) return "StrategyFirstFit";
return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString(static_cast<uint32_t>(value)) + " )";
}
# endif
}
namespace VULKAN_HPP_NAMESPACE {
template<> struct FlagTraits<VMA_HPP_NAMESPACE::AllocationCreateFlagBits> {
static VULKAN_HPP_CONST_OR_CONSTEXPR bool isBitmask = true;
static VULKAN_HPP_CONST_OR_CONSTEXPR Flags<VMA_HPP_NAMESPACE::AllocationCreateFlagBits> allFlags =
VMA_HPP_NAMESPACE::AllocationCreateFlagBits::eDedicatedMemory
| VMA_HPP_NAMESPACE::AllocationCreateFlagBits::eNeverAllocate
| VMA_HPP_NAMESPACE::AllocationCreateFlagBits::eMapped
| VMA_HPP_NAMESPACE::AllocationCreateFlagBits::eUserDataCopyString
| VMA_HPP_NAMESPACE::AllocationCreateFlagBits::eUpperAddress
| VMA_HPP_NAMESPACE::AllocationCreateFlagBits::eDontBind
| VMA_HPP_NAMESPACE::AllocationCreateFlagBits::eWithinBudget
| VMA_HPP_NAMESPACE::AllocationCreateFlagBits::eCanAlias
| VMA_HPP_NAMESPACE::AllocationCreateFlagBits::eHostAccessSequentialWrite
| VMA_HPP_NAMESPACE::AllocationCreateFlagBits::eHostAccessRandom
| VMA_HPP_NAMESPACE::AllocationCreateFlagBits::eHostAccessAllowTransferInstead
| VMA_HPP_NAMESPACE::AllocationCreateFlagBits::eStrategyMinMemory
| VMA_HPP_NAMESPACE::AllocationCreateFlagBits::eStrategyMinTime
| VMA_HPP_NAMESPACE::AllocationCreateFlagBits::eStrategyMinOffset
| VMA_HPP_NAMESPACE::AllocationCreateFlagBits::eStrategyBestFit
| VMA_HPP_NAMESPACE::AllocationCreateFlagBits::eStrategyFirstFit;
};
}
namespace VMA_HPP_NAMESPACE {
using AllocationCreateFlags = VULKAN_HPP_NAMESPACE::Flags<AllocationCreateFlagBits>;
VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR AllocationCreateFlags operator|(AllocationCreateFlagBits bit0, AllocationCreateFlagBits bit1) VULKAN_HPP_NOEXCEPT {
return AllocationCreateFlags(bit0) | bit1;
}
VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR AllocationCreateFlags operator&(AllocationCreateFlagBits bit0, AllocationCreateFlagBits bit1) VULKAN_HPP_NOEXCEPT {
return AllocationCreateFlags(bit0) & bit1;
}
VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR AllocationCreateFlags operator^(AllocationCreateFlagBits bit0, AllocationCreateFlagBits bit1) VULKAN_HPP_NOEXCEPT {
return AllocationCreateFlags(bit0) ^ bit1;
}
VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR AllocationCreateFlags operator~(AllocationCreateFlagBits bits) VULKAN_HPP_NOEXCEPT {
return ~(AllocationCreateFlags(bits));
}
# if !defined( VULKAN_HPP_NO_TO_STRING )
VULKAN_HPP_INLINE std::string to_string(AllocationCreateFlags value) {
if (!value) return "{}";
std::string result;
if (value & AllocationCreateFlagBits::eDedicatedMemory) result += "DedicatedMemory | ";
if (value & AllocationCreateFlagBits::eNeverAllocate) result += "NeverAllocate | ";
if (value & AllocationCreateFlagBits::eMapped) result += "Mapped | ";
if (value & AllocationCreateFlagBits::eUserDataCopyString) result += "UserDataCopyString | ";
if (value & AllocationCreateFlagBits::eUpperAddress) result += "UpperAddress | ";
if (value & AllocationCreateFlagBits::eDontBind) result += "DontBind | ";
if (value & AllocationCreateFlagBits::eWithinBudget) result += "WithinBudget | ";
if (value & AllocationCreateFlagBits::eCanAlias) result += "CanAlias | ";
if (value & AllocationCreateFlagBits::eHostAccessSequentialWrite) result += "HostAccessSequentialWrite | ";
if (value & AllocationCreateFlagBits::eHostAccessRandom) result += "HostAccessRandom | ";
if (value & AllocationCreateFlagBits::eHostAccessAllowTransferInstead) result += "HostAccessAllowTransferInstead | ";
if (value & AllocationCreateFlagBits::eStrategyMinMemory) result += "StrategyMinMemory | ";
if (value & AllocationCreateFlagBits::eStrategyMinTime) result += "StrategyMinTime | ";
if (value & AllocationCreateFlagBits::eStrategyMinOffset) result += "StrategyMinOffset | ";
if (value & AllocationCreateFlagBits::eStrategyBestFit) result += "StrategyBestFit | ";
if (value & AllocationCreateFlagBits::eStrategyFirstFit) result += "StrategyFirstFit | ";
return "{ " + result.substr( 0, result.size() - 3 ) + " }";
}
# endif
}
namespace VMA_HPP_NAMESPACE {
enum class PoolCreateFlagBits : VmaPoolCreateFlags {
eIgnoreBufferImageGranularity = VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT,
eLinearAlgorithm = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT
};
# if !defined( VULKAN_HPP_NO_TO_STRING )
VULKAN_HPP_INLINE std::string to_string(PoolCreateFlagBits value) {
if (value == PoolCreateFlagBits::eIgnoreBufferImageGranularity) return "IgnoreBufferImageGranularity";
if (value == PoolCreateFlagBits::eLinearAlgorithm) return "LinearAlgorithm";
return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString(static_cast<uint32_t>(value)) + " )";
}
# endif
}
namespace VULKAN_HPP_NAMESPACE {
template<> struct FlagTraits<VMA_HPP_NAMESPACE::PoolCreateFlagBits> {
static VULKAN_HPP_CONST_OR_CONSTEXPR bool isBitmask = true;
static VULKAN_HPP_CONST_OR_CONSTEXPR Flags<VMA_HPP_NAMESPACE::PoolCreateFlagBits> allFlags =
VMA_HPP_NAMESPACE::PoolCreateFlagBits::eIgnoreBufferImageGranularity
| VMA_HPP_NAMESPACE::PoolCreateFlagBits::eLinearAlgorithm;
};
}
namespace VMA_HPP_NAMESPACE {
using PoolCreateFlags = VULKAN_HPP_NAMESPACE::Flags<PoolCreateFlagBits>;
VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR PoolCreateFlags operator|(PoolCreateFlagBits bit0, PoolCreateFlagBits bit1) VULKAN_HPP_NOEXCEPT {
return PoolCreateFlags(bit0) | bit1;
}
VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR PoolCreateFlags operator&(PoolCreateFlagBits bit0, PoolCreateFlagBits bit1) VULKAN_HPP_NOEXCEPT {
return PoolCreateFlags(bit0) & bit1;
}
VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR PoolCreateFlags operator^(PoolCreateFlagBits bit0, PoolCreateFlagBits bit1) VULKAN_HPP_NOEXCEPT {
return PoolCreateFlags(bit0) ^ bit1;
}
VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR PoolCreateFlags operator~(PoolCreateFlagBits bits) VULKAN_HPP_NOEXCEPT {
return ~(PoolCreateFlags(bits));
}
# if !defined( VULKAN_HPP_NO_TO_STRING )
VULKAN_HPP_INLINE std::string to_string(PoolCreateFlags value) {
if (!value) return "{}";
std::string result;
if (value & PoolCreateFlagBits::eIgnoreBufferImageGranularity) result += "IgnoreBufferImageGranularity | ";
if (value & PoolCreateFlagBits::eLinearAlgorithm) result += "LinearAlgorithm | ";
return "{ " + result.substr( 0, result.size() - 3 ) + " }";
}
# endif
}
namespace VMA_HPP_NAMESPACE {
enum class DefragmentationFlagBits : VmaDefragmentationFlags {
eFlagAlgorithmFast = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT,
eFlagAlgorithmBalanced = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT,
eFlagAlgorithmFull = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT,
eFlagAlgorithmExtensive = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT
};
# if !defined( VULKAN_HPP_NO_TO_STRING )
VULKAN_HPP_INLINE std::string to_string(DefragmentationFlagBits value) {
if (value == DefragmentationFlagBits::eFlagAlgorithmFast) return "FlagAlgorithmFast";
if (value == DefragmentationFlagBits::eFlagAlgorithmBalanced) return "FlagAlgorithmBalanced";
if (value == DefragmentationFlagBits::eFlagAlgorithmFull) return "FlagAlgorithmFull";
if (value == DefragmentationFlagBits::eFlagAlgorithmExtensive) return "FlagAlgorithmExtensive";
return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString(static_cast<uint32_t>(value)) + " )";
}
# endif
}
namespace VULKAN_HPP_NAMESPACE {
template<> struct FlagTraits<VMA_HPP_NAMESPACE::DefragmentationFlagBits> {
static VULKAN_HPP_CONST_OR_CONSTEXPR bool isBitmask = true;
static VULKAN_HPP_CONST_OR_CONSTEXPR Flags<VMA_HPP_NAMESPACE::DefragmentationFlagBits> allFlags =
VMA_HPP_NAMESPACE::DefragmentationFlagBits::eFlagAlgorithmFast
| VMA_HPP_NAMESPACE::DefragmentationFlagBits::eFlagAlgorithmBalanced
| VMA_HPP_NAMESPACE::DefragmentationFlagBits::eFlagAlgorithmFull
| VMA_HPP_NAMESPACE::DefragmentationFlagBits::eFlagAlgorithmExtensive;
};
}
namespace VMA_HPP_NAMESPACE {
using DefragmentationFlags = VULKAN_HPP_NAMESPACE::Flags<DefragmentationFlagBits>;
VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DefragmentationFlags operator|(DefragmentationFlagBits bit0, DefragmentationFlagBits bit1) VULKAN_HPP_NOEXCEPT {
return DefragmentationFlags(bit0) | bit1;
}
VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DefragmentationFlags operator&(DefragmentationFlagBits bit0, DefragmentationFlagBits bit1) VULKAN_HPP_NOEXCEPT {
return DefragmentationFlags(bit0) & bit1;
}
VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DefragmentationFlags operator^(DefragmentationFlagBits bit0, DefragmentationFlagBits bit1) VULKAN_HPP_NOEXCEPT {
return DefragmentationFlags(bit0) ^ bit1;
}
VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DefragmentationFlags operator~(DefragmentationFlagBits bits) VULKAN_HPP_NOEXCEPT {
return ~(DefragmentationFlags(bits));
}
# if !defined( VULKAN_HPP_NO_TO_STRING )
VULKAN_HPP_INLINE std::string to_string(DefragmentationFlags value) {
if (!value) return "{}";
std::string result;
if (value & DefragmentationFlagBits::eFlagAlgorithmFast) result += "FlagAlgorithmFast | ";
if (value & DefragmentationFlagBits::eFlagAlgorithmBalanced) result += "FlagAlgorithmBalanced | ";
if (value & DefragmentationFlagBits::eFlagAlgorithmFull) result += "FlagAlgorithmFull | ";
if (value & DefragmentationFlagBits::eFlagAlgorithmExtensive) result += "FlagAlgorithmExtensive | ";
return "{ " + result.substr( 0, result.size() - 3 ) + " }";
}
# endif
}
namespace VMA_HPP_NAMESPACE {
enum class DefragmentationMoveOperation {
eCopy = VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY,
eIgnore = VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE,
eDestroy = VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY
};
# if !defined( VULKAN_HPP_NO_TO_STRING )
VULKAN_HPP_INLINE std::string to_string(DefragmentationMoveOperation value) {
if (value == DefragmentationMoveOperation::eCopy) return "Copy";
if (value == DefragmentationMoveOperation::eIgnore) return "Ignore";
if (value == DefragmentationMoveOperation::eDestroy) return "Destroy";
return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString(static_cast<uint32_t>(value)) + " )";
}
# endif
}
namespace VMA_HPP_NAMESPACE {
enum class VirtualBlockCreateFlagBits : VmaVirtualBlockCreateFlags {
eLinearAlgorithm = VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT
};
# if !defined( VULKAN_HPP_NO_TO_STRING )
VULKAN_HPP_INLINE std::string to_string(VirtualBlockCreateFlagBits value) {
if (value == VirtualBlockCreateFlagBits::eLinearAlgorithm) return "LinearAlgorithm";
return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString(static_cast<uint32_t>(value)) + " )";
}
# endif
}
namespace VULKAN_HPP_NAMESPACE {
template<> struct FlagTraits<VMA_HPP_NAMESPACE::VirtualBlockCreateFlagBits> {
static VULKAN_HPP_CONST_OR_CONSTEXPR bool isBitmask = true;
static VULKAN_HPP_CONST_OR_CONSTEXPR Flags<VMA_HPP_NAMESPACE::VirtualBlockCreateFlagBits> allFlags =
VMA_HPP_NAMESPACE::VirtualBlockCreateFlagBits::eLinearAlgorithm;
};
}
namespace VMA_HPP_NAMESPACE {
using VirtualBlockCreateFlags = VULKAN_HPP_NAMESPACE::Flags<VirtualBlockCreateFlagBits>;
VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR VirtualBlockCreateFlags operator|(VirtualBlockCreateFlagBits bit0, VirtualBlockCreateFlagBits bit1) VULKAN_HPP_NOEXCEPT {
return VirtualBlockCreateFlags(bit0) | bit1;
}
VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR VirtualBlockCreateFlags operator&(VirtualBlockCreateFlagBits bit0, VirtualBlockCreateFlagBits bit1) VULKAN_HPP_NOEXCEPT {
return VirtualBlockCreateFlags(bit0) & bit1;
}
VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR VirtualBlockCreateFlags operator^(VirtualBlockCreateFlagBits bit0, VirtualBlockCreateFlagBits bit1) VULKAN_HPP_NOEXCEPT {
return VirtualBlockCreateFlags(bit0) ^ bit1;
}
VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR VirtualBlockCreateFlags operator~(VirtualBlockCreateFlagBits bits) VULKAN_HPP_NOEXCEPT {
return ~(VirtualBlockCreateFlags(bits));
}
# if !defined( VULKAN_HPP_NO_TO_STRING )
VULKAN_HPP_INLINE std::string to_string(VirtualBlockCreateFlags value) {
if (!value) return "{}";
std::string result;
if (value & VirtualBlockCreateFlagBits::eLinearAlgorithm) result += "LinearAlgorithm | ";
return "{ " + result.substr( 0, result.size() - 3 ) + " }";
}
# endif
}
namespace VMA_HPP_NAMESPACE {
enum class VirtualAllocationCreateFlagBits : VmaVirtualAllocationCreateFlags {
eUpperAddress = VMA_VIRTUAL_ALLOCATION_CREATE_UPPER_ADDRESS_BIT,
eStrategyMinMemory = VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT,
eStrategyMinTime = VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT,
eStrategyMinOffset = VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT
};
# if !defined( VULKAN_HPP_NO_TO_STRING )
VULKAN_HPP_INLINE std::string to_string(VirtualAllocationCreateFlagBits value) {
if (value == VirtualAllocationCreateFlagBits::eUpperAddress) return "UpperAddress";
if (value == VirtualAllocationCreateFlagBits::eStrategyMinMemory) return "StrategyMinMemory";
if (value == VirtualAllocationCreateFlagBits::eStrategyMinTime) return "StrategyMinTime";
if (value == VirtualAllocationCreateFlagBits::eStrategyMinOffset) return "StrategyMinOffset";
return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString(static_cast<uint32_t>(value)) + " )";
}
# endif
}
namespace VULKAN_HPP_NAMESPACE {
template<> struct FlagTraits<VMA_HPP_NAMESPACE::VirtualAllocationCreateFlagBits> {
static VULKAN_HPP_CONST_OR_CONSTEXPR bool isBitmask = true;
static VULKAN_HPP_CONST_OR_CONSTEXPR Flags<VMA_HPP_NAMESPACE::VirtualAllocationCreateFlagBits> allFlags =
VMA_HPP_NAMESPACE::VirtualAllocationCreateFlagBits::eUpperAddress
| VMA_HPP_NAMESPACE::VirtualAllocationCreateFlagBits::eStrategyMinMemory
| VMA_HPP_NAMESPACE::VirtualAllocationCreateFlagBits::eStrategyMinTime
| VMA_HPP_NAMESPACE::VirtualAllocationCreateFlagBits::eStrategyMinOffset;
};
}
namespace VMA_HPP_NAMESPACE {
using VirtualAllocationCreateFlags = VULKAN_HPP_NAMESPACE::Flags<VirtualAllocationCreateFlagBits>;
VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR VirtualAllocationCreateFlags operator|(VirtualAllocationCreateFlagBits bit0, VirtualAllocationCreateFlagBits bit1) VULKAN_HPP_NOEXCEPT {
return VirtualAllocationCreateFlags(bit0) | bit1;
}
VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR VirtualAllocationCreateFlags operator&(VirtualAllocationCreateFlagBits bit0, VirtualAllocationCreateFlagBits bit1) VULKAN_HPP_NOEXCEPT {
return VirtualAllocationCreateFlags(bit0) & bit1;
}
VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR VirtualAllocationCreateFlags operator^(VirtualAllocationCreateFlagBits bit0, VirtualAllocationCreateFlagBits bit1) VULKAN_HPP_NOEXCEPT {
return VirtualAllocationCreateFlags(bit0) ^ bit1;
}
VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR VirtualAllocationCreateFlags operator~(VirtualAllocationCreateFlagBits bits) VULKAN_HPP_NOEXCEPT {
return ~(VirtualAllocationCreateFlags(bits));
}
# if !defined( VULKAN_HPP_NO_TO_STRING )
VULKAN_HPP_INLINE std::string to_string(VirtualAllocationCreateFlags value) {
if (!value) return "{}";
std::string result;
if (value & VirtualAllocationCreateFlagBits::eUpperAddress) result += "UpperAddress | ";
if (value & VirtualAllocationCreateFlagBits::eStrategyMinMemory) result += "StrategyMinMemory | ";
if (value & VirtualAllocationCreateFlagBits::eStrategyMinTime) result += "StrategyMinTime | ";
if (value & VirtualAllocationCreateFlagBits::eStrategyMinOffset) result += "StrategyMinOffset | ";
return "{ " + result.substr( 0, result.size() - 3 ) + " }";
}
# endif
}
#endif
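The generated operators above turn each FlagBits enum into a type-safe bitmask. A minimal sketch of how they compose, assuming the default `vma` value of VMA_HPP_NAMESPACE and that this enums header is on the include path (the include name and everything else here are illustrative):

#include <iostream>
#include "vk_mem_alloc_enums.hpp" // hypothetical include name for the header above

int main() {
    // operator| on two bits yields a VirtualAllocationCreateFlags value.
    vma::VirtualAllocationCreateFlags flags =
        vma::VirtualAllocationCreateFlagBits::eUpperAddress
        | vma::VirtualAllocationCreateFlagBits::eStrategyMinOffset;
    // The generated to_string renders the whole set:
    // prints "{ UpperAddress | StrategyMinOffset }"
    std::cout << vma::to_string(flags) << std::endl;
    // operator& tests membership; Flags converts to bool contextually.
    return (flags & vma::VirtualAllocationCreateFlagBits::eUpperAddress) ? 0 : 1;
}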

File diff suppressed because it is too large


@@ -0,0 +1,935 @@
#ifndef VULKAN_MEMORY_ALLOCATOR_HANDLES_HPP
#define VULKAN_MEMORY_ALLOCATOR_HANDLES_HPP
namespace VMA_HPP_NAMESPACE {
struct DeviceMemoryCallbacks;
struct VulkanFunctions;
struct AllocatorCreateInfo;
struct AllocatorInfo;
struct Statistics;
struct DetailedStatistics;
struct TotalStatistics;
struct Budget;
struct AllocationCreateInfo;
struct PoolCreateInfo;
struct AllocationInfo;
struct DefragmentationInfo;
struct DefragmentationMove;
struct DefragmentationPassMoveInfo;
struct DefragmentationStats;
struct VirtualBlockCreateInfo;
struct VirtualAllocationCreateInfo;
struct VirtualAllocationInfo;
class Allocator;
class Pool;
class Allocation;
class DefragmentationContext;
class VirtualAllocation;
class VirtualBlock;
}
namespace VMA_HPP_NAMESPACE {
class Pool {
public:
using CType = VmaPool;
using NativeType = VmaPool;
public:
VULKAN_HPP_CONSTEXPR Pool() = default;
VULKAN_HPP_CONSTEXPR Pool(std::nullptr_t) VULKAN_HPP_NOEXCEPT {}
VULKAN_HPP_TYPESAFE_EXPLICIT Pool(VmaPool pool) VULKAN_HPP_NOEXCEPT : m_pool(pool) {}
#if defined(VULKAN_HPP_TYPESAFE_CONVERSION)
Pool& operator=(VmaPool pool) VULKAN_HPP_NOEXCEPT {
m_pool = pool;
return *this;
}
#endif
Pool& operator=(std::nullptr_t) VULKAN_HPP_NOEXCEPT {
m_pool = {};
return *this;
}
#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR )
auto operator<=>(Pool const &) const = default;
#else
bool operator==(Pool const & rhs) const VULKAN_HPP_NOEXCEPT {
return m_pool == rhs.m_pool;
}
#endif
VULKAN_HPP_TYPESAFE_EXPLICIT operator VmaPool() const VULKAN_HPP_NOEXCEPT {
return m_pool;
}
explicit operator bool() const VULKAN_HPP_NOEXCEPT {
return m_pool != VK_NULL_HANDLE;
}
bool operator!() const VULKAN_HPP_NOEXCEPT {
return m_pool == VK_NULL_HANDLE;
}
private:
VmaPool m_pool = {};
};
VULKAN_HPP_STATIC_ASSERT(sizeof(Pool) == sizeof(VmaPool),
"handle and wrapper have different size!");
}
#ifndef VULKAN_HPP_NO_SMART_HANDLE
namespace VULKAN_HPP_NAMESPACE {
template<> class UniqueHandleTraits<VMA_HPP_NAMESPACE::Pool, VMA_HPP_NAMESPACE::Dispatcher> {
public:
using deleter = VMA_HPP_NAMESPACE::Deleter<VMA_HPP_NAMESPACE::Pool, VMA_HPP_NAMESPACE::Allocator>;
};
}
namespace VMA_HPP_NAMESPACE { using UniquePool = VULKAN_HPP_NAMESPACE::UniqueHandle<Pool, Dispatcher>; }
#endif
namespace VMA_HPP_NAMESPACE {
class Allocation {
public:
using CType = VmaAllocation;
using NativeType = VmaAllocation;
public:
VULKAN_HPP_CONSTEXPR Allocation() = default;
VULKAN_HPP_CONSTEXPR Allocation(std::nullptr_t) VULKAN_HPP_NOEXCEPT {}
VULKAN_HPP_TYPESAFE_EXPLICIT Allocation(VmaAllocation allocation) VULKAN_HPP_NOEXCEPT : m_allocation(allocation) {}
#if defined(VULKAN_HPP_TYPESAFE_CONVERSION)
Allocation& operator=(VmaAllocation allocation) VULKAN_HPP_NOEXCEPT {
m_allocation = allocation;
return *this;
}
#endif
Allocation& operator=(std::nullptr_t) VULKAN_HPP_NOEXCEPT {
m_allocation = {};
return *this;
}
#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR )
auto operator<=>(Allocation const &) const = default;
#else
bool operator==(Allocation const & rhs) const VULKAN_HPP_NOEXCEPT {
return m_allocation == rhs.m_allocation;
}
#endif
VULKAN_HPP_TYPESAFE_EXPLICIT operator VmaAllocation() const VULKAN_HPP_NOEXCEPT {
return m_allocation;
}
explicit operator bool() const VULKAN_HPP_NOEXCEPT {
return m_allocation != VK_NULL_HANDLE;
}
bool operator!() const VULKAN_HPP_NOEXCEPT {
return m_allocation == VK_NULL_HANDLE;
}
private:
VmaAllocation m_allocation = {};
};
VULKAN_HPP_STATIC_ASSERT(sizeof(Allocation) == sizeof(VmaAllocation),
"handle and wrapper have different size!");
}
#ifndef VULKAN_HPP_NO_SMART_HANDLE
namespace VULKAN_HPP_NAMESPACE {
template<> class UniqueHandleTraits<VMA_HPP_NAMESPACE::Allocation, VMA_HPP_NAMESPACE::Dispatcher> {
public:
using deleter = VMA_HPP_NAMESPACE::Deleter<VMA_HPP_NAMESPACE::Allocation, VMA_HPP_NAMESPACE::Allocator>;
};
}
namespace VMA_HPP_NAMESPACE { using UniqueAllocation = VULKAN_HPP_NAMESPACE::UniqueHandle<Allocation, Dispatcher>; }
#endif
namespace VMA_HPP_NAMESPACE {
class DefragmentationContext {
public:
using CType = VmaDefragmentationContext;
using NativeType = VmaDefragmentationContext;
public:
VULKAN_HPP_CONSTEXPR DefragmentationContext() = default;
VULKAN_HPP_CONSTEXPR DefragmentationContext(std::nullptr_t) VULKAN_HPP_NOEXCEPT {}
VULKAN_HPP_TYPESAFE_EXPLICIT DefragmentationContext(VmaDefragmentationContext defragmentationContext) VULKAN_HPP_NOEXCEPT : m_defragmentationContext(defragmentationContext) {}
#if defined(VULKAN_HPP_TYPESAFE_CONVERSION)
DefragmentationContext& operator=(VmaDefragmentationContext defragmentationContext) VULKAN_HPP_NOEXCEPT {
m_defragmentationContext = defragmentationContext;
return *this;
}
#endif
DefragmentationContext& operator=(std::nullptr_t) VULKAN_HPP_NOEXCEPT {
m_defragmentationContext = {};
return *this;
}
#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR )
auto operator<=>(DefragmentationContext const &) const = default;
#else
bool operator==(DefragmentationContext const & rhs) const VULKAN_HPP_NOEXCEPT {
return m_defragmentationContext == rhs.m_defragmentationContext;
}
#endif
VULKAN_HPP_TYPESAFE_EXPLICIT operator VmaDefragmentationContext() const VULKAN_HPP_NOEXCEPT {
return m_defragmentationContext;
}
explicit operator bool() const VULKAN_HPP_NOEXCEPT {
return m_defragmentationContext != VK_NULL_HANDLE;
}
bool operator!() const VULKAN_HPP_NOEXCEPT {
return m_defragmentationContext == VK_NULL_HANDLE;
}
private:
VmaDefragmentationContext m_defragmentationContext = {};
};
VULKAN_HPP_STATIC_ASSERT(sizeof(DefragmentationContext) == sizeof(VmaDefragmentationContext),
"handle and wrapper have different size!");
}
#ifndef VULKAN_HPP_NO_SMART_HANDLE
namespace VULKAN_HPP_NAMESPACE {
template<> class UniqueHandleTraits<VMA_HPP_NAMESPACE::DefragmentationContext, VMA_HPP_NAMESPACE::Dispatcher> {
public:
using deleter = VMA_HPP_NAMESPACE::Deleter<VMA_HPP_NAMESPACE::DefragmentationContext, void>;
};
}
namespace VMA_HPP_NAMESPACE { using UniqueDefragmentationContext = VULKAN_HPP_NAMESPACE::UniqueHandle<DefragmentationContext, Dispatcher>; }
#endif
namespace VMA_HPP_NAMESPACE {
class Allocator {
public:
using CType = VmaAllocator;
using NativeType = VmaAllocator;
public:
VULKAN_HPP_CONSTEXPR Allocator() = default;
VULKAN_HPP_CONSTEXPR Allocator(std::nullptr_t) VULKAN_HPP_NOEXCEPT {}
VULKAN_HPP_TYPESAFE_EXPLICIT Allocator(VmaAllocator allocator) VULKAN_HPP_NOEXCEPT : m_allocator(allocator) {}
#if defined(VULKAN_HPP_TYPESAFE_CONVERSION)
Allocator& operator=(VmaAllocator allocator) VULKAN_HPP_NOEXCEPT {
m_allocator = allocator;
return *this;
}
#endif
Allocator& operator=(std::nullptr_t) VULKAN_HPP_NOEXCEPT {
m_allocator = {};
return *this;
}
#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR )
auto operator<=>(Allocator const &) const = default;
#else
bool operator==(Allocator const & rhs) const VULKAN_HPP_NOEXCEPT {
return m_allocator == rhs.m_allocator;
}
#endif
VULKAN_HPP_TYPESAFE_EXPLICIT operator VmaAllocator() const VULKAN_HPP_NOEXCEPT {
return m_allocator;
}
explicit operator bool() const VULKAN_HPP_NOEXCEPT {
return m_allocator != VK_NULL_HANDLE;
}
bool operator!() const VULKAN_HPP_NOEXCEPT {
return m_allocator == VK_NULL_HANDLE;
}
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
void destroy() const;
#else
void destroy() const;
#endif
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS AllocatorInfo getAllocatorInfo() const;
#endif
void getAllocatorInfo(AllocatorInfo* allocatorInfo) const;
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS const VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties* getPhysicalDeviceProperties() const;
#endif
void getPhysicalDeviceProperties(const VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties** physicalDeviceProperties) const;
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS const VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties* getMemoryProperties() const;
#endif
void getMemoryProperties(const VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties** physicalDeviceMemoryProperties) const;
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_NAMESPACE::MemoryPropertyFlags getMemoryTypeProperties(uint32_t memoryTypeIndex) const;
#endif
void getMemoryTypeProperties(uint32_t memoryTypeIndex,
VULKAN_HPP_NAMESPACE::MemoryPropertyFlags* flags) const;
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
void setCurrentFrameIndex(uint32_t frameIndex) const;
#else
void setCurrentFrameIndex(uint32_t frameIndex) const;
#endif
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS TotalStatistics calculateStatistics() const;
#endif
void calculateStatistics(TotalStatistics* stats) const;
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
template<typename VectorAllocator = std::allocator<Budget>,
typename B = VectorAllocator,
typename std::enable_if<std::is_same<typename B::value_type, Budget>::value, int>::type = 0>
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS std::vector<Budget, VectorAllocator> getHeapBudgets(VectorAllocator& vectorAllocator) const;
template<typename VectorAllocator = std::allocator<Budget>>
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS std::vector<Budget, VectorAllocator> getHeapBudgets() const;
#endif
void getHeapBudgets(Budget* budgets) const;
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType<uint32_t>::type findMemoryTypeIndex(uint32_t memoryTypeBits,
const AllocationCreateInfo& allocationCreateInfo) const;
#endif
VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result findMemoryTypeIndex(uint32_t memoryTypeBits,
const AllocationCreateInfo* allocationCreateInfo,
uint32_t* memoryTypeIndex) const;
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType<uint32_t>::type findMemoryTypeIndexForBufferInfo(const VULKAN_HPP_NAMESPACE::BufferCreateInfo& bufferCreateInfo,
const AllocationCreateInfo& allocationCreateInfo) const;
#endif
VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result findMemoryTypeIndexForBufferInfo(const VULKAN_HPP_NAMESPACE::BufferCreateInfo* bufferCreateInfo,
const AllocationCreateInfo* allocationCreateInfo,
uint32_t* memoryTypeIndex) const;
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType<uint32_t>::type findMemoryTypeIndexForImageInfo(const VULKAN_HPP_NAMESPACE::ImageCreateInfo& imageCreateInfo,
const AllocationCreateInfo& allocationCreateInfo) const;
#endif
VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result findMemoryTypeIndexForImageInfo(const VULKAN_HPP_NAMESPACE::ImageCreateInfo* imageCreateInfo,
const AllocationCreateInfo* allocationCreateInfo,
uint32_t* memoryTypeIndex) const;
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType<Pool>::type createPool(const PoolCreateInfo& createInfo) const;
#ifndef VULKAN_HPP_NO_SMART_HANDLE
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType<UniquePool>::type createPoolUnique(const PoolCreateInfo& createInfo) const;
#endif
#endif
VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result createPool(const PoolCreateInfo* createInfo,
Pool* pool) const;
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
void destroyPool(Pool pool) const;
#else
void destroyPool(Pool pool) const;
#endif
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS Statistics getPoolStatistics(Pool pool) const;
#endif
void getPoolStatistics(Pool pool,
Statistics* poolStats) const;
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS DetailedStatistics calculatePoolStatistics(Pool pool) const;
#endif
void calculatePoolStatistics(Pool pool,
DetailedStatistics* poolStats) const;
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
typename VULKAN_HPP_NAMESPACE::ResultValueType<void>::type checkPoolCorruption(Pool pool) const;
#else
VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result checkPoolCorruption(Pool pool) const;
#endif
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS const char* getPoolName(Pool pool) const;
#endif
void getPoolName(Pool pool,
const char** name) const;
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
void setPoolName(Pool pool,
const char* name) const;
#else
void setPoolName(Pool pool,
const char* name) const;
#endif
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType<Allocation>::type allocateMemory(const VULKAN_HPP_NAMESPACE::MemoryRequirements& vkMemoryRequirements,
const AllocationCreateInfo& createInfo,
VULKAN_HPP_NAMESPACE::Optional<AllocationInfo> allocationInfo = nullptr) const;
#ifndef VULKAN_HPP_NO_SMART_HANDLE
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType<UniqueAllocation>::type allocateMemoryUnique(const VULKAN_HPP_NAMESPACE::MemoryRequirements& vkMemoryRequirements,
const AllocationCreateInfo& createInfo,
VULKAN_HPP_NAMESPACE::Optional<AllocationInfo> allocationInfo = nullptr) const;
#endif
#endif
VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result allocateMemory(const VULKAN_HPP_NAMESPACE::MemoryRequirements* vkMemoryRequirements,
const AllocationCreateInfo* createInfo,
Allocation* allocation,
AllocationInfo* allocationInfo) const;
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
template<typename VectorAllocator = std::allocator<Allocation>,
typename B = VectorAllocator,
typename std::enable_if<std::is_same<typename B::value_type, Allocation>::value, int>::type = 0>
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType<std::vector<Allocation, VectorAllocator>>::type allocateMemoryPages(VULKAN_HPP_NAMESPACE::ArrayProxy<const VULKAN_HPP_NAMESPACE::MemoryRequirements> vkMemoryRequirements,
VULKAN_HPP_NAMESPACE::ArrayProxy<const AllocationCreateInfo> createInfo,
VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries<AllocationInfo> allocationInfo,
VectorAllocator& vectorAllocator) const;
template<typename VectorAllocator = std::allocator<Allocation>>
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType<std::vector<Allocation, VectorAllocator>>::type allocateMemoryPages(VULKAN_HPP_NAMESPACE::ArrayProxy<const VULKAN_HPP_NAMESPACE::MemoryRequirements> vkMemoryRequirements,
VULKAN_HPP_NAMESPACE::ArrayProxy<const AllocationCreateInfo> createInfo,
VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries<AllocationInfo> allocationInfo = nullptr) const;
#ifndef VULKAN_HPP_NO_SMART_HANDLE
template<typename VectorAllocator = std::allocator<UniqueAllocation>,
typename B = VectorAllocator,
typename std::enable_if<std::is_same<typename B::value_type, UniqueAllocation>::value, int>::type = 0>
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType<std::vector<UniqueAllocation, VectorAllocator>>::type allocateMemoryPagesUnique(VULKAN_HPP_NAMESPACE::ArrayProxy<const VULKAN_HPP_NAMESPACE::MemoryRequirements> vkMemoryRequirements,
VULKAN_HPP_NAMESPACE::ArrayProxy<const AllocationCreateInfo> createInfo,
VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries<AllocationInfo> allocationInfo,
VectorAllocator& vectorAllocator) const;
template<typename VectorAllocator = std::allocator<UniqueAllocation>>
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType<std::vector<UniqueAllocation, VectorAllocator>>::type allocateMemoryPagesUnique(VULKAN_HPP_NAMESPACE::ArrayProxy<const VULKAN_HPP_NAMESPACE::MemoryRequirements> vkMemoryRequirements,
VULKAN_HPP_NAMESPACE::ArrayProxy<const AllocationCreateInfo> createInfo,
VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries<AllocationInfo> allocationInfo = nullptr) const;
#endif
#endif
VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result allocateMemoryPages(const VULKAN_HPP_NAMESPACE::MemoryRequirements* vkMemoryRequirements,
const AllocationCreateInfo* createInfo,
size_t allocationCount,
Allocation* allocations,
AllocationInfo* allocationInfo) const;
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType<Allocation>::type allocateMemoryForBuffer(VULKAN_HPP_NAMESPACE::Buffer buffer,
const AllocationCreateInfo& createInfo,
VULKAN_HPP_NAMESPACE::Optional<AllocationInfo> allocationInfo = nullptr) const;
#ifndef VULKAN_HPP_NO_SMART_HANDLE
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType<UniqueAllocation>::type allocateMemoryForBufferUnique(VULKAN_HPP_NAMESPACE::Buffer buffer,
const AllocationCreateInfo& createInfo,
VULKAN_HPP_NAMESPACE::Optional<AllocationInfo> allocationInfo = nullptr) const;
#endif
#endif
VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result allocateMemoryForBuffer(VULKAN_HPP_NAMESPACE::Buffer buffer,
const AllocationCreateInfo* createInfo,
Allocation* allocation,
AllocationInfo* allocationInfo) const;
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType<Allocation>::type allocateMemoryForImage(VULKAN_HPP_NAMESPACE::Image image,
const AllocationCreateInfo& createInfo,
VULKAN_HPP_NAMESPACE::Optional<AllocationInfo> allocationInfo = nullptr) const;
#ifndef VULKAN_HPP_NO_SMART_HANDLE
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType<UniqueAllocation>::type allocateMemoryForImageUnique(VULKAN_HPP_NAMESPACE::Image image,
const AllocationCreateInfo& createInfo,
VULKAN_HPP_NAMESPACE::Optional<AllocationInfo> allocationInfo = nullptr) const;
#endif
#endif
VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result allocateMemoryForImage(VULKAN_HPP_NAMESPACE::Image image,
const AllocationCreateInfo* createInfo,
Allocation* allocation,
AllocationInfo* allocationInfo) const;
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
void freeMemory(Allocation allocation) const;
#else
void freeMemory(Allocation allocation) const;
#endif
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
void freeMemoryPages(VULKAN_HPP_NAMESPACE::ArrayProxy<const Allocation> allocations) const;
#endif
void freeMemoryPages(size_t allocationCount,
const Allocation* allocations) const;
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS AllocationInfo getAllocationInfo(Allocation allocation) const;
#endif
void getAllocationInfo(Allocation allocation,
AllocationInfo* allocationInfo) const;
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
void setAllocationUserData(Allocation allocation,
void* userData) const;
#else
void setAllocationUserData(Allocation allocation,
void* userData) const;
#endif
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
void setAllocationName(Allocation allocation,
const char* name) const;
#else
void setAllocationName(Allocation allocation,
const char* name) const;
#endif
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_NAMESPACE::MemoryPropertyFlags getAllocationMemoryProperties(Allocation allocation) const;
#endif
void getAllocationMemoryProperties(Allocation allocation,
VULKAN_HPP_NAMESPACE::MemoryPropertyFlags* flags) const;
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType<void*>::type mapMemory(Allocation allocation) const;
#endif
VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result mapMemory(Allocation allocation,
void** data) const;
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
void unmapMemory(Allocation allocation) const;
#else
void unmapMemory(Allocation allocation) const;
#endif
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
typename VULKAN_HPP_NAMESPACE::ResultValueType<void>::type flushAllocation(Allocation allocation,
VULKAN_HPP_NAMESPACE::DeviceSize offset,
VULKAN_HPP_NAMESPACE::DeviceSize size) const;
#else
VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result flushAllocation(Allocation allocation,
VULKAN_HPP_NAMESPACE::DeviceSize offset,
VULKAN_HPP_NAMESPACE::DeviceSize size) const;
#endif
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
typename VULKAN_HPP_NAMESPACE::ResultValueType<void>::type invalidateAllocation(Allocation allocation,
VULKAN_HPP_NAMESPACE::DeviceSize offset,
VULKAN_HPP_NAMESPACE::DeviceSize size) const;
#else
VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result invalidateAllocation(Allocation allocation,
VULKAN_HPP_NAMESPACE::DeviceSize offset,
VULKAN_HPP_NAMESPACE::DeviceSize size) const;
#endif
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
typename VULKAN_HPP_NAMESPACE::ResultValueType<void>::type flushAllocations(VULKAN_HPP_NAMESPACE::ArrayProxy<const Allocation> allocations,
VULKAN_HPP_NAMESPACE::ArrayProxy<const VULKAN_HPP_NAMESPACE::DeviceSize> offsets,
VULKAN_HPP_NAMESPACE::ArrayProxy<const VULKAN_HPP_NAMESPACE::DeviceSize> sizes) const;
#endif
VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result flushAllocations(uint32_t allocationCount,
const Allocation* allocations,
const VULKAN_HPP_NAMESPACE::DeviceSize* offsets,
const VULKAN_HPP_NAMESPACE::DeviceSize* sizes) const;
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
typename VULKAN_HPP_NAMESPACE::ResultValueType<void>::type invalidateAllocations(VULKAN_HPP_NAMESPACE::ArrayProxy<const Allocation> allocations,
VULKAN_HPP_NAMESPACE::ArrayProxy<const VULKAN_HPP_NAMESPACE::DeviceSize> offsets,
VULKAN_HPP_NAMESPACE::ArrayProxy<const VULKAN_HPP_NAMESPACE::DeviceSize> sizes) const;
#endif
VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result invalidateAllocations(uint32_t allocationCount,
const Allocation* allocations,
const VULKAN_HPP_NAMESPACE::DeviceSize* offsets,
const VULKAN_HPP_NAMESPACE::DeviceSize* sizes) const;
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
typename VULKAN_HPP_NAMESPACE::ResultValueType<void>::type checkCorruption(uint32_t memoryTypeBits) const;
#else
VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result checkCorruption(uint32_t memoryTypeBits) const;
#endif
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType<DefragmentationContext>::type beginDefragmentation(const DefragmentationInfo& info) const;
#endif
VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result beginDefragmentation(const DefragmentationInfo* info,
DefragmentationContext* context) const;
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
void endDefragmentation(DefragmentationContext context,
VULKAN_HPP_NAMESPACE::Optional<DefragmentationStats> stats = nullptr) const;
#endif
void endDefragmentation(DefragmentationContext context,
DefragmentationStats* stats) const;
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType<DefragmentationPassMoveInfo>::type beginDefragmentationPass(DefragmentationContext context) const;
#endif
VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result beginDefragmentationPass(DefragmentationContext context,
DefragmentationPassMoveInfo* passInfo) const;
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType<DefragmentationPassMoveInfo>::type endDefragmentationPass(DefragmentationContext context) const;
#endif
VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result endDefragmentationPass(DefragmentationContext context,
DefragmentationPassMoveInfo* passInfo) const;
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
typename VULKAN_HPP_NAMESPACE::ResultValueType<void>::type bindBufferMemory(Allocation allocation,
VULKAN_HPP_NAMESPACE::Buffer buffer) const;
#else
VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result bindBufferMemory(Allocation allocation,
VULKAN_HPP_NAMESPACE::Buffer buffer) const;
#endif
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
typename VULKAN_HPP_NAMESPACE::ResultValueType<void>::type bindBufferMemory2(Allocation allocation,
VULKAN_HPP_NAMESPACE::DeviceSize allocationLocalOffset,
VULKAN_HPP_NAMESPACE::Buffer buffer,
const void* next) const;
#else
VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result bindBufferMemory2(Allocation allocation,
VULKAN_HPP_NAMESPACE::DeviceSize allocationLocalOffset,
VULKAN_HPP_NAMESPACE::Buffer buffer,
const void* next) const;
#endif
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
typename VULKAN_HPP_NAMESPACE::ResultValueType<void>::type bindImageMemory(Allocation allocation,
VULKAN_HPP_NAMESPACE::Image image) const;
#else
VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result bindImageMemory(Allocation allocation,
VULKAN_HPP_NAMESPACE::Image image) const;
#endif
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
typename VULKAN_HPP_NAMESPACE::ResultValueType<void>::type bindImageMemory2(Allocation allocation,
VULKAN_HPP_NAMESPACE::DeviceSize allocationLocalOffset,
VULKAN_HPP_NAMESPACE::Image image,
const void* next) const;
#else
VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result bindImageMemory2(Allocation allocation,
VULKAN_HPP_NAMESPACE::DeviceSize allocationLocalOffset,
VULKAN_HPP_NAMESPACE::Image image,
const void* next) const;
#endif
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType<std::pair<VULKAN_HPP_NAMESPACE::Buffer, Allocation>>::type createBuffer(const VULKAN_HPP_NAMESPACE::BufferCreateInfo& bufferCreateInfo,
const AllocationCreateInfo& allocationCreateInfo,
VULKAN_HPP_NAMESPACE::Optional<AllocationInfo> allocationInfo = nullptr) const;
#ifndef VULKAN_HPP_NO_SMART_HANDLE
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType<std::pair<UniqueBuffer, UniqueAllocation>>::type createBufferUnique(const VULKAN_HPP_NAMESPACE::BufferCreateInfo& bufferCreateInfo,
const AllocationCreateInfo& allocationCreateInfo,
VULKAN_HPP_NAMESPACE::Optional<AllocationInfo> allocationInfo = nullptr) const;
#endif
#endif
VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result createBuffer(const VULKAN_HPP_NAMESPACE::BufferCreateInfo* bufferCreateInfo,
const AllocationCreateInfo* allocationCreateInfo,
VULKAN_HPP_NAMESPACE::Buffer* buffer,
Allocation* allocation,
AllocationInfo* allocationInfo) const;
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType<std::pair<VULKAN_HPP_NAMESPACE::Buffer, Allocation>>::type createBufferWithAlignment(const VULKAN_HPP_NAMESPACE::BufferCreateInfo& bufferCreateInfo,
const AllocationCreateInfo& allocationCreateInfo,
VULKAN_HPP_NAMESPACE::DeviceSize minAlignment,
VULKAN_HPP_NAMESPACE::Optional<AllocationInfo> allocationInfo = nullptr) const;
#ifndef VULKAN_HPP_NO_SMART_HANDLE
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType<std::pair<UniqueBuffer, UniqueAllocation>>::type createBufferWithAlignmentUnique(const VULKAN_HPP_NAMESPACE::BufferCreateInfo& bufferCreateInfo,
const AllocationCreateInfo& allocationCreateInfo,
VULKAN_HPP_NAMESPACE::DeviceSize minAlignment,
VULKAN_HPP_NAMESPACE::Optional<AllocationInfo> allocationInfo = nullptr) const;
#endif
#endif
VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result createBufferWithAlignment(const VULKAN_HPP_NAMESPACE::BufferCreateInfo* bufferCreateInfo,
const AllocationCreateInfo* allocationCreateInfo,
VULKAN_HPP_NAMESPACE::DeviceSize minAlignment,
VULKAN_HPP_NAMESPACE::Buffer* buffer,
Allocation* allocation,
AllocationInfo* allocationInfo) const;
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType<VULKAN_HPP_NAMESPACE::Buffer>::type createAliasingBuffer(Allocation allocation,
const VULKAN_HPP_NAMESPACE::BufferCreateInfo& bufferCreateInfo) const;
#endif
VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result createAliasingBuffer(Allocation allocation,
const VULKAN_HPP_NAMESPACE::BufferCreateInfo* bufferCreateInfo,
VULKAN_HPP_NAMESPACE::Buffer* buffer) const;
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
void destroyBuffer(VULKAN_HPP_NAMESPACE::Buffer buffer,
Allocation allocation) const;
#else
void destroyBuffer(VULKAN_HPP_NAMESPACE::Buffer buffer,
Allocation allocation) const;
#endif
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType<std::pair<VULKAN_HPP_NAMESPACE::Image, Allocation>>::type createImage(const VULKAN_HPP_NAMESPACE::ImageCreateInfo& imageCreateInfo,
const AllocationCreateInfo& allocationCreateInfo,
VULKAN_HPP_NAMESPACE::Optional<AllocationInfo> allocationInfo = nullptr) const;
#ifndef VULKAN_HPP_NO_SMART_HANDLE
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType<std::pair<UniqueImage, UniqueAllocation>>::type createImageUnique(const VULKAN_HPP_NAMESPACE::ImageCreateInfo& imageCreateInfo,
const AllocationCreateInfo& allocationCreateInfo,
VULKAN_HPP_NAMESPACE::Optional<AllocationInfo> allocationInfo = nullptr) const;
#endif
#endif
VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result createImage(const VULKAN_HPP_NAMESPACE::ImageCreateInfo* imageCreateInfo,
const AllocationCreateInfo* allocationCreateInfo,
VULKAN_HPP_NAMESPACE::Image* image,
Allocation* allocation,
AllocationInfo* allocationInfo) const;
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType<VULKAN_HPP_NAMESPACE::Image>::type createAliasingImage(Allocation allocation,
const VULKAN_HPP_NAMESPACE::ImageCreateInfo& imageCreateInfo) const;
#endif
VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result createAliasingImage(Allocation allocation,
const VULKAN_HPP_NAMESPACE::ImageCreateInfo* imageCreateInfo,
VULKAN_HPP_NAMESPACE::Image* image) const;
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
void destroyImage(VULKAN_HPP_NAMESPACE::Image image,
Allocation allocation) const;
#else
void destroyImage(VULKAN_HPP_NAMESPACE::Image image,
Allocation allocation) const;
#endif
#if VMA_STATS_STRING_ENABLED
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS char* buildStatsString(VULKAN_HPP_NAMESPACE::Bool32 detailedMap) const;
#endif
void buildStatsString(char** statsString,
VULKAN_HPP_NAMESPACE::Bool32 detailedMap) const;
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
void freeStatsString(char* statsString) const;
#else
void freeStatsString(char* statsString) const;
#endif
#endif
private:
VmaAllocator m_allocator = {};
};
VULKAN_HPP_STATIC_ASSERT(sizeof(Allocator) == sizeof(VmaAllocator),
"handle and wrapper have different size!");
}
#ifndef VULKAN_HPP_NO_SMART_HANDLE
namespace VULKAN_HPP_NAMESPACE {
template<> class UniqueHandleTraits<VMA_HPP_NAMESPACE::Allocator, VMA_HPP_NAMESPACE::Dispatcher> {
public:
using deleter = VMA_HPP_NAMESPACE::Deleter<VMA_HPP_NAMESPACE::Allocator, void>;
};
}
namespace VMA_HPP_NAMESPACE { using UniqueAllocator = VULKAN_HPP_NAMESPACE::UniqueHandle<Allocator, Dispatcher>; }
#endif
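The defragmentation methods declared in Allocator above pair with DefragmentationMoveOperation from the enums header: each pass reports proposed moves, the caller performs or vetoes them, then ends the pass. A minimal pass loop, sketched under stated assumptions (exceptions enabled for the enhanced overloads; `allocator` is a valid vma::Allocator; real code must also record the GPU copies and rebind buffers/images between the begin and end of each pass):

vma::DefragmentationInfo defragInfo {}; // zeroed: default pools, default algorithm
vma::DefragmentationContext ctx = allocator.beginDefragmentation(defragInfo);
for (;;) {
    vma::DefragmentationPassMoveInfo pass {};
    // eSuccess from beginDefragmentationPass means nothing is left to move.
    if (allocator.beginDefragmentationPass(ctx, &pass) == vk::Result::eSuccess)
        break;
    // Each pass.pMoves[i].operation defaults to eCopy; set eIgnore or
    // eDestroy here to veto or drop individual moves before ending the pass.
    if (allocator.endDefragmentationPass(ctx, &pass) == vk::Result::eSuccess)
        break; // defragmentation complete
}
allocator.endDefragmentation(ctx);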
namespace VMA_HPP_NAMESPACE {
class VirtualAllocation {
public:
using CType = VmaVirtualAllocation;
using NativeType = VmaVirtualAllocation;
public:
VULKAN_HPP_CONSTEXPR VirtualAllocation() = default;
VULKAN_HPP_CONSTEXPR VirtualAllocation(std::nullptr_t) VULKAN_HPP_NOEXCEPT {}
VULKAN_HPP_TYPESAFE_EXPLICIT VirtualAllocation(VmaVirtualAllocation virtualAllocation) VULKAN_HPP_NOEXCEPT : m_virtualAllocation(virtualAllocation) {}
#if defined(VULKAN_HPP_TYPESAFE_CONVERSION)
VirtualAllocation& operator=(VmaVirtualAllocation virtualAllocation) VULKAN_HPP_NOEXCEPT {
m_virtualAllocation = virtualAllocation;
return *this;
}
#endif
VirtualAllocation& operator=(std::nullptr_t) VULKAN_HPP_NOEXCEPT {
m_virtualAllocation = {};
return *this;
}
#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR )
auto operator<=>(VirtualAllocation const &) const = default;
#else
bool operator==(VirtualAllocation const & rhs) const VULKAN_HPP_NOEXCEPT {
return m_virtualAllocation == rhs.m_virtualAllocation;
}
#endif
VULKAN_HPP_TYPESAFE_EXPLICIT operator VmaVirtualAllocation() const VULKAN_HPP_NOEXCEPT {
return m_virtualAllocation;
}
explicit operator bool() const VULKAN_HPP_NOEXCEPT {
return m_virtualAllocation != VK_NULL_HANDLE;
}
bool operator!() const VULKAN_HPP_NOEXCEPT {
return m_virtualAllocation == VK_NULL_HANDLE;
}
private:
VmaVirtualAllocation m_virtualAllocation = {};
};
VULKAN_HPP_STATIC_ASSERT(sizeof(VirtualAllocation) == sizeof(VmaVirtualAllocation),
"handle and wrapper have different size!");
}
#ifndef VULKAN_HPP_NO_SMART_HANDLE
namespace VULKAN_HPP_NAMESPACE {
template<> class UniqueHandleTraits<VMA_HPP_NAMESPACE::VirtualAllocation, VMA_HPP_NAMESPACE::Dispatcher> {
public:
using deleter = VMA_HPP_NAMESPACE::Deleter<VMA_HPP_NAMESPACE::VirtualAllocation, VMA_HPP_NAMESPACE::VirtualBlock>;
};
}
namespace VMA_HPP_NAMESPACE { using UniqueVirtualAllocation = VULKAN_HPP_NAMESPACE::UniqueHandle<VirtualAllocation, Dispatcher>; }
#endif
namespace VMA_HPP_NAMESPACE {
class VirtualBlock {
public:
using CType = VmaVirtualBlock;
using NativeType = VmaVirtualBlock;
public:
VULKAN_HPP_CONSTEXPR VirtualBlock() = default;
VULKAN_HPP_CONSTEXPR VirtualBlock(std::nullptr_t) VULKAN_HPP_NOEXCEPT {}
VULKAN_HPP_TYPESAFE_EXPLICIT VirtualBlock(VmaVirtualBlock virtualBlock) VULKAN_HPP_NOEXCEPT : m_virtualBlock(virtualBlock) {}
#if defined(VULKAN_HPP_TYPESAFE_CONVERSION)
VirtualBlock& operator=(VmaVirtualBlock virtualBlock) VULKAN_HPP_NOEXCEPT {
m_virtualBlock = virtualBlock;
return *this;
}
#endif
VirtualBlock& operator=(std::nullptr_t) VULKAN_HPP_NOEXCEPT {
m_virtualBlock = {};
return *this;
}
#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR )
auto operator<=>(VirtualBlock const &) const = default;
#else
bool operator==(VirtualBlock const & rhs) const VULKAN_HPP_NOEXCEPT {
return m_virtualBlock == rhs.m_virtualBlock;
}
#endif
VULKAN_HPP_TYPESAFE_EXPLICIT operator VmaVirtualBlock() const VULKAN_HPP_NOEXCEPT {
return m_virtualBlock;
}
explicit operator bool() const VULKAN_HPP_NOEXCEPT {
return m_virtualBlock != VK_NULL_HANDLE;
}
bool operator!() const VULKAN_HPP_NOEXCEPT {
return m_virtualBlock == VK_NULL_HANDLE;
}
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
void destroy() const;
#else
void destroy() const;
#endif
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_NAMESPACE::Bool32 isVirtualBlockEmpty() const;
#else
VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Bool32 isVirtualBlockEmpty() const;
#endif
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VirtualAllocationInfo getVirtualAllocationInfo(VirtualAllocation allocation) const;
#endif
void getVirtualAllocationInfo(VirtualAllocation allocation,
VirtualAllocationInfo* virtualAllocInfo) const;
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType<VirtualAllocation>::type virtualAllocate(const VirtualAllocationCreateInfo& createInfo,
VULKAN_HPP_NAMESPACE::Optional<VULKAN_HPP_NAMESPACE::DeviceSize> offset = nullptr) const;
#ifndef VULKAN_HPP_NO_SMART_HANDLE
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType<UniqueVirtualAllocation>::type virtualAllocateUnique(const VirtualAllocationCreateInfo& createInfo,
VULKAN_HPP_NAMESPACE::Optional<VULKAN_HPP_NAMESPACE::DeviceSize> offset = nullptr) const;
#endif
#endif
VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result virtualAllocate(const VirtualAllocationCreateInfo* createInfo,
VirtualAllocation* allocation,
VULKAN_HPP_NAMESPACE::DeviceSize* offset) const;
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
void virtualFree(VirtualAllocation allocation) const;
#else
void virtualFree(VirtualAllocation allocation) const;
#endif
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
void clearVirtualBlock() const;
#else
void clearVirtualBlock() const;
#endif
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
void setVirtualAllocationUserData(VirtualAllocation allocation,
void* userData) const;
#else
void setVirtualAllocationUserData(VirtualAllocation allocation,
void* userData) const;
#endif
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS Statistics getVirtualBlockStatistics() const;
#endif
void getVirtualBlockStatistics(Statistics* stats) const;
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS DetailedStatistics calculateVirtualBlockStatistics() const;
#endif
void calculateVirtualBlockStatistics(DetailedStatistics* stats) const;
#if VMA_STATS_STRING_ENABLED
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS char* buildVirtualBlockStatsString(VULKAN_HPP_NAMESPACE::Bool32 detailedMap) const;
#endif
void buildVirtualBlockStatsString(char** statsString,
VULKAN_HPP_NAMESPACE::Bool32 detailedMap) const;
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
void freeVirtualBlockStatsString(char* statsString) const;
#else
void freeVirtualBlockStatsString(char* statsString) const;
#endif
#endif
private:
VmaVirtualBlock m_virtualBlock = {};
};
VULKAN_HPP_STATIC_ASSERT(sizeof(VirtualBlock) == sizeof(VmaVirtualBlock),
"handle and wrapper have different size!");
}
#ifndef VULKAN_HPP_NO_SMART_HANDLE
namespace VULKAN_HPP_NAMESPACE {
template<> class UniqueHandleTraits<VMA_HPP_NAMESPACE::VirtualBlock, VMA_HPP_NAMESPACE::Dispatcher> {
public:
using deleter = VMA_HPP_NAMESPACE::Deleter<VMA_HPP_NAMESPACE::VirtualBlock, void>;
};
}
namespace VMA_HPP_NAMESPACE { using UniqueVirtualBlock = VULKAN_HPP_NAMESPACE::UniqueHandle<VirtualBlock, Dispatcher>; }
#endif
namespace VMA_HPP_NAMESPACE {
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType<Allocator>::type createAllocator(const AllocatorCreateInfo& createInfo);
#ifndef VULKAN_HPP_NO_SMART_HANDLE
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType<UniqueAllocator>::type createAllocatorUnique(const AllocatorCreateInfo& createInfo);
#endif
#endif
VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result createAllocator(const AllocatorCreateInfo* createInfo,
Allocator* allocator);
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType<VirtualBlock>::type createVirtualBlock(const VirtualBlockCreateInfo& createInfo);
#ifndef VULKAN_HPP_NO_SMART_HANDLE
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType<UniqueVirtualBlock>::type createVirtualBlockUnique(const VirtualBlockCreateInfo& createInfo);
#endif
#endif
VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result createVirtualBlock(const VirtualBlockCreateInfo* createInfo,
VirtualBlock* virtualBlock);
}
#endif
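Taken together, these handle classes mirror the VMA C handles one-to-one (the static_asserts guarantee identical size), with optional RAII through the Unique* aliases. A minimal C++17 usage sketch under stated assumptions: default `vma` namespace, exceptions enabled so the enhanced overloads return values directly, and `instance`, `physicalDevice`, `device` already-created Vulkan handles (depending on configuration, AllocatorCreateInfo may also need pVulkanFunctions filled in):

vma::AllocatorCreateInfo allocatorInfo {};
allocatorInfo.instance       = instance;
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device         = device;
vma::UniqueAllocator allocator = vma::createAllocatorUnique(allocatorInfo);

vk::BufferCreateInfo bufferInfo({}, 64 * 1024, vk::BufferUsageFlagBits::eVertexBuffer);
vma::AllocationCreateInfo allocInfo {};
allocInfo.usage = vma::MemoryUsage::eAuto; // from the enums header (suppressed diff above)
auto [buffer, allocation] = allocator->createBufferUnique(bufferInfo, allocInfo);
// buffer and allocation release themselves when the pair goes out of scope.

// VirtualBlock manages sub-ranges of caller-owned memory with the same API shape:
vma::VirtualBlockCreateInfo blockInfo {};
blockInfo.size = 1 << 20; // 1 MiB of virtual address space
vma::UniqueVirtualBlock block = vma::createVirtualBlockUnique(blockInfo);
vma::VirtualAllocationCreateInfo vAllocInfo {};
vAllocInfo.size = 4096;
vk::DeviceSize offset;
vma::UniqueVirtualAllocation vAlloc = block->virtualAllocateUnique(vAllocInfo, offset);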

File diff suppressed because it is too large


@@ -67,7 +67,6 @@ public final class WLVKGraphicsConfig extends WLGraphicsConfig
PlatformLogger.getLogger("sun.java2d.vulkan.WLVKGraphicsConfig");
private static ImageCapabilities imageCaps = new VKImageCaps();
private final int maxTextureSize;
private BufferCapabilities bufferCaps;
private ContextCapabilities vkCaps;
@@ -75,17 +74,9 @@ public final class WLVKGraphicsConfig extends WLGraphicsConfig
private static native long getVKConfigInfo();
/**
* Returns maximum texture size supported by Vulkan. Must be
* called under VKRQ lock.
*/
private static native int nativeGetMaxTextureSize();
public WLVKGraphicsConfig(WLGraphicsDevice device, int width, int height, int scale, int maxTextureSize,
ContextCapabilities vkCaps) {
public WLVKGraphicsConfig(WLGraphicsDevice device, int width, int height, int scale, ContextCapabilities vkCaps) {
super(device, width, height, scale);
this.vkCaps = vkCaps;
this.maxTextureSize = maxTextureSize;
context = new VKContext(VKRenderQueue.getInstance());
}
@@ -100,7 +91,7 @@ public final class WLVKGraphicsConfig extends WLGraphicsConfig
CAPS_PS30 | CAPS_PS20 | CAPS_RT_TEXTURE_ALPHA |
CAPS_RT_TEXTURE_OPAQUE | CAPS_MULTITEXTURE | CAPS_TEXNONPOW2 |
CAPS_TEXNONSQUARE, null);
return new WLVKGraphicsConfig(device, width, height, scale, nativeGetMaxTextureSize(), caps);
return new WLVKGraphicsConfig(device, width, height, scale, caps);
}
/**


@@ -44,7 +44,6 @@ public abstract class WLVKSurfaceData extends VKSurfaceData implements WLSurface
protected WLComponentPeer peer;
protected WLVKGraphicsConfig graphicsConfig;
private native void initOps(WLVKGraphicsConfig gc, WLComponentPeer peer);
@Override
public native void assignSurface(long surfacePtr);


@@ -27,12 +27,4 @@
#include "jni.h"
#include "VKBase.h"
/*
* Class: sun_java2d_vulkan_WLVKGraphicsConfig
* Method: nativeGetMaxTextureSize
* Signature: ()I
*/
extern "C" JNIEXPORT jint JNICALL Java_sun_java2d_vulkan_WLVKGraphicsConfig_nativeGetMaxTextureSize
(JNIEnv *env, jclass vkgc) {
return VK_MaxTextureSize();
}
// TODO ?


@@ -36,121 +36,32 @@
extern struct wl_display *wl_display;
/**
* This is the implementation of the general surface LockFunc defined in
* SurfaceData.h.
*/
jint
WLVKSD_Lock(JNIEnv *env,
SurfaceDataOps *ops,
SurfaceDataRasInfo *pRasInfo,
jint lockflags)
{
#ifndef HEADLESS
VKSDOps *vsdo = (VKSDOps*)ops;
J2dTrace1(J2D_TRACE_INFO, "WLVKSD_Unlock: %p\n", ops);
pthread_mutex_lock(&((WLVKSDOps*)vsdo->privOps)->lock);
#endif
return SD_SUCCESS;
}
static void
WLVKSD_GetRasInfo(JNIEnv *env,
SurfaceDataOps *ops,
SurfaceDataRasInfo *pRasInfo)
{
#ifndef HEADLESS
VKSDOps *vsdo = (VKSDOps*)ops;
#endif
}
static void
WLVKSD_Unlock(JNIEnv *env,
SurfaceDataOps *ops,
SurfaceDataRasInfo *pRasInfo)
{
#ifndef HEADLESS
VKSDOps *vsdo = (VKSDOps*)ops;
J2dTrace1(J2D_TRACE_INFO, "WLVKSD_Unlock: %p\n", ops);
pthread_mutex_unlock(&((WLVKSDOps*)vsdo->privOps)->lock);
#endif
}
static void
WLVKSD_Dispose(JNIEnv *env, SurfaceDataOps *ops)
{
#ifndef HEADLESS
/* ops is assumed non-null as it is checked in SurfaceData_DisposeOps */
VKSDOps *vsdo = (VKSDOps*)ops;
J2dTrace1(J2D_TRACE_INFO, "WLSD_Dispose %p\n", ops);
WLVKSDOps *wlvksdOps = (WLVKSDOps*)vsdo->privOps;
pthread_mutex_destroy(&wlvksdOps->lock);
if (wlvksdOps->wlvkSD !=nullptr) {
delete wlvksdOps->wlvkSD;
wlvksdOps->wlvkSD = nullptr;
}
#endif
}
extern "C" JNIEXPORT void JNICALL Java_sun_java2d_vulkan_WLVKSurfaceData_initOps
(JNIEnv *env, jclass vksd, jint width, jint height, jint scale, jint backgroundRGB) {
(JNIEnv *env, jobject vksd, jint width, jint height, jint scale, jint backgroundRGB) {
#ifndef HEADLESS
VKSDOps *vsdo = (VKSDOps*)SurfaceData_InitOps(env, vksd, sizeof(VKSDOps));
J2dRlsTraceLn1(J2D_TRACE_INFO, "WLVKSurfaceData_initOps: %p", vsdo);
jboolean hasException;
if (vsdo == NULL) {
JNU_ThrowOutOfMemoryError(env, "Initialization of SurfaceData failed.");
return;
}
if (width <= 0) {
width = 1;
}
if (height <= 0) {
height = 1;
}
WLVKSDOps *wlvksdOps = (WLVKSDOps *)malloc(sizeof(WLVKSDOps));
if (wlvksdOps == NULL) {
JNU_ThrowOutOfMemoryError(env, "creating native WLVK ops");
return;
}
vsdo->privOps = wlvksdOps;
vsdo->sdOps.Lock = WLVKSD_Lock;
vsdo->sdOps.Unlock = WLVKSD_Unlock;
vsdo->sdOps.GetRasInfo = WLVKSD_GetRasInfo;
vsdo->sdOps.Dispose = WLVKSD_Dispose;
pthread_mutexattr_t attr;
pthread_mutexattr_init(&attr);
// Recursive mutex is required because blit can be done with both source
// and destination being the same surface (during scrolling, for example).
// So WLVKSD_Lock() should be able to lock the same surface twice in a row.
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE_NP);
pthread_mutex_init(&wlvksdOps->lock, &attr);
wlvksdOps->wlvkSD = new WLVKSurfaceData(width, height, scale, backgroundRGB);
J2dTrace3(J2D_TRACE_INFO, "Create WLVKSurfaceData with size %d x %d and scale %d\n", width, height, scale);
width /= scale; // TODO This is incorrect, but we'll deal with this later, we probably need to do something on Wayland side for app-controlled scaling
height /= scale; // TODO This is incorrect, but we'll deal with this later, we probably need to do something on Wayland side for app-controlled scaling
auto *sd = new WLVKSurfaceData(width, height, scale, backgroundRGB);
sd->attachToJavaSurface(env, vksd);
#endif /* !HEADLESS */
}
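The removed lock code above explains why the per-surface mutex must be recursive: a blit can use the same surface as both source and destination (scrolling, for example), so one thread locks it twice. A minimal C++ sketch of that invariant with std::recursive_mutex (names hypothetical, for illustration only):

#include <mutex>

std::recursive_mutex surfaceLock; // stand-in for the per-surface lock

// Blit with source == destination: the second lock() re-enters the mutex
// already held by this thread; a plain std::mutex would deadlock here.
void blitSameSurface() {
    surfaceLock.lock();   // lock destination
    surfaceLock.lock();   // lock source (same surface)
    // ... copy pixels ...
    surfaceLock.unlock();
    surfaceLock.unlock();
}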
extern "C" JNIEXPORT void JNICALL
Java_sun_java2d_vulkan_WLVKSurfaceData_assignSurface(JNIEnv *env, jobject wsd,
jlong wlSurfacePtr)
Java_sun_java2d_vulkan_WLVKSurfaceData_assignSurface(JNIEnv *env, jobject wsd, jlong wlSurfacePtr)
{
#ifndef HEADLESS
VKSDOps *vsdo = (VKSDOps*)SurfaceData_GetOps(env, wsd);
if (vsdo == NULL) {
auto sd = (WLVKSurfaceData*)SurfaceData_GetOps(env, wsd);
if (sd == nullptr) {
return;
}
WLVKSDOps *wlvksdOps = (WLVKSDOps*)vsdo->privOps;
wl_surface* wlSurface = (struct wl_surface*)jlong_to_ptr(wlSurfacePtr);
auto wlSurface = (struct wl_surface*)jlong_to_ptr(wlSurfacePtr);
J2dTraceLn2(J2D_TRACE_INFO, "WLVKSurfaceData_assignSurface wl_surface(%p) wl_display(%p)",
wlSurface, wl_display);
try {
wlvksdOps->wlvkSD->validate(wlSurface);
sd->validate(wlSurface);
} catch (std::exception& e) {
J2dRlsTrace1(J2D_TRACE_ERROR, "WLVKSurfaceData_assignSurface: %s\n", e.what());
}
@@ -162,6 +73,7 @@ Java_sun_java2d_vulkan_WLVKSurfaceData_flush(JNIEnv *env, jobject wsd)
{
#ifndef HEADLESS
J2dTrace(J2D_TRACE_INFO, "WLVKSurfaceData_flush\n");
// TODO?
#endif /* !HEADLESS */
}
@@ -169,17 +81,17 @@ extern "C" JNIEXPORT void JNICALL
Java_sun_java2d_vulkan_WLVKSurfaceData_revalidate(JNIEnv *env, jobject wsd,
jint width, jint height, jint scale)
{
width /= scale; // TODO This is incorrect, but we'll deal with this later, we probably need to do something on Wayland side for app-controlled scaling
height /= scale; // TODO This is incorrect, but we'll deal with this later, we probably need to do something on Wayland side for app-controlled scaling
#ifndef HEADLESS
VKSDOps *vsdo = (VKSDOps*)SurfaceData_GetOps(env, wsd);
if (vsdo == NULL) {
auto sd = (WLVKSurfaceData*)SurfaceData_GetOps(env, wsd);
if (sd == nullptr) {
return;
}
J2dTrace3(J2D_TRACE_INFO, "WLVKSurfaceData_revalidate to size %d x %d and scale %d\n", width, height, scale);
WLVKSDOps *wlvksdOps = (WLVKSDOps*)vsdo->privOps;
try {
wlvksdOps->wlvkSD->revalidate(width, height, scale);
wlvksdOps->wlvkSD->update();
sd->revalidate(width, height, scale);
} catch (std::exception& e) {
J2dRlsTrace1(J2D_TRACE_ERROR, "WLVKSurfaceData_revalidate: %s\n", e.what());
}
@@ -187,153 +99,18 @@ Java_sun_java2d_vulkan_WLVKSurfaceData_revalidate(JNIEnv *env, jobject wsd,
#endif /* !HEADLESS */
}
extern "C" JNIEXPORT void JNICALL
JNI_OnUnload(JavaVM *vm, void *reserved) {
#ifndef HEADLESS
VKGraphicsEnvironment::dispose();
#endif /* !HEADLESS */
}
WLVKSurfaceData::WLVKSurfaceData(uint32_t w, uint32_t h, uint32_t s, uint32_t bgc)
:VKSurfaceData(w, h, s, bgc), _wl_surface(nullptr), _surface_khr(nullptr), _swapchain_khr(nullptr)
{
J2dTrace3(J2D_TRACE_INFO, "Create WLVKSurfaceData with size %d x %d and scale %d\n", w, h, s);
}
void WLVKSurfaceData::validate(wl_surface* wls)
{
if (wls == _wl_surface) {
return;
}
auto& device = VKGraphicsEnvironment::graphics_environment()->default_device();
device.waitIdle(); // TODO wait until device is done with old swapchain
auto surface = VKGraphicsEnvironment::graphics_environment()->vk_instance()
.createWaylandSurfaceKHR({{}, wl_display, wls});
_wl_surface = wls;
vk::WaylandSurfaceCreateInfoKHR createInfoKhr = {
{}, wl_display, _wl_surface
};
_surface_khr =
VKGraphicsEnvironment::graphics_environment()->vk_instance().createWaylandSurfaceKHR(
createInfoKhr);
reset(device, std::move(surface));
revalidate(width(), height(), scale());
update();
}
void WLVKSurfaceData::revalidate(uint32_t w, uint32_t h, uint32_t s)
{
if (s == scale() && w == width() && h == height()) {
if (!*_surface_khr || *_swapchain_khr) {
J2dTraceLn2(J2D_TRACE_INFO,
"WLVKSurfaceData_revalidate is skipped: surface_khr(%p) swapchain_khr(%p)",
*_surface_khr, *_swapchain_khr);
return;
}
} else {
VKSurfaceData::revalidate(w, h, s);
if (!*_surface_khr) {
J2dTraceLn1(J2D_TRACE_INFO,"WLVKSurfaceData_revalidate is skipped: surface_khr(%p)",
*_surface_khr);
return;
}
}
vk::SwapchainCreateInfoKHR swapchainCreateInfoKhr{
{},
*_surface_khr,
1, vk::Format::eB8G8R8A8Unorm,
vk::ColorSpaceKHR::eVkColorspaceSrgbNonlinear,
{width()/scale(), height()/scale()},
1,
vk::ImageUsageFlagBits::eColorAttachment | vk::ImageUsageFlagBits::eTransferDst,
vk::SharingMode::eExclusive,
0,
nullptr,
vk::SurfaceTransformFlagBitsKHR::eIdentity,
vk::CompositeAlphaFlagBitsKHR::eOpaque,
vk::PresentModeKHR::eImmediate,
false, *_swapchain_khr
};
auto& device = VKGraphicsEnvironment::graphics_environment()->default_device();
_swapchain_khr = device.createSwapchainKHR(swapchainCreateInfoKhr);
}
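In the create info above, the trailing arguments `false, *_swapchain_khr` are clipped and oldSwapchain: handing the retired swapchain to createSwapchainKHR is the standard recreation pattern, letting the implementation recycle its images on resize. A minimal sketch of that pattern in isolation (hypothetical helper, assuming vulkan-hpp RAII types rather than the project's device wrapper):

#include <vulkan/vulkan_raii.hpp>

// Recreate a swapchain, giving the driver the retired one to recycle.
// The caller move-assigns the result over its member handle, so the old
// swapchain is destroyed only after its successor exists.
vk::raii::SwapchainKHR recreateSwapchain(vk::raii::Device& device,
                                         vk::SwapchainCreateInfoKHR info,
                                         const vk::raii::SwapchainKHR& retired) {
    info.oldSwapchain = *retired;
    return device.createSwapchainKHR(info);
}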
void WLVKSurfaceData::set_bg_color(uint32_t bgc)
{
if (bg_color() == bgc) {
return;
}
VKSurfaceData::set_bg_color(bgc);
update();
}
void WLVKSurfaceData::update()
{
if (!*_swapchain_khr) {
return;
}
auto& device = VKGraphicsEnvironment::graphics_environment()->default_device();
vk::SemaphoreCreateInfo semInfo = {};
vk::raii::Semaphore presentCompleteSem = device.createSemaphore(semInfo);
std::pair<vk::Result, uint32_t> img = _swapchain_khr.acquireNextImage(0, *presentCompleteSem, nullptr);
if (img.first != vk::Result::eSuccess) {
J2dRlsTraceLn(J2D_TRACE_INFO, "failed to get image");
}
else {
J2dRlsTraceLn(J2D_TRACE_INFO, "obtained image");
}
auto images = _swapchain_khr.getImages();
vk::CommandPoolCreateInfo poolInfo{
{vk::CommandPoolCreateFlagBits::eResetCommandBuffer},
static_cast<uint32_t>(device.queue_family())
};
auto pool = device.createCommandPool(poolInfo);
vk::CommandBufferAllocateInfo buffInfo{
*pool, vk::CommandBufferLevel::ePrimary, (uint32_t) 1, nullptr
};
auto buffers = device.allocateCommandBuffers(buffInfo);
vk::CommandBufferBeginInfo begInfo{
};
uint32_t alpha = (bg_color() >> 24) & 0xFF;
uint32_t red = (bg_color() >> 16) & 0xFF;
uint32_t green = (bg_color() >> 8) & 0xFF;
uint32_t blue = bg_color() & 0xFF;
vk::ClearColorValue color = {
static_cast<float>(red)/255.0f,
static_cast<float>(green)/255.0f,
static_cast<float>(blue)/255.0f,
static_cast<float>(alpha)/255.0f
};
std::vector<vk::ImageSubresourceRange> range = {{
vk::ImageAspectFlagBits::eColor,
0, 1, 0, 1}};
buffers[0].begin(begInfo);
buffers[0].clearColorImage(images[img.second], vk::ImageLayout::eSharedPresentKHR, color, range);
buffers[0].end();
vk::SubmitInfo submitInfo{
nullptr, nullptr, *buffers[0], nullptr
};
auto queue = device.getQueue(device.queue_family(), 0);
queue.submit(submitInfo, nullptr);
queue.waitIdle();
vk::PresentInfoKHR presentInfo;
presentInfo.swapchainCount = 1;
presentInfo.pSwapchains = &*_swapchain_khr;
presentInfo.pImageIndices = &(img.second);
queue.presentKHR(presentInfo);
queue.waitIdle();
device.waitIdle();
}


@@ -27,45 +27,18 @@
#ifndef WLVKSurfaceData_h_Included
#define WLVKSurfaceData_h_Included
#include <cstdlib>
#include <vulkan/vulkan.h>
#include <SurfaceData.h>
#include <VKBase.h>
#include "VKSurfaceData.h"
#ifdef HEADLESS
#define WLVKSDOps void
#else /* HEADLESS */
#ifndef HEADLESS
class WLVKSurfaceData : public VKSurfaceData {
wl_surface* _wl_surface;
vk::raii::SurfaceKHR _surface_khr;
vk::raii::SwapchainKHR _swapchain_khr;
class WLVKSurfaceData : public VKSwapchainSurfaceData {
wl_surface* _wl_surface;
public:
WLVKSurfaceData(uint32_t w, uint32_t h, uint32_t s, uint32_t bgc);
WLVKSurfaceData(uint32_t w, uint32_t h, uint32_t s, uint32_t bgc)
: VKSwapchainSurfaceData(w, h, s, bgc), _wl_surface(nullptr) {}
void validate(wl_surface* wls);
void revalidate(uint32_t w, uint32_t h, uint32_t s);
void set_bg_color(uint32_t bgc);
void update();
};
/**
* The WLVKSDOps structure contains the WLVK-specific information for a given
* WLVKSurfaceData. It is referenced by the native OGLSDOps structure.
*
* wl_surface* wlSurface;
* For onscreen windows, we maintain a reference to that window's associated
* wl_surface handle here. Offscreen surfaces have no associated Window, so for
* those surfaces, this value will simply be zero.
*
* VkSurfaceKHR* surface;
* Vulkan surface associated with this surface.
*/
typedef struct _WLVKSDOps {
SurfaceDataOps sdOps;
WLVKSurfaceData* wlvkSD;
pthread_mutex_t lock;
} WLVKSDOps;
#endif /* HEADLESS */
#endif /* WLVKSurfaceData_h_Included */