Vulkan/OpenXR: Add support for VK_KHR_external_memory_fd

The current implementation uses a CPU round-trip to transfer the render result
to the XR swapchain. This PR adds support for sharing the render result
on Linux systems by using file descriptors.
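
At its core, the FD path exports the VkDeviceMemory backing the render result
as an opaque POSIX file descriptor and re-imports it on the OpenXR side. A
minimal sketch of that handshake, assuming the memory was allocated with
VkExportMemoryAllocateInfo and the KHR entry points were loaded via
vkGetDeviceProcAddr (function names are illustrative, not the PR's API):

#include <vulkan/vulkan.h>

/* Export the device memory backing the render result as an opaque POSIX FD.
 * Assumes the memory was allocated with VkExportMemoryAllocateInfo
 * (handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT). */
static int export_memory_fd(VkDevice device,
                            VkDeviceMemory memory,
                            PFN_vkGetMemoryFdKHR get_memory_fd)
{
  VkMemoryGetFdInfoKHR get_fd_info = {VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR,
                                      nullptr,
                                      memory,
                                      VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT};
  int fd = -1;
  get_memory_fd(device, &get_fd_info, &fd);
  return fd;
}

/* Import the FD on the consuming side. A successful vkAllocateMemory import
 * transfers ownership of the FD to the Vulkan implementation. */
static VkDeviceMemory import_memory_fd(VkDevice device,
                                       int fd,
                                       VkDeviceSize size,
                                       uint32_t memory_type_index)
{
  VkImportMemoryFdInfoKHR import_info = {VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR,
                                         nullptr,
                                         VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT,
                                         fd};
  VkMemoryAllocateInfo allocate_info = {
      VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, &import_info, size, memory_type_index};
  VkDeviceMemory imported = VK_NULL_HANDLE;
  vkAllocateMemory(device, &allocate_info, nullptr, &imported);
  return imported;
}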

Extending this solution to Win32 or DirectX handles can be done by adding
data transfer modes and registering the corresponding extensions. When
Blender and OpenXR do not use the same GPU, the CPU round-trip
will still be used.
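
Whether external memory can be used at all is decided by a same-GPU test,
which is itself API-agnostic. This PR memcmp's the entire
VkPhysicalDeviceVulkan11Properties struct; below is a sketch of the same idea
using only the device/driver UUIDs from that struct (same_physical_device is
an illustrative name):

#include <cstring>
#include <vulkan/vulkan.h>

/* Only attempt external memory when both sides resolve to the same physical
 * device. The UUIDs in VkPhysicalDeviceVulkan11Properties are stable across
 * instances and processes, which makes them suitable for this comparison. */
static bool same_physical_device(VkPhysicalDevice blender_device,
                                 VkPhysicalDevice openxr_device)
{
  VkPhysicalDeviceVulkan11Properties props_a = {
      VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES};
  VkPhysicalDeviceProperties2 query_a = {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
                                         &props_a};
  vkGetPhysicalDeviceProperties2(blender_device, &query_a);

  VkPhysicalDeviceVulkan11Properties props_b = {
      VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES};
  VkPhysicalDeviceProperties2 query_b = {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
                                         &props_b};
  vkGetPhysicalDeviceProperties2(openxr_device, &query_b);

  /* deviceUUID identifies the physical device; driverUUID guards against
   * mismatched driver builds that may lay out memory differently. */
  return memcmp(props_a.deviceUUID, props_b.deviceUUID, VK_UUID_SIZE) == 0 &&
         memcmp(props_a.driverUUID, props_b.driverUUID, VK_UUID_SIZE) == 0;
}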

The solution has been validated with the Monado simulator and appears to be
as fast as OpenGL.

Performance can be improved further by using GPU-based synchronization.
The current API is limited in that we cannot chain the different renders
and swapchains.
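
A possible direction, sketched here for illustration only (not part of this
PR): share a semaphore between the render submission and the swapchain copy
via VK_KHR_external_semaphore_fd instead of blocking on vkQueueWaitIdle.

#include <vulkan/vulkan.h>

/* Create a semaphore that can be exported as an opaque FD, then export it.
 * The producer signals it from its vkQueueSubmit; the consumer imports the FD
 * (vkImportSemaphoreFdKHR) and waits on it in its own submission.
 * Assumes VK_KHR_external_semaphore_fd is enabled and the entry point loaded. */
static int export_semaphore_fd(VkDevice device, PFN_vkGetSemaphoreFdKHR get_semaphore_fd)
{
  VkExportSemaphoreCreateInfo export_info = {VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO,
                                             nullptr,
                                             VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT};
  VkSemaphoreCreateInfo semaphore_info = {
      VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO, &export_info, 0};
  VkSemaphore semaphore = VK_NULL_HANDLE;
  vkCreateSemaphore(device, &semaphore_info, nullptr, &semaphore);

  VkSemaphoreGetFdInfoKHR get_fd_info = {VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR,
                                         nullptr,
                                         semaphore,
                                         VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT};
  int fd = -1;
  get_semaphore_fd(device, &get_fd_info, &fd);
  return fd;
}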

Pull Request: https://projects.blender.org/blender/blender/pulls/136933
Jeroen Bakker
2025-04-04 16:01:06 +02:00
parent 2a092a623b
commit a46643af0f
14 changed files with 476 additions and 32 deletions


@@ -764,18 +764,69 @@ typedef struct {
VkFence submission_fence;
} GHOST_VulkanSwapChainData;
typedef enum {
/**
* Use RAM to transfer the render result to the XR swapchain.
*
* Application renders a view, downloads the result to CPU RAM, GHOST_XrGraphicsBindingVulkan
* will upload it to a GPU buffer and copy the buffer to the XR swapchain.
*/
GHOST_kVulkanXRModeCPU,
/**
* Use Linux FD to transfer the render result to the XR swapchain.
*
* Application renders a view and exports the memory as an FD handle. GHOST_XrGraphicsBindingVulkan
* will import the memory and copy the image to the swapchain.
*/
GHOST_kVulkanXRModeFD,
} GHOST_TVulkanXRModes;
typedef struct {
/**
* Mode to use for data transfer between the application's rendered result and the OpenXR
* swapchain. This is set by GHOST and should be respected by the application.
*/
GHOST_TVulkanXRModes data_transfer_mode;
/**
* Resolution of view render result.
*/
VkExtent2D extent;
union {
struct {
/**
* Host accessible data containing the image data. Data is stored in the selected swapchain
* format. Only used when data_transfer_mode == GHOST_kVulkanXRModeCPU.
*/
void *image_data;
} cpu;
struct {
/**
* Handle of the exported GPU memory. Depending on the data_transfer_mode the actual handle
* type can be different (voidptr/int/..).
*/
uint64_t image_handle;
/**
* Data format of the image.
*/
VkFormat image_format;
/**
* Allocation size of the exported memory.
*/
VkDeviceSize memory_size;
/**
* Offset of the texture/buffer inside the allocated memory.
*/
VkDeviceSize memory_offset;
} gpu;
};
} GHOST_VulkanOpenXRData;
typedef struct {


@@ -1064,6 +1064,15 @@ GHOST_TSuccess GHOST_ContextVK::initializeDrawingContext()
required_device_extensions.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME);
}
/* External memory extensions. */
required_device_extensions.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME);
#ifdef _WIN32
/* Placeholder to add VK_KHR_external_memory_win32 */
#elif not defined(__APPLE__)
optional_device_extensions.push_back(VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME);
#endif
#ifdef __APPLE__
optional_device_extensions.push_back(VK_EXT_PROVOKING_VERTEX_EXTENSION_NAME);
#else


@@ -235,6 +235,20 @@ void GHOST_XrGraphicsBindingVulkan::initFromGhostContext(GHOST_Context &ghost_ct
1};
vkAllocateCommandBuffers(m_vk_device, &vk_command_buffer_allocate_info, &m_vk_command_buffer);
/* Select the best data transfer mode based on the OpenXR device and ContextVK. */
m_data_transfer_mode = choseDataTransferMode();
if (m_data_transfer_mode == GHOST_kVulkanXRModeCPU) {
/* VMA */
VmaAllocatorCreateInfo allocator_create_info = {};
allocator_create_info.flags = VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT;
allocator_create_info.vulkanApiVersion = VK_API_VERSION_1_2;
allocator_create_info.physicalDevice = m_vk_physical_device;
allocator_create_info.device = m_vk_device;
allocator_create_info.instance = m_vk_instance;
vmaCreateAllocator(&allocator_create_info, &m_vma_allocator);
}
/* Update the binding struct */
oxr_binding.vk.type = XR_TYPE_GRAPHICS_BINDING_VULKAN_KHR;
oxr_binding.vk.next = nullptr;
@@ -243,15 +257,68 @@ void GHOST_XrGraphicsBindingVulkan::initFromGhostContext(GHOST_Context &ghost_ct
oxr_binding.vk.device = m_vk_device;
oxr_binding.vk.queueFamilyIndex = m_graphics_queue_family;
oxr_binding.vk.queueIndex = 0;
}
GHOST_TVulkanXRModes GHOST_XrGraphicsBindingVulkan::choseDataTransferMode()
{
GHOST_VulkanHandles vulkan_handles;
m_ghost_ctx->getVulkanHandles(vulkan_handles);
/* Retrieve the Context physical device properties. */
VkPhysicalDeviceVulkan11Properties vk_physical_device_vulkan11_properties = {
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES};
VkPhysicalDeviceProperties2 vk_physical_device_properties = {
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2, &vk_physical_device_vulkan11_properties};
vkGetPhysicalDeviceProperties2(vulkan_handles.physical_device, &vk_physical_device_properties);
/* Retrieve OpenXR physical device properties. */
VkPhysicalDeviceVulkan11Properties xr_physical_device_vulkan11_properties = {
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES};
VkPhysicalDeviceProperties2 xr_physical_device_properties = {
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2, &xr_physical_device_vulkan11_properties};
vkGetPhysicalDeviceProperties2(m_vk_physical_device, &xr_physical_device_properties);
/* When the physical device properties match between the Vulkan device and the XR device, we
* assume that they are the same physical device in the machine and we can use shared memory.
* If not, we fall back to CPU-based data transfer. */
const bool is_same_physical_device = memcmp(&vk_physical_device_vulkan11_properties,
&xr_physical_device_vulkan11_properties,
sizeof(VkPhysicalDeviceVulkan11Properties)) == 0;
if (!is_same_physical_device) {
return GHOST_kVulkanXRModeCPU;
}
/* Check for available extensions. We assume that the needed extensions are enabled when
* available during construction. */
uint32_t device_extension_count;
vkEnumerateDeviceExtensionProperties(
vulkan_handles.physical_device, nullptr, &device_extension_count, nullptr);
std::vector<VkExtensionProperties> available_device_extensions(device_extension_count);
vkEnumerateDeviceExtensionProperties(vulkan_handles.physical_device,
nullptr,
&device_extension_count,
available_device_extensions.data());
auto has_extension = [=](const char *extension_name) {
for (const auto &extension : available_device_extensions) {
if (strcmp(extension_name, extension.extensionName) == 0) {
return true;
}
}
return false;
};
#ifdef _WIN32
#elif defined(__APPLE__)
#else /* UNIX/Linux */
bool has_vk_khr_external_memory_fd_extension = has_extension(
VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME);
if (has_vk_khr_external_memory_fd_extension) {
return GHOST_kVulkanXRModeFD;
}
#endif
return GHOST_kVulkanXRModeCPU;
}
static std::optional<int64_t> choose_swapchain_format_from_candidates(
@@ -336,8 +403,31 @@ void GHOST_XrGraphicsBindingVulkan::submitToSwapchainImage(
{
XrSwapchainImageVulkan2KHR &vulkan_image = *reinterpret_cast<XrSwapchainImageVulkan2KHR *>(
&swapchain_image);
switch (m_data_transfer_mode) {
case GHOST_kVulkanXRModeFD:
submitToSwapchainImageFd(vulkan_image, draw_info);
break;
case GHOST_kVulkanXRModeCPU:
submitToSwapchainImageCpu(vulkan_image, draw_info);
break;
default:
// assert(false);
break;
}
}
/* -------------------------------------------------------------------- */
/** \name Data transfer CPU
* \{ */
void GHOST_XrGraphicsBindingVulkan::submitToSwapchainImageCpu(
XrSwapchainImageVulkan2KHR &swapchain_image, const GHOST_XrDrawViewInfo &draw_info)
{
/* Acquire frame buffer image. */
GHOST_VulkanOpenXRData openxr_data = {GHOST_kVulkanXRModeCPU};
m_ghost_ctx->openxr_acquire_framebuffer_image_callback_(&openxr_data);
/* Import render result. */
@@ -379,7 +469,8 @@ void GHOST_XrGraphicsBindingVulkan::submitToSwapchainImage(
vmaMapMemory(
m_vma_allocator, m_vk_buffer_allocation, &m_vk_buffer_allocation_info.pMappedData);
}
std::memcpy(
m_vk_buffer_allocation_info.pMappedData, openxr_data.cpu.image_data, image_data_size);
/* Copy frame buffer image to swapchain image. */
VkCommandBuffer vk_command_buffer = m_vk_command_buffer;
@@ -401,7 +492,7 @@ void GHOST_XrGraphicsBindingVulkan::submitToSwapchainImage(
VK_IMAGE_LAYOUT_GENERAL,
VK_QUEUE_FAMILY_IGNORED,
VK_QUEUE_FAMILY_IGNORED,
swapchain_image.image,
{VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}};
vkCmdPipelineBarrier(vk_command_buffer,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
@@ -424,7 +515,7 @@ void GHOST_XrGraphicsBindingVulkan::submitToSwapchainImage(
{openxr_data.extent.width, openxr_data.extent.height, 1}};
vkCmdCopyBufferToImage(vk_command_buffer,
m_vk_buffer,
swapchain_image.image,
VK_IMAGE_LAYOUT_GENERAL,
1,
&vk_buffer_image_copy);
@@ -446,6 +537,134 @@ void GHOST_XrGraphicsBindingVulkan::submitToSwapchainImage(
m_ghost_ctx->openxr_release_framebuffer_image_callback_(&openxr_data);
}
/* \} */
/* -------------------------------------------------------------------- */
/** \name Data transfer FD
* \{ */
void GHOST_XrGraphicsBindingVulkan::submitToSwapchainImageFd(
XrSwapchainImageVulkan2KHR &swapchain_image, const GHOST_XrDrawViewInfo &draw_info)
{
GHOST_VulkanOpenXRData openxr_data = {GHOST_kVulkanXRModeFD};
m_ghost_ctx->openxr_acquire_framebuffer_image_callback_(&openxr_data);
/* Create an image handle */
VkExternalMemoryImageCreateInfo vk_external_memory_image_info = {
VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO,
nullptr,
VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT};
VkImageCreateInfo vk_image_info = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
&vk_external_memory_image_info,
0,
VK_IMAGE_TYPE_2D,
openxr_data.gpu.image_format,
{openxr_data.extent.width, openxr_data.extent.height, 1},
1,
1,
VK_SAMPLE_COUNT_1_BIT,
VK_IMAGE_TILING_OPTIMAL,
VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
VK_SHARING_MODE_EXCLUSIVE,
0,
nullptr,
VK_IMAGE_LAYOUT_UNDEFINED};
VkImage vk_image;
vkCreateImage(m_vk_device, &vk_image_info, nullptr, &vk_image);
/* Import the memory */
VkMemoryDedicatedAllocateInfo vk_memory_dedicated_allocation_info = {
VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO, nullptr, vk_image, VK_NULL_HANDLE};
VkImportMemoryFdInfoKHR import_memory_info = {VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR,
&vk_memory_dedicated_allocation_info,
VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT,
int(openxr_data.gpu.image_handle)};
VkMemoryAllocateInfo allocate_info = {
VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, &import_memory_info, openxr_data.gpu.memory_size};
VkDeviceMemory device_memory;
vkAllocateMemory(m_vk_device, &allocate_info, nullptr, &device_memory);
/* Bind the imported memory to the image. */
vkBindImageMemory(m_vk_device, vk_image, device_memory, openxr_data.gpu.memory_offset);
/* Copy frame buffer image to swapchain image. */
VkCommandBuffer vk_command_buffer = m_vk_command_buffer;
/* Begin command recording */
VkCommandBufferBeginInfo vk_command_buffer_begin_info = {
VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
nullptr,
VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
nullptr};
vkBeginCommandBuffer(vk_command_buffer, &vk_command_buffer_begin_info);
/* Transfer imported render result & swap chain image (UNDEFINED -> GENERAL) */
VkImageMemoryBarrier vk_image_memory_barrier[] = {{VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
nullptr,
0,
VK_ACCESS_TRANSFER_READ_BIT,
VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_GENERAL,
VK_QUEUE_FAMILY_IGNORED,
VK_QUEUE_FAMILY_IGNORED,
vk_image,
{VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}},
{VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
nullptr,
0,
VK_ACCESS_TRANSFER_WRITE_BIT,
VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_GENERAL,
VK_QUEUE_FAMILY_IGNORED,
VK_QUEUE_FAMILY_IGNORED,
swapchain_image.image,
{VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}}};
vkCmdPipelineBarrier(vk_command_buffer,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT,
0,
0,
nullptr,
0,
nullptr,
2,
vk_image_memory_barrier);
/* Copy image to swapchain */
VkImageCopy vk_image_copy = {{VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1},
{0, 0, 0},
{VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1},
{draw_info.ofsx, draw_info.ofsy, 0},
{openxr_data.extent.width, openxr_data.extent.height, 1}};
vkCmdCopyImage(vk_command_buffer,
vk_image,
VK_IMAGE_LAYOUT_GENERAL,
swapchain_image.image,
VK_IMAGE_LAYOUT_GENERAL,
1,
&vk_image_copy);
/* End command recording. */
vkEndCommandBuffer(vk_command_buffer);
/* Submit command buffer to queue. */
VkSubmitInfo vk_submit_info = {
VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 0, nullptr, nullptr, 1, &vk_command_buffer};
vkQueueSubmit(m_vk_queue, 1, &vk_submit_info, VK_NULL_HANDLE);
/* Wait until device is idle. */
vkQueueWaitIdle(m_vk_queue);
/* Reset command buffer for next eye/frame. */
vkResetCommandBuffer(vk_command_buffer, 0);
vkDestroyImage(m_vk_device, vk_image, nullptr);
vkFreeMemory(m_vk_device, device_memory, nullptr);
}
/* \} */
bool GHOST_XrGraphicsBindingVulkan::needsUpsideDownDrawing(GHOST_Context &ghost_ctx) const
{
return ghost_ctx.isUpsideDown();


@@ -52,10 +52,17 @@ class GHOST_XrGraphicsBindingVulkan : public GHOST_IXrGraphicsBinding {
VmaAllocation m_vk_buffer_allocation = VK_NULL_HANDLE;
VkBuffer m_vk_buffer = VK_NULL_HANDLE;
VmaAllocationInfo m_vk_buffer_allocation_info = {};
GHOST_TVulkanXRModes m_data_transfer_mode = GHOST_kVulkanXRModeCPU;
std::list<std::vector<XrSwapchainImageVulkan2KHR>> m_image_cache;
VkCommandPool m_vk_command_pool = VK_NULL_HANDLE;
GHOST_TVulkanXRModes choseDataTransferMode();
void submitToSwapchainImageCpu(XrSwapchainImageVulkan2KHR &swapchain_image,
const GHOST_XrDrawViewInfo &draw_info);
void submitToSwapchainImageFd(XrSwapchainImageVulkan2KHR &swapchain_image,
const GHOST_XrDrawViewInfo &draw_info);
/**
* Single VkCommandBuffer that is used for all views/swap-chains.
*


@@ -278,6 +278,7 @@ set(VULKAN_SRC
vulkan/vk_immediate.hh
vulkan/vk_index_buffer.hh
vulkan/vk_memory_layout.hh
vulkan/vk_memory.hh
vulkan/vk_pipeline_pool.hh
vulkan/vk_pixel_buffer.hh
vulkan/vk_push_constants.hh


@@ -532,10 +532,12 @@ enum eGPUTextureUsage {
GPU_TEXTURE_USAGE_MEMORYLESS = (1 << 5),
/* Whether a texture can support atomic operations. */
GPU_TEXTURE_USAGE_ATOMIC = (1 << 6),
/* Whether a texture can be exported to other instances/processes. */
GPU_TEXTURE_USAGE_MEMORY_EXPORT = (1 << 7),
/* Create a texture whose usage cannot be defined prematurely.
* This is unoptimized and should not be used. */
GPU_TEXTURE_USAGE_GENERAL = (0xFF & (~(GPU_TEXTURE_USAGE_MEMORYLESS | GPU_TEXTURE_USAGE_ATOMIC |
GPU_TEXTURE_USAGE_MEMORY_EXPORT))),
};
ENUM_OPERATORS(eGPUTextureUsage, GPU_TEXTURE_USAGE_GENERAL);


@@ -429,15 +429,44 @@ void VKContext::openxr_acquire_framebuffer_image_handler(GHOST_VulkanOpenXRData
{
VKFrameBuffer &framebuffer = *unwrap(active_fb);
VKTexture *color_attachment = unwrap(unwrap(framebuffer.color_tex(0)));
openxr_data.extent.width = color_attachment->width_get();
openxr_data.extent.height = color_attachment->height_get();
switch (openxr_data.data_transfer_mode) {
case GHOST_kVulkanXRModeCPU:
openxr_data.cpu.image_data = color_attachment->read(0, GPU_DATA_HALF_FLOAT);
break;
case GHOST_kVulkanXRModeFD: {
flush_render_graph(RenderGraphFlushFlags::SUBMIT |
RenderGraphFlushFlags::WAIT_FOR_COMPLETION |
RenderGraphFlushFlags::RENEW_RENDER_GRAPH);
VKMemoryExport exported_memory = color_attachment->export_memory(
VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT);
openxr_data.gpu.image_handle = exported_memory.handle;
openxr_data.gpu.image_format = to_vk_format(color_attachment->device_format_get());
openxr_data.gpu.memory_size = exported_memory.memory_size;
openxr_data.gpu.memory_offset = exported_memory.memory_offset;
break;
}
}
}
void VKContext::openxr_release_framebuffer_image_handler(GHOST_VulkanOpenXRData &openxr_data)
{
switch (openxr_data.data_transfer_mode) {
case GHOST_kVulkanXRModeCPU:
MEM_freeN(openxr_data.cpu.image_data);
openxr_data.cpu.image_data = nullptr;
break;
case GHOST_kVulkanXRModeFD:
/* Nothing to do as import of the handle by the XrInstance removes the ownership of the
* handle. Ref
* https://registry.khronos.org/vulkan/specs/latest/man/html/VK_KHR_external_memory_fd.html#_issues
*/
break;
}
}
/** \} */


@@ -56,6 +56,7 @@ void VKDevice::deinit()
pipelines.free_data();
descriptor_set_layouts_.deinit();
orphaned_data.deinit(*this);
vmaDestroyPool(mem_allocator_, vma_pools.external_memory);
vmaDestroyAllocator(mem_allocator_);
mem_allocator_ = VK_NULL_HANDLE;
@@ -133,6 +134,10 @@ void VKDevice::init_functions()
functions.vkSetDebugUtilsObjectName = LOAD_FUNCTION(vkSetDebugUtilsObjectNameEXT);
functions.vkCreateDebugUtilsMessenger = LOAD_FUNCTION(vkCreateDebugUtilsMessengerEXT);
functions.vkDestroyDebugUtilsMessenger = LOAD_FUNCTION(vkDestroyDebugUtilsMessengerEXT);
/* VK_KHR_external_memory_fd */
functions.vkGetMemoryFd = LOAD_FUNCTION(vkGetMemoryFdKHR);
#undef LOAD_FUNCTION
}
@@ -208,6 +213,42 @@ void VKDevice::init_memory_allocator()
info.device = vk_device_;
info.instance = vk_instance_;
vmaCreateAllocator(&info, &mem_allocator_);
/* External memory pool */
/* Initialize a dummy image create info to find the memory type index that will be used for
* allocating. */
VkExternalMemoryImageCreateInfo external_image_create_info = {
VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO,
nullptr,
VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT};
VkImageCreateInfo image_create_info = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
&external_image_create_info,
0,
VK_IMAGE_TYPE_2D,
VK_FORMAT_R8G8B8A8_UNORM,
{1024, 1024, 1},
1,
1,
VK_SAMPLE_COUNT_1_BIT,
VK_IMAGE_TILING_OPTIMAL,
VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
VK_IMAGE_USAGE_TRANSFER_DST_BIT |
VK_IMAGE_USAGE_SAMPLED_BIT,
VK_SHARING_MODE_EXCLUSIVE,
0,
nullptr,
VK_IMAGE_LAYOUT_UNDEFINED};
VmaAllocationCreateInfo allocation_create_info = {};
allocation_create_info.usage = VMA_MEMORY_USAGE_AUTO;
uint32_t memory_type_index;
vmaFindMemoryTypeIndexForImageInfo(
mem_allocator_, &image_create_info, &allocation_create_info, &memory_type_index);
vma_pools.external_memory_info.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;
VmaPoolCreateInfo pool_create_info = {};
pool_create_info.memoryTypeIndex = memory_type_index;
pool_create_info.pMemoryAllocateNext = &vma_pools.external_memory_info;
vmaCreatePool(mem_allocator_, &pool_create_info, &vma_pools.external_memory);
}
void VKDevice::init_dummy_buffer()


@@ -232,8 +232,19 @@ class VKDevice : public NonCopyable {
PFN_vkSetDebugUtilsObjectNameEXT vkSetDebugUtilsObjectName = nullptr;
PFN_vkCreateDebugUtilsMessengerEXT vkCreateDebugUtilsMessenger = nullptr;
PFN_vkDestroyDebugUtilsMessengerEXT vkDestroyDebugUtilsMessenger = nullptr;
/* Extension: VK_KHR_external_memory_fd */
PFN_vkGetMemoryFdKHR vkGetMemoryFd = nullptr;
} functions;
struct {
/* NOTE: This attribute needs to be kept alive as it will be read by VMA when allocating from
* `external_memory` pool. */
VkExportMemoryAllocateInfoKHR external_memory_info = {
VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR};
VmaPool external_memory = VK_NULL_HANDLE;
} vma_pools;
const char *extension_name_get(int index) const
{
return device_extensions_[index].extensionName;


@@ -0,0 +1,30 @@
/* SPDX-FileCopyrightText: 2023 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later */
/** \file
* \ingroup gpu
*/
#pragma once
#include "vk_common.hh"
namespace blender::gpu {
/** Information about an exported buffer/image. */
struct VKMemoryExport {
/** Handle that has been exported. */
uint64_t handle;
/**
* Allocated memory size. Allocation size can be larger than actually requested due to memory
* alignment/allocation rules.
*/
VkDeviceSize memory_size;
/**
* Actual offset of the content inside the exported memory. A memory allocation can contain multiple
* buffers or images. The offset points to the specific buffer/image that is exported.
*/
VkDeviceSize memory_offset;
};
} // namespace blender::gpu


@@ -393,6 +393,24 @@ uint VKTexture::gl_bindcode_get() const
return 0;
}
VKMemoryExport VKTexture::export_memory(VkExternalMemoryHandleTypeFlagBits handle_type)
{
BLI_assert_msg(
bool(gpu_image_usage_flags_ & GPU_TEXTURE_USAGE_MEMORY_EXPORT),
"Can only import external memory when usage flag contains GPU_TEXTURE_USAGE_MEMORY_EXPORT.");
BLI_assert_msg(allocation_ != nullptr,
"Cannot export memory when the texture is not backed by any device memory.");
const VKDevice &device = VKBackend::get().device;
VkMemoryGetFdInfoKHR vk_memory_get_fd_info = {VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR,
nullptr,
allocation_info_.deviceMemory,
handle_type};
int fd_handle = 0;
device.functions.vkGetMemoryFd(device.vk_handle(), &vk_memory_get_fd_info, &fd_handle);
return {uint64_t(fd_handle), allocation_info_.size, allocation_info_.offset};
}
bool VKTexture::init_internal()
{
const VKDevice &device = VKBackend::get().device;
@@ -529,10 +547,12 @@ bool VKTexture::allocate()
return false;
}
const eGPUTextureUsage texture_usage = usage_get();
VKDevice &device = VKBackend::get().device;
VkImageCreateInfo image_info = {};
image_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_info.flags = to_vk_image_create(type_, format_flag_, texture_usage);
image_info.imageType = to_vk_image_type(type_);
image_info.extent = vk_extent;
image_info.mipLevels = max_ii(mipmaps_, 1);
@@ -564,15 +584,25 @@ bool VKTexture::allocate()
}
}
VkExternalMemoryImageCreateInfo external_memory_create_info = {
VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO,
nullptr,
VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT};
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
allocCreateInfo.priority = 1.0f;
if (bool(texture_usage & GPU_TEXTURE_USAGE_MEMORY_EXPORT)) {
image_info.pNext = &external_memory_create_info;
allocCreateInfo.pool = device.vma_pools.external_memory;
}
result = vmaCreateImage(device.mem_allocator_get(),
&image_info,
&allocCreateInfo,
&vk_image_,
&allocation_,
&allocation_info_);
if (result != VK_SUCCESS) {
return false;
}


@@ -12,6 +12,7 @@
#include "vk_context.hh"
#include "vk_image_view.hh"
#include "vk_memory.hh"
namespace blender::gpu {
@@ -51,6 +52,7 @@ class VKTexture : public Texture {
VKVertexBuffer *source_buffer_ = nullptr;
VkImage vk_image_ = VK_NULL_HANDLE;
VmaAllocation allocation_ = VK_NULL_HANDLE;
VmaAllocationInfo allocation_info_ = {};
/**
* Image views are owned by VKTexture. When a specific image view is needed it will be created
@@ -111,6 +113,13 @@ class VKTexture : public Texture {
/* TODO(fclem): Legacy. Should be removed at some point. */
uint gl_bindcode_get() const override;
/**
* Export the memory associated with this texture to be imported by a different
* API/Process/Instance.
*
* Returns the handle + offset of the image inside the handle.
*/
VKMemoryExport export_memory(VkExternalMemoryHandleTypeFlagBits handle_type);
VkImage vk_image_handle() const
{


@@ -1408,8 +1408,13 @@ bool wm_xr_session_surface_offscreen_ensure(wmXrSurfaceData *surface_data,
}
BLI_assert(format != GPU_R8);
offscreen = vp->offscreen = GPU_offscreen_create(draw_view->width,
draw_view->height,
true,
format,
GPU_TEXTURE_USAGE_SHADER_READ |
GPU_TEXTURE_USAGE_MEMORY_EXPORT,
err_out);
if (offscreen) {
viewport = vp->viewport = GPU_viewport_create();
if (!viewport) {