Vulkan: Automap Buffers.

It is recommended to map a buffer once and keep it mapped, rather
than mapping it each time the mapped memory is needed. This patch
maps the buffer when it is created and unmaps it when it is freed.

This reduces the overhead of repeatedly calling into the Vulkan
driver and the virtual memory manager on every map/unmap cycle.

Pull Request: https://projects.blender.org/blender/blender/pulls/105588
This commit is contained in:
Jeroen Bakker
2023-03-09 09:27:42 +01:00
parent c77b78ad53
commit 0ad06cd39d
7 changed files with 50 additions and 42 deletions

View File

@@ -70,35 +70,53 @@ bool VKBuffer::create(VKContext &context,
VkResult result = vmaCreateBuffer(
allocator, &create_info, &vma_create_info, &vk_buffer_, &allocation_, nullptr);
return result == VK_SUCCESS;
}
bool VKBuffer::update(VKContext &context, const void *data)
{
void *mapped_memory;
bool result = map(context, &mapped_memory);
if (result) {
memcpy(mapped_memory, data, size_in_bytes_);
unmap(context);
if (result != VK_SUCCESS) {
return false;
}
return result;
/* All buffers are mapped to virtual memory. */
return map(context);
}
bool VKBuffer::map(VKContext &context, void **r_mapped_memory) const
/* Copy `size_in_bytes_` bytes from `data` into the persistently mapped host
 * memory. With this patch buffers are mapped at creation time (see `create`),
 * so the buffer is expected to already be mapped here. */
void VKBuffer::update(const void *data) const
{
BLI_assert_msg(is_mapped(), "Cannot update a non-mapped buffer.");
memcpy(mapped_memory_, data, size_in_bytes_);
}
/* Copy the buffer contents into `data`. The caller must provide a destination
 * of at least `size_in_bytes_` bytes; the buffer must already be mapped. */
void VKBuffer::read(void *data) const
{
BLI_assert_msg(is_mapped(), "Cannot read a non-mapped buffer.");
memcpy(data, mapped_memory_, size_in_bytes_);
}
/* A buffer counts as mapped when `map` has stored a host-visible pointer;
 * `unmap` resets the pointer to null. */
bool VKBuffer::is_mapped() const
{
return mapped_memory_ != nullptr;
}
/* Map the VMA allocation into host address space, storing the pointer in
 * `mapped_memory_`. Returns true on success. Called once from `create`;
 * asserts the buffer is not already mapped. */
bool VKBuffer::map(VKContext &context)
{
BLI_assert(!is_mapped());
VmaAllocator allocator = context.mem_allocator_get();
/* NOTE(review): the next two lines are the removed/added sides of the rendered
 * diff; only the `&mapped_memory_` variant exists in the new code. */
VkResult result = vmaMapMemory(allocator, allocation_, r_mapped_memory);
VkResult result = vmaMapMemory(allocator, allocation_, &mapped_memory_);
return result == VK_SUCCESS;
}
void VKBuffer::unmap(VKContext &context) const
/* Unmap the VMA allocation and clear `mapped_memory_` so `is_mapped()` becomes
 * false again. Called from `free` for buffers that are still mapped. */
void VKBuffer::unmap(VKContext &context)
{
BLI_assert(is_mapped());
VmaAllocator allocator = context.mem_allocator_get();
vmaUnmapMemory(allocator, allocation_);
mapped_memory_ = nullptr;
}
bool VKBuffer::free(VKContext &context)
{
if (is_mapped()) {
unmap(context);
}
VmaAllocator allocator = context.mem_allocator_get();
vmaDestroyBuffer(allocator, vk_buffer_, allocation_);
return true;

View File

@@ -21,6 +21,8 @@ class VKBuffer {
int64_t size_in_bytes_;
VkBuffer vk_buffer_ = VK_NULL_HANDLE;
VmaAllocation allocation_ = VK_NULL_HANDLE;
/* Pointer to the virtually mapped memory. */
void *mapped_memory_ = nullptr;
public:
VKBuffer() = default;
@@ -33,10 +35,9 @@ class VKBuffer {
int64_t size,
GPUUsageType usage,
VkBufferUsageFlagBits buffer_usage);
bool update(VKContext &context, const void *data);
void update(const void *data) const;
void read(void *data) const;
bool free(VKContext &context);
bool map(VKContext &context, void **r_mapped_memory) const;
void unmap(VKContext &context) const;
int64_t size_in_bytes() const
{
@@ -47,5 +48,11 @@ class VKBuffer {
{
return vk_buffer_;
}
private:
/** Check if this buffer is mapped. */
bool is_mapped() const;
bool map(VKContext &context);
void unmap(VKContext &context);
};
} // namespace blender::gpu

View File

@@ -35,11 +35,7 @@ void VKIndexBuffer::read(uint32_t *data) const
VKCommandBuffer &command_buffer = context.command_buffer_get();
command_buffer.submit();
void *mapped_memory;
if (buffer_.map(context, &mapped_memory)) {
memcpy(data, mapped_memory, size_get());
buffer_.unmap(context);
}
buffer_.read(data);
}
void VKIndexBuffer::update_sub(uint /*start*/, uint /*len*/, const void * /*data*/)

View File

@@ -14,11 +14,11 @@ namespace blender::gpu {
void VKStorageBuffer::update(const void *data)
{
VKContext &context = *VKContext::get();
if (!buffer_.is_allocated()) {
VKContext &context = *VKContext::get();
allocate(context);
}
buffer_.update(context, data);
buffer_.update(data);
}
void VKStorageBuffer::allocate(VKContext &context)
@@ -65,11 +65,7 @@ void VKStorageBuffer::read(void *data)
VKCommandBuffer &command_buffer = context.command_buffer_get();
command_buffer.submit();
void *mapped_memory;
if (buffer_.map(context, &mapped_memory)) {
memcpy(data, mapped_memory, size_in_bytes_);
buffer_.unmap(context);
}
buffer_.read(data);
}
} // namespace blender::gpu

View File

@@ -78,17 +78,13 @@ void *VKTexture::read(int mip, eGPUDataFormat format)
command_buffer.copy(staging_buffer, *this, Span<VkBufferImageCopy>(&region, 1));
command_buffer.submit();
void *mapped_data;
staging_buffer.map(context, &mapped_data);
void *data = MEM_mallocN(host_memory_size, __func__);
/* TODO: add conversion when data format is different. */
BLI_assert_msg(device_memory_size == host_memory_size,
"Memory data conversions not implemented yet");
memcpy(data, mapped_data, host_memory_size);
staging_buffer.unmap(context);
staging_buffer.read(data);
return data;
}

View File

@@ -12,11 +12,11 @@ namespace blender::gpu {
void VKUniformBuffer::update(const void *data)
{
VKContext &context = *VKContext::get();
if (!buffer_.is_allocated()) {
VKContext &context = *VKContext::get();
allocate(context);
}
buffer_.update(context, data);
buffer_.update(data);
}
void VKUniformBuffer::allocate(VKContext &context)

View File

@@ -49,12 +49,7 @@ void VKVertexBuffer::read(void *data) const
VKContext &context = *VKContext::get();
VKCommandBuffer &command_buffer = context.command_buffer_get();
command_buffer.submit();
void *mapped_memory;
if (buffer_.map(context, &mapped_memory)) {
memcpy(data, mapped_memory, size_used_get());
buffer_.unmap(context);
}
buffer_.read(data);
}
void VKVertexBuffer::acquire_data()