GPU: Add support for GPU_vertbuf_update_sub

`GPU_vertbuf_update_sub` is used by GPU-based subdivision to integrate
quads, triangles and edges. This is a minimal implementation to make it
work; bigger changes are planned to improve the performance of
uploading data to the GPU.

Pull Request: https://projects.blender.org/blender/blender/pulls/135774
This commit is contained in:
Jeroen Bakker
2025-03-11 10:14:00 +01:00
parent 96c9f480b3
commit cdc37b2235
5 changed files with 47 additions and 9 deletions

View File

@@ -78,9 +78,16 @@ bool VKBuffer::create(size_t size_in_bytes,
}
/* Overwrite the entire buffer contents from `data` through the host mapping.
 * Delegates to the sub-range variant with offset 0 and the full buffer size,
 * so the mapped-buffer precondition is checked in one place. */
void VKBuffer::update_immediately(const void *data) const
{
update_sub_immediately(0, size_in_bytes_, data);
}
/**
 * Copy `data_size` bytes from `data` into this buffer starting at byte offset
 * `start_offset`, writing directly through the host mapping.
 *
 * Preconditions: the buffer must be host-mapped, and the region
 * [start_offset, start_offset + data_size) must lie inside the allocation.
 */
void VKBuffer::update_sub_immediately(size_t start_offset,
                                      size_t data_size,
                                      const void *data) const
{
  BLI_assert_msg(is_mapped(), "Cannot update a non-mapped buffer.");
  /* The stale full-buffer `memcpy(mapped_memory_, data, size_in_bytes_)` that
   * preceded this line ignored the offset and over-read `data`; only the
   * offset-aware copy below is correct. Guard the region bounds explicitly. */
  BLI_assert_msg(start_offset + data_size <= size_in_bytes_,
                 "Cannot update a region outside the buffer bounds.");
  memcpy(static_cast<uint8_t *>(mapped_memory_) + start_offset, data, data_size);
}
void VKBuffer::update_render_graph(VKContext &context, void *data) const

View File

@@ -47,6 +47,7 @@ class VKBuffer : public NonCopyable {
VmaAllocationCreateFlags vma_allocation_flags);
void clear(VKContext &context, uint32_t clear_value);
void update_immediately(const void *data) const;
void update_sub_immediately(size_t start_offset, size_t data_size, const void *data) const;
/**
* Update the buffer as part of the render graph evaluation. The ownership of data will be

View File

@@ -11,8 +11,13 @@
namespace blender::gpu {
VKStagingBuffer::VKStagingBuffer(const VKBuffer &device_buffer, Direction direction)
: device_buffer_(device_buffer)
VKStagingBuffer::VKStagingBuffer(const VKBuffer &device_buffer,
Direction direction,
VkDeviceSize device_buffer_offset,
VkDeviceSize region_size)
: device_buffer_(device_buffer),
device_buffer_offset_(device_buffer_offset),
region_size_(region_size == UINT64_MAX ? device_buffer.size_in_bytes() : region_size)
{
VkBufferUsageFlags usage;
switch (direction) {
@@ -23,7 +28,7 @@ VKStagingBuffer::VKStagingBuffer(const VKBuffer &device_buffer, Direction direct
usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
}
host_buffer_.create(device_buffer.size_in_bytes(),
host_buffer_.create(region_size_,
usage,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
@@ -38,7 +43,8 @@ void VKStagingBuffer::copy_to_device(VKContext &context)
render_graph::VKCopyBufferNode::CreateInfo copy_buffer = {};
copy_buffer.src_buffer = host_buffer_.vk_handle();
copy_buffer.dst_buffer = device_buffer_.vk_handle();
copy_buffer.region.size = device_buffer_.size_in_bytes();
copy_buffer.region.dstOffset = device_buffer_offset_;
copy_buffer.region.size = region_size_;
context.render_graph().add_node(copy_buffer);
}
@@ -49,7 +55,8 @@ void VKStagingBuffer::copy_from_device(VKContext &context)
render_graph::VKCopyBufferNode::CreateInfo copy_buffer = {};
copy_buffer.src_buffer = device_buffer_.vk_handle();
copy_buffer.dst_buffer = host_buffer_.vk_handle();
copy_buffer.region.size = device_buffer_.size_in_bytes();
copy_buffer.region.srcOffset = device_buffer_offset_;
copy_buffer.region.size = region_size_;
context.render_graph().add_node(copy_buffer);
}

View File

@@ -45,8 +45,14 @@ class VKStagingBuffer {
*/
VKBuffer host_buffer_;
VkDeviceSize device_buffer_offset_;
VkDeviceSize region_size_;
public:
VKStagingBuffer(const VKBuffer &device_buffer, Direction direction);
VKStagingBuffer(const VKBuffer &device_buffer,
Direction direction,
VkDeviceSize device_buffer_offset = 0,
VkDeviceSize region_size = UINT64_MAX);
/**
* Copy the content of the host buffer to the device buffer.

View File

@@ -65,9 +65,26 @@ void VKVertexBuffer::wrap_handle(uint64_t /*handle*/)
NOT_YET_IMPLEMENTED
}
/**
 * Update the sub-range [start_offset, start_offset + data_size_in_bytes) of the
 * vertex buffer with `data`.
 *
 * Two paths:
 * - Host-mapped buffer and no format conversion needed: write straight through
 *   the mapping.
 * - Otherwise: fill a host staging buffer covering only the target region and
 *   schedule a copy into the device buffer via the render graph.
 */
void VKVertexBuffer::update_sub(uint start_offset, uint data_size_in_bytes, const void *data)
{
  device_format_ensure();
  if (buffer_.is_mapped() && !vertex_format_converter.needs_conversion()) {
    /* Fast path: device buffer is host-visible, copy directly. */
    buffer_.update_sub_immediately(start_offset, data_size_in_bytes, data);
  }
  else {
    VKContext &context = *VKContext::get();
    /* Staging buffer sized to the updated region only; the copy node it records
     * targets `start_offset` inside the device buffer. */
    VKStagingBuffer staging_buffer(
        buffer_, VKStagingBuffer::Direction::HostToDevice, start_offset, data_size_in_bytes);
    if (vertex_format_converter.needs_conversion()) {
      /* NOTE(review): this converts from the CPU-side copy `data_`, not the `data`
       * argument, and `data_size_in_bytes / vertex_len` divides bytes by the vertex
       * count (yielding a per-vertex stride rather than a count) — confirm both
       * against the converter's contract. */
      vertex_format_converter.convert(staging_buffer.host_buffer_get().mapped_memory_get(),
                                      data_,
                                      data_size_in_bytes / vertex_len);
    }
    else {
      memcpy(staging_buffer.host_buffer_get().mapped_memory_get(), data, data_size_in_bytes);
    }
    staging_buffer.copy_to_device(context);
  }
}
void VKVertexBuffer::read(void *data) const