Previously, a storage buffer was used to store draw-list commands, as that matched already existing APIs. Unfortunately, StorageBuffers prefer to be stored on the GPU device, which would reduce the benefit of a dynamic draw list. This PR replaces the storage buffer with a regular buffer, which keeps more control over where the buffer is stored. Pull Request: https://projects.blender.org/blender/blender/pulls/117712
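
For context, the gain comes down to where the backing memory lives. Below is a minimal plain-Vulkan sketch, illustrative only: create_draw_list_buffer and its parameters are invented for this example and this is not Blender's VKBuffer API. It shows the host-visible versus device-local placement choice that a "regular" buffer keeps open for a dynamic draw list.

/* Sketch: a storage buffer normally ends up in device-local memory, while a regular
 * buffer can request host-visible memory so a dynamic draw list can be rewritten from
 * the CPU each frame without a staging copy. */
#include <vulkan/vulkan.h>

static VkBuffer create_draw_list_buffer(VkDevice device, VkDeviceSize size, bool host_visible)
{
  VkBufferCreateInfo create_info = {};
  create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
  create_info.size = size;
  /* Keep INDIRECT usage so the buffer can still feed vkCmdDrawIndirect. */
  create_info.usage = VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
  create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;

  VkBuffer buffer = VK_NULL_HANDLE;
  vkCreateBuffer(device, &create_info, nullptr, &buffer);

  /* Memory-type selection is elided: when host_visible is true, pick a memory type with
   * VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT so the CPU
   * can map and rewrite the draw list directly; otherwise prefer
   * VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, which is what VKStorageBuffer::allocate() below
   * effectively does with is_host_visible = false. */
  (void)host_visible;
  return buffer;
}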
119 lines · 3.3 KiB · C++
/* SPDX-FileCopyrightText: 2022 Blender Authors
 *
 * SPDX-License-Identifier: GPL-2.0-or-later */

/** \file
 * \ingroup gpu
 */

#include "vk_shader.hh"
#include "vk_shader_interface.hh"
#include "vk_staging_buffer.hh"
#include "vk_state_manager.hh"
#include "vk_vertex_buffer.hh"

#include "vk_storage_buffer.hh"

namespace blender::gpu {

VKStorageBuffer::VKStorageBuffer(int size, GPUUsageType usage, const char *name)
    : StorageBuf(size, name), usage_(usage)
{
}

void VKStorageBuffer::update(const void *data)
{
  VKContext &context = *VKContext::get();
  ensure_allocated();
  VKStagingBuffer staging_buffer(buffer_, VKStagingBuffer::Direction::HostToDevice);
  staging_buffer.host_buffer_get().update(data);
  staging_buffer.copy_to_device(context);
}

void VKStorageBuffer::ensure_allocated()
{
  if (!buffer_.is_allocated()) {
    allocate();
  }
}

void VKStorageBuffer::allocate()
{
  const bool is_host_visible = false;
  const VkBufferUsageFlags buffer_usage_flags = VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT |
                                                VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
                                                VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
                                                VK_BUFFER_USAGE_TRANSFER_DST_BIT;
  buffer_.create(size_in_bytes_, usage_, buffer_usage_flags, is_host_visible);
  debug::object_label(buffer_.vk_handle(), name_);
}

void VKStorageBuffer::bind(int slot)
{
  VKContext &context = *VKContext::get();
  context.state_manager_get().storage_buffer_bind(*this, slot);
}

void VKStorageBuffer::bind(int slot,
                           shader::ShaderCreateInfo::Resource::BindType bind_type,
                           const GPUSamplerState /*sampler_state*/)
{
  VKContext &context = *VKContext::get();
  VKShader *shader = static_cast<VKShader *>(context.shader);
  ensure_allocated();
  const VKShaderInterface &shader_interface = shader->interface_get();
  const std::optional<VKDescriptorSet::Location> location =
      shader_interface.descriptor_set_location(bind_type, slot);
  if (location) {
    VKDescriptorSetTracker &descriptor_set = context.descriptor_set_get();
    descriptor_set.bind(*this, *location);
  }
}

void VKStorageBuffer::unbind()
{
  unbind_from_active_context();
}

void VKStorageBuffer::clear(uint32_t clear_value)
{
  ensure_allocated();
  VKContext &context = *VKContext::get();
  buffer_.clear(context, clear_value);
}

void VKStorageBuffer::copy_sub(VertBuf *src, uint dst_offset, uint src_offset, uint copy_size)
{
  ensure_allocated();

  VKVertexBuffer &src_vertex_buffer = *unwrap(src);
  src_vertex_buffer.upload();

  VkBufferCopy region = {};
  region.srcOffset = src_offset;
  region.dstOffset = dst_offset;
  region.size = copy_size;

  VKContext &context = *VKContext::get();
  VKCommandBuffers &command_buffers = context.command_buffers_get();
  command_buffers.copy(buffer_, src_vertex_buffer.vk_handle(), Span<VkBufferCopy>(&region, 1));
  context.flush();
}

void VKStorageBuffer::async_flush_to_host()
{
  GPU_memory_barrier(GPU_BARRIER_BUFFER_UPDATE);
}

void VKStorageBuffer::read(void *data)
{
  ensure_allocated();
  VKContext &context = *VKContext::get();
  context.flush();

  VKStagingBuffer staging_buffer(buffer_, VKStagingBuffer::Direction::DeviceToHost);
  staging_buffer.copy_from_device(context);
  staging_buffer.host_buffer_get().read(data);
}

}  // namespace blender::gpu
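
For reference, a minimal usage sketch of how this backend class is normally reached from the GPU module's frontend rather than instantiated directly, assuming the GPU_storage_buffer.h API at this revision; the buffer contents and binding slot are arbitrary.

/* Requires an active GPU context. */
#include <cstdint>

#include "GPU_storage_buffer.h"

static void storage_buffer_roundtrip_example()
{
  uint32_t values[4] = {0, 1, 2, 3};

  /* Passing initial data goes through VKStorageBuffer::update(), which allocates the
   * VkBuffer on first use via ensure_allocated(). */
  GPUStorageBuf *ssbo = GPU_storagebuf_create_ex(
      sizeof(values), values, GPU_USAGE_DYNAMIC, __func__);

  /* Binds through the state manager, ending up in VKStorageBuffer::bind(slot). */
  GPU_storagebuf_bind(ssbo, 0);

  /* ... dispatch compute or draw work that reads/writes the SSBO ... */

  /* Read-back maps to VKStorageBuffer::read(): flush, then copy device-to-host through a
   * staging buffer. */
  GPU_storagebuf_read(ssbo, values);

  GPU_storagebuf_unbind(ssbo);
  GPU_storagebuf_free(ssbo);
}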