Vulkan: Use Generic Buffer to Store DrawList Commands
Previously, a storage buffer was used to store draw list commands, as it matched already-existing APIs. Unfortunately, storage buffers prefer to be stored on the GPU device, which would reduce the benefit of a dynamic draw list. This PR replaces the storage buffer with a regular buffer, which keeps more control over where the buffer is stored. Pull Request: https://projects.blender.org/blender/blender/pulls/117712
This commit is contained in:
@@ -74,13 +74,21 @@ void VKBatch::draw_indirect(GPUStorageBuf *indirect_buf, intptr_t offset)
|
||||
}
|
||||
|
||||
void VKBatch::multi_draw_indirect(GPUStorageBuf *indirect_buf,
|
||||
int count,
|
||||
intptr_t offset,
|
||||
intptr_t stride)
|
||||
const int count,
|
||||
const intptr_t offset,
|
||||
const intptr_t stride)
|
||||
{
|
||||
VKStorageBuffer &indirect_buffer = *unwrap(unwrap(indirect_buf));
|
||||
multi_draw_indirect(indirect_buffer.vk_handle(), count, offset, stride);
|
||||
}
|
||||
|
||||
void VKBatch::multi_draw_indirect(const VkBuffer indirect_buffer,
|
||||
const int count,
|
||||
const intptr_t offset,
|
||||
const intptr_t stride)
|
||||
{
|
||||
draw_setup();
|
||||
|
||||
VKStorageBuffer &indirect_buffer = *unwrap(unwrap(indirect_buf));
|
||||
VKContext &context = *VKContext::get();
|
||||
VKIndexBuffer *index_buffer = index_buffer_get();
|
||||
const bool draw_indexed = index_buffer != nullptr;
|
||||
|
||||
@@ -8,6 +8,8 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "vk_common.hh"
|
||||
|
||||
#include "gpu_batch_private.hh"
|
||||
|
||||
namespace blender::gpu {
|
||||
@@ -22,6 +24,7 @@ class VKBatch : public Batch {
|
||||
int count,
|
||||
intptr_t offset,
|
||||
intptr_t stride) override;
|
||||
void multi_draw_indirect(VkBuffer indirect_buf, int count, intptr_t offset, intptr_t stride);
|
||||
|
||||
VKVertexBuffer *vertex_buffer_get(int index);
|
||||
VKVertexBuffer *instance_buffer_get(int index);
|
||||
|
||||
@@ -554,31 +554,29 @@ void VKCommandBuffers::draw_indexed(
|
||||
command_buffer.command_recorded();
|
||||
}
|
||||
|
||||
void VKCommandBuffers::draw_indirect(const VKStorageBuffer &buffer,
|
||||
VkDeviceSize offset,
|
||||
uint32_t draw_count,
|
||||
uint32_t stride)
|
||||
void VKCommandBuffers::draw_indirect(const VkBuffer buffer,
|
||||
const VkDeviceSize offset,
|
||||
const uint32_t draw_count,
|
||||
const uint32_t stride)
|
||||
{
|
||||
validate_framebuffer_exists();
|
||||
ensure_active_framebuffer();
|
||||
|
||||
VKCommandBuffer &command_buffer = command_buffer_get(Type::Graphics);
|
||||
vkCmdDrawIndirect(
|
||||
command_buffer.vk_command_buffer(), buffer.vk_handle(), offset, draw_count, stride);
|
||||
vkCmdDrawIndirect(command_buffer.vk_command_buffer(), buffer, offset, draw_count, stride);
|
||||
command_buffer.command_recorded();
|
||||
}
|
||||
|
||||
void VKCommandBuffers::draw_indexed_indirect(const VKStorageBuffer &buffer,
|
||||
VkDeviceSize offset,
|
||||
uint32_t draw_count,
|
||||
uint32_t stride)
|
||||
void VKCommandBuffers::draw_indexed_indirect(const VkBuffer buffer,
|
||||
const VkDeviceSize offset,
|
||||
const uint32_t draw_count,
|
||||
const uint32_t stride)
|
||||
{
|
||||
validate_framebuffer_exists();
|
||||
ensure_active_framebuffer();
|
||||
|
||||
VKCommandBuffer &command_buffer = command_buffer_get(Type::Graphics);
|
||||
vkCmdDrawIndexedIndirect(
|
||||
command_buffer.vk_command_buffer(), buffer.vk_handle(), offset, draw_count, stride);
|
||||
vkCmdDrawIndexedIndirect(command_buffer.vk_command_buffer(), buffer, offset, draw_count, stride);
|
||||
command_buffer.command_recorded();
|
||||
}
|
||||
|
||||
|
||||
@@ -130,11 +130,8 @@ class VKCommandBuffers : public NonCopyable, NonMovable {
|
||||
void draw_indexed(
|
||||
int index_count, int instance_count, int first_index, int vertex_offset, int first_instance);
|
||||
|
||||
void draw_indirect(const VKStorageBuffer &buffer,
|
||||
VkDeviceSize offset,
|
||||
uint32_t draw_count,
|
||||
uint32_t stride);
|
||||
void draw_indexed_indirect(const VKStorageBuffer &buffer,
|
||||
void draw_indirect(VkBuffer buffer, VkDeviceSize offset, uint32_t draw_count, uint32_t stride);
|
||||
void draw_indexed_indirect(VkBuffer buffer,
|
||||
VkDeviceSize offset,
|
||||
uint32_t draw_count,
|
||||
uint32_t stride);
|
||||
|
||||
@@ -16,12 +16,12 @@
|
||||
|
||||
namespace blender::gpu {
|
||||
|
||||
VKDrawList::VKDrawList(int list_length)
|
||||
: command_buffer_(
|
||||
list_length * sizeof(VkDrawIndexedIndirectCommand), GPU_USAGE_STREAM, __func__),
|
||||
length_(list_length)
|
||||
VKDrawList::VKDrawList(int list_length) : length_(list_length)
|
||||
{
|
||||
command_buffer_.ensure_allocated();
|
||||
command_buffer_.create(list_length * sizeof(VkDrawIndexedIndirectCommand),
|
||||
GPU_USAGE_DYNAMIC,
|
||||
VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT,
|
||||
true);
|
||||
}
|
||||
|
||||
void VKDrawList::append(GPUBatch *gpu_batch, int instance_first, int instance_count)
|
||||
@@ -72,11 +72,14 @@ void VKDrawList::submit()
|
||||
batch_ = nullptr;
|
||||
return;
|
||||
}
|
||||
if (command_index_ > 1) {
|
||||
printf("%s: %d\n", __func__, command_index_);
|
||||
}
|
||||
|
||||
const VKIndexBuffer *index_buffer = batch_->index_buffer_get();
|
||||
const bool is_indexed = index_buffer != nullptr;
|
||||
command_buffer_.buffer_get().flush();
|
||||
batch_->multi_draw_indirect(wrap(wrap(&command_buffer_)),
|
||||
command_buffer_.flush();
|
||||
batch_->multi_draw_indirect(command_buffer_.vk_handle(),
|
||||
command_index_,
|
||||
0,
|
||||
is_indexed ? sizeof(VkDrawIndexedIndirectCommand) :
|
||||
|
||||
@@ -10,7 +10,7 @@
|
||||
|
||||
#include "gpu_drawlist_private.hh"
|
||||
|
||||
#include "vk_storage_buffer.hh"
|
||||
#include "vk_buffer.hh"
|
||||
|
||||
namespace blender::gpu {
|
||||
class VKBatch;
|
||||
@@ -23,15 +23,15 @@ class VKDrawList : public DrawList {
|
||||
VKBatch *batch_ = nullptr;
|
||||
|
||||
/**
|
||||
* Storage buffer containing the commands.
|
||||
* Buffer containing the commands.
|
||||
*
|
||||
* The storage buffer is host visible and new commands are directly added to the buffer. Reducing
|
||||
* The buffer is host visible and new commands are directly added to the buffer. Reducing
|
||||
* the need to copy the commands from an intermediate buffer to the GPU. The commands are only
|
||||
* written once and used once.
|
||||
*
|
||||
* The data can be used to record VkDrawIndirectCommands or VkDrawIndirectIndexedCommands.
|
||||
* The buffer contains VkDrawIndirectCommands or VkDrawIndirectIndexedCommands.
|
||||
*/
|
||||
VKStorageBuffer command_buffer_;
|
||||
VKBuffer command_buffer_;
|
||||
|
||||
/**
|
||||
* Maximum number of commands that can be recorded per batch. Commands will be flushed when this
|
||||
@@ -69,8 +69,7 @@ class VKDrawList : public DrawList {
|
||||
template<typename CommandType> CommandType &get_command() const
|
||||
{
|
||||
return MutableSpan<CommandType>(
|
||||
static_cast<CommandType *>(command_buffer_.buffer_get().mapped_memory_get()),
|
||||
length_)[command_index_];
|
||||
static_cast<CommandType *>(command_buffer_.mapped_memory_get()), length_)[command_index_];
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
@@ -38,13 +38,11 @@ void VKStorageBuffer::ensure_allocated()
|
||||
|
||||
void VKStorageBuffer::allocate()
|
||||
{
|
||||
const bool is_host_visible = ELEM(usage_, GPU_USAGE_STREAM);
|
||||
VkBufferUsageFlags buffer_usage_flags = ELEM(usage_, GPU_USAGE_STREAM) ?
|
||||
VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT :
|
||||
VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT |
|
||||
VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
|
||||
VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
|
||||
VK_BUFFER_USAGE_TRANSFER_DST_BIT;
|
||||
const bool is_host_visible = false;
|
||||
const VkBufferUsageFlags buffer_usage_flags = VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT |
|
||||
VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
|
||||
VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
|
||||
VK_BUFFER_USAGE_TRANSFER_DST_BIT;
|
||||
buffer_.create(size_in_bytes_, usage_, buffer_usage_flags, is_host_visible);
|
||||
debug::object_label(buffer_.vk_handle(), name_);
|
||||
}
|
||||
|
||||
@@ -49,11 +49,6 @@ class VKStorageBuffer : public StorageBuf, public VKBindableResource {
|
||||
|
||||
void ensure_allocated();
|
||||
|
||||
const VKBuffer &buffer_get() const
|
||||
{
|
||||
return buffer_;
|
||||
}
|
||||
|
||||
private:
|
||||
void allocate();
|
||||
};
|
||||
@@ -62,9 +57,5 @@ BLI_INLINE VKStorageBuffer *unwrap(StorageBuf *storage_buffer)
|
||||
{
|
||||
return static_cast<VKStorageBuffer *>(storage_buffer);
|
||||
}
|
||||
BLI_INLINE StorageBuf *wrap(VKStorageBuffer *storage_buffer)
|
||||
{
|
||||
return static_cast<StorageBuf *>(storage_buffer);
|
||||
}
|
||||
|
||||
} // namespace blender::gpu
|
||||
|
||||
Reference in New Issue
Block a user