Descriptor sets/pools are known to be troublesome as they don't match how GPUs work, or how applications want to work, adding more complexity than needed. This results in quite an overhead when allocating and deallocating descriptor sets. This PR will use descriptor buffers when they are available; most platforms support descriptor buffers. When not available, descriptor pools/sets will be used. Although this is a feature, I would like to land it in 4.5 due to the API changes, as this makes it easier to fix issues after 4.5 is released. The feature can easily be disabled by setting the feature flag to false if it has too many problems. Pull Request: https://projects.blender.org/blender/blender/pulls/138266
180 lines
4.7 KiB
C++
180 lines
4.7 KiB
C++
/* SPDX-FileCopyrightText: 2022 Blender Authors
|
|
*
|
|
* SPDX-License-Identifier: GPL-2.0-or-later */
|
|
|
|
/** \file
|
|
* \ingroup gpu
|
|
*/
|
|
|
|
#include "MEM_guardedalloc.h"
|
|
|
|
#include "vk_data_conversion.hh"
|
|
#include "vk_shader.hh"
|
|
#include "vk_shader_interface.hh"
|
|
#include "vk_staging_buffer.hh"
|
|
#include "vk_state_manager.hh"
|
|
#include "vk_vertex_buffer.hh"
|
|
|
|
namespace blender::gpu {
|
|
|
|
VKVertexBuffer::~VKVertexBuffer()
{
  /* Free the host-side data copy and discard the Vulkan buffer view (if one was created). */
  release_data();
}
|
|
|
|
void VKVertexBuffer::bind_as_ssbo(uint binding)
{
  /* Register this vertex buffer as a storage buffer at `binding` on the active context. */
  VKStateManager &state_manager = VKContext::get()->state_manager_get();
  state_manager.storage_buffer_bind(BindSpaceStorageBuffers::Type::VertexBuffer, this, binding);
}
|
|
|
|
void VKVertexBuffer::bind_as_texture(uint binding)
{
  /* Register this vertex buffer as a texel buffer at `binding` on the active context. */
  VKContext &ctx = *VKContext::get();
  ctx.state_manager_get().texel_buffer_bind(*this, binding);
}
|
|
|
|
void VKVertexBuffer::ensure_updated()
{
  /* Allocates the device buffer on first use and uploads host data when dirty. */
  upload_data();
}
|
|
|
|
void VKVertexBuffer::ensure_buffer_view()
|
|
{
|
|
if (vk_buffer_view_ != VK_NULL_HANDLE) {
|
|
return;
|
|
}
|
|
|
|
VkBufferViewCreateInfo buffer_view_info = {};
|
|
buffer_view_info.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO;
|
|
buffer_view_info.buffer = buffer_.vk_handle();
|
|
buffer_view_info.format = to_vk_format();
|
|
buffer_view_info.range = buffer_.size_in_bytes();
|
|
|
|
const VKDevice &device = VKBackend::get().device;
|
|
vkCreateBufferView(device.vk_handle(), &buffer_view_info, nullptr, &vk_buffer_view_);
|
|
debug::object_label(vk_buffer_view_, "VertexBufferView");
|
|
}
|
|
|
|
void VKVertexBuffer::wrap_handle(uint64_t /*handle*/)
{
  /* Wrapping an externally created Vulkan buffer handle isn't supported by this backend yet. */
  NOT_YET_IMPLEMENTED
}
|
|
|
|
void VKVertexBuffer::update_sub(uint start_offset, uint data_size_in_bytes, const void *data)
{
  /* Host visible (mapped) buffers can be written to directly. */
  if (buffer_.is_mapped()) {
    buffer_.update_sub_immediately(start_offset, data_size_in_bytes, data);
    return;
  }

  /* Device local buffers go via a host visible staging buffer. */
  VKContext &context = *VKContext::get();
  VKStagingBuffer staging(
      buffer_, VKStagingBuffer::Direction::HostToDevice, start_offset, data_size_in_bytes);
  memcpy(staging.host_buffer_get().mapped_memory_get(), data, data_size_in_bytes);
  staging.copy_to_device(context);
}
|
|
|
|
void VKVertexBuffer::read(void *data) const
|
|
{
|
|
VKContext &context = *VKContext::get();
|
|
if (buffer_.is_mapped()) {
|
|
buffer_.read(context, data);
|
|
return;
|
|
}
|
|
|
|
VKStagingBuffer staging_buffer(buffer_, VKStagingBuffer::Direction::DeviceToHost);
|
|
staging_buffer.copy_from_device(context);
|
|
staging_buffer.host_buffer_get().read(context, data);
|
|
}
|
|
|
|
void VKVertexBuffer::acquire_data()
{
  /* Device-only buffers never keep a host-side copy of the data. */
  if (usage_ == GPU_USAGE_DEVICE_ONLY) {
    return;
  }

  /* Discard previous data if any. */
  /* TODO: Use mapped memory. */
  MEM_SAFE_FREE(data_);
  data_ = MEM_malloc_arrayN<uchar>(this->size_alloc_get(), __func__);
}
|
|
|
|
void VKVertexBuffer::resize_data()
|
|
{
|
|
if (usage_ == GPU_USAGE_DEVICE_ONLY) {
|
|
return;
|
|
}
|
|
|
|
data_ = (uchar *)MEM_reallocN(data_, sizeof(uchar) * this->size_alloc_get());
|
|
}
|
|
|
|
void VKVertexBuffer::release_data()
{
  /* The buffer view may still be referenced by submitted work, so it is handed to the discard
   * pool rather than destroyed immediately. */
  if (vk_buffer_view_ != VK_NULL_HANDLE) {
    VKDiscardPool::discard_pool_get().discard_buffer_view(vk_buffer_view_);
    vk_buffer_view_ = VK_NULL_HANDLE;
  }

  /* Free the host-side data copy (no-op when already freed or never allocated). */
  MEM_SAFE_FREE(data_);
}
|
|
|
|
void VKVertexBuffer::upload_data_direct(const VKBuffer &host_buffer)
{
  /* Write the host-side vertex data straight into the given host visible buffer. */
  host_buffer.update_immediately(data_);
}
|
|
|
|
void VKVertexBuffer::upload_data_via_staging_buffer(VKContext &context)
{
  /* Fill a host visible staging buffer with the vertex data, then copy it to the device. */
  VKStagingBuffer staging(buffer_, VKStagingBuffer::Direction::HostToDevice);
  upload_data_direct(staging.host_buffer_get());
  staging.copy_to_device(context);
}
|
|
|
|
void VKVertexBuffer::upload_data()
{
  /* Lazily create the device buffer on first upload. */
  if (!buffer_.is_allocated()) {
    allocate();
  }
  /* Only usages with host-provided data are uploaded from here. */
  if (!ELEM(usage_, GPU_USAGE_STATIC, GPU_USAGE_STREAM, GPU_USAGE_DYNAMIC)) {
    return;
  }

  if (flag & GPU_VERTBUF_DATA_DIRTY) {
    if (buffer_.is_mapped() && !data_uploaded_) {
      /* First upload into a host mapped buffer can be written directly. */
      upload_data_direct(buffer_);
    }
    else {
      /* Unmapped buffers (or re-uploads) are copied via a staging buffer. */
      VKContext &context = *VKContext::get();
      upload_data_via_staging_buffer(context);
    }
    if (usage_ == GPU_USAGE_STATIC) {
      /* Static buffers keep no host copy after the upload. */
      MEM_SAFE_FREE(data_);
    }
    data_uploaded_ = true;

    /* Mark the buffer clean and record that device data exists. */
    flag &= ~GPU_VERTBUF_DATA_DIRTY;
    flag |= GPU_VERTBUF_DATA_UPLOADED;
  }
}
|
|
|
|
void VKVertexBuffer::allocate()
|
|
{
|
|
VkBufferUsageFlags vk_buffer_usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
|
|
VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
|
|
VK_BUFFER_USAGE_VERTEX_BUFFER_BIT |
|
|
VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT |
|
|
VK_BUFFER_USAGE_TRANSFER_DST_BIT;
|
|
|
|
buffer_.create(size_alloc_get(),
|
|
vk_buffer_usage,
|
|
0,
|
|
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
|
|
VmaAllocationCreateFlags(0));
|
|
debug::object_label(buffer_.vk_handle(), "VertexBuffer");
|
|
}
|
|
|
|
} // namespace blender::gpu
|