Files
test2/source/blender/gpu/intern/gpu_storage_buffer.cc
Jason Fielder 1b0ddfa6cb GPU: Add explicit API to sync storage buffer back to host
PR Introduces GPU_storagebuf_sync_to_host as an explicit routine to
flush GPU-resident storage buffer memory back to the host within the
GPU command stream.

The previous implementation relied on implicit synchronization of
resources using OpenGL barriers, which does not match the
paradigm of explicit APIs, where individual resources may need
to be tracked.

This patch ensures GPU_storagebuf_read can be called without
stalling the GPU pipeline while work finishes executing. There are
two possible use cases:

1) If GPU_storagebuf_read is called AFTER an explicit call to
GPU_storagebuf_sync_to_host, the read will be synchronized.
If the dependent work is still executing on the GPU, the host
will stall until GPU work has completed and results are available.

2) If GPU_storagebuf_read is called WITHOUT an explicit call to
GPU_storagebuf_sync_to_host, the read will be asynchronous
and whatever memory is visible to the host at that time will be used.
(This is the same as assuming a sync event has already been signalled.)

This patch also addresses a gap in the Metal implementation where
there was missing read support for GPU-only storage buffers.
This routine now uses a staging buffer to copy results if no
host-visible buffer was available.

Reading from a GPU-only storage buffer will always stall
the host, because results cannot be pre-flushed when no
host-resident buffer is available.

Authored by Apple: Michael Parkin-White

Pull Request: https://projects.blender.org/blender/blender/pulls/113456
2023-10-20 17:04:36 +02:00

120 lines
2.4 KiB
C++

/* SPDX-FileCopyrightText: 2022 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later */
/** \file
* \ingroup gpu
*/
#include "MEM_guardedalloc.h"
#include <cstring>
#include "BLI_blenlib.h"
#include "BLI_math_base.h"
#include "gpu_backend.hh"
#include "GPU_material.h"
#include "GPU_vertex_buffer.h" /* For GPUUsageType. */
#include "GPU_storage_buffer.h"
#include "gpu_storage_buffer_private.hh"
#include "gpu_vertex_buffer_private.hh"
/* -------------------------------------------------------------------- */
/** \name Creation & Deletion
* \{ */
namespace blender::gpu {

StorageBuf::StorageBuf(size_t size, const char *name)
{
  /* Storage buffers are required to be padded to a multiple of vec4 (16 bytes). */
  BLI_assert((size % 16) == 0);

  STRNCPY(name_, name);
  size_in_bytes_ = size;
}

StorageBuf::~StorageBuf()
{
  /* Release the host-side data copy, if any was allocated. */
  MEM_SAFE_FREE(data_);
}

}  // namespace blender::gpu
/** \} */
/* -------------------------------------------------------------------- */
/** \name C-API
* \{ */
using namespace blender::gpu;
/**
 * Create a storage buffer of \a size bytes through the active backend.
 * When \a data is non-null, its contents are uploaded immediately.
 */
GPUStorageBuf *GPU_storagebuf_create_ex(size_t size,
                                        const void *data,
                                        GPUUsageType usage,
                                        const char *name)
{
  StorageBuf *buf = GPUBackend::get()->storagebuf_alloc(size, usage, name);
  if (data != nullptr) {
    /* Direct initialization with the provided contents. */
    buf->update(data);
  }
  return wrap(buf);
}
/** Destroy the storage buffer and release its backing resources. */
void GPU_storagebuf_free(GPUStorageBuf *ssbo)
{
  StorageBuf *buf = unwrap(ssbo);
  delete buf;
}
/** Upload \a data into the storage buffer (size taken from the buffer itself). */
void GPU_storagebuf_update(GPUStorageBuf *ssbo, const void *data)
{
  StorageBuf *buf = unwrap(ssbo);
  buf->update(data);
}
/** Bind the storage buffer to the given shader binding \a slot. */
void GPU_storagebuf_bind(GPUStorageBuf *ssbo, int slot)
{
  StorageBuf *buf = unwrap(ssbo);
  buf->bind(slot);
}
/** Unbind the storage buffer from its current binding slot. */
void GPU_storagebuf_unbind(GPUStorageBuf *ssbo)
{
  StorageBuf *buf = unwrap(ssbo);
  buf->unbind();
}
/* Unbind storage buffers from all binding slots. */
void GPU_storagebuf_unbind_all()
{
  /* FIXME: Not implemented — intentionally a no-op for now. */
}
/** Fill the whole buffer with zeros. Convenience wrapper over #GPU_storagebuf_clear. */
void GPU_storagebuf_clear_to_zero(GPUStorageBuf *ssbo)
{
  const uint32_t zero = 0;
  GPU_storagebuf_clear(ssbo, zero);
}
/** Fill the whole buffer with the 32-bit pattern \a clear_value. */
void GPU_storagebuf_clear(GPUStorageBuf *ssbo, uint32_t clear_value)
{
  StorageBuf *buf = unwrap(ssbo);
  buf->clear(clear_value);
}
/**
 * Copy \a copy_size bytes from vertex buffer \a src (starting at \a src_offset)
 * into the storage buffer at \a dst_offset.
 */
void GPU_storagebuf_copy_sub_from_vertbuf(
    GPUStorageBuf *ssbo, GPUVertBuf *src, uint dst_offset, uint src_offset, uint copy_size)
{
  StorageBuf *dst = unwrap(ssbo);
  dst->copy_sub(unwrap(src), dst_offset, src_offset, copy_size);
}
/**
 * Enqueue a flush of GPU-resident buffer memory back to the host within the
 * GPU command stream, so a subsequent #GPU_storagebuf_read is synchronized
 * with the work submitted before this call. Without it, a read is asynchronous
 * and sees whatever memory is host-visible at that time.
 */
void GPU_storagebuf_sync_to_host(GPUStorageBuf *ssbo)
{
  StorageBuf *buf = unwrap(ssbo);
  buf->async_flush_to_host();
}
/**
 * Read back the buffer contents into \a data (must be large enough to hold the
 * whole buffer). See #GPU_storagebuf_sync_to_host for the synchronization rules.
 */
void GPU_storagebuf_read(GPUStorageBuf *ssbo, void *data)
{
  StorageBuf *buf = unwrap(ssbo);
  buf->read(data);
}
/** \} */