GPU: Remove wrapper type for gpu::StorageBuf
This is the first step toward merging DRW_gpu_wrapper.hh into the GPU module, and is very similar to #119825.

Pull Request: https://projects.blender.org/blender/blender/pulls/144329
Committed by: Clément Foucault
Parent: 6dca66d606
Commit: 7a97105b28
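For context, the change is mechanical at call sites: the opaque GPUStorageBuf handle is replaced by the concrete blender::gpu::StorageBuf class, while the GPU_storagebuf_* C-style API keeps its names and signatures. A minimal sketch of a call site after this commit (the header name, buffer contents, and binding slot are illustrative assumptions, not taken from the diff):

#include "GPU_storage_buffer.hh" /* Assumed header; it declares the GPU_storagebuf_* API. */

static void example_create_bind_free()
{
  const float values[4] = {0.0f, 1.0f, 2.0f, 3.0f};
  /* GPU_storagebuf_create_ex() now returns blender::gpu::StorageBuf * directly;
   * it previously returned the opaque GPUStorageBuf * wrapper. */
  blender::gpu::StorageBuf *ssbo = GPU_storagebuf_create_ex(
      sizeof(values), values, GPU_USAGE_STATIC, "example_ssbo");

  GPU_storagebuf_bind(ssbo, 0); /* Slot 0 is an arbitrary example slot. */
  /* ... dispatch compute work that reads the SSBO ... */
  GPU_storagebuf_unbind(ssbo);
  GPU_storagebuf_free(ssbo);
}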
@@ -76,17 +76,17 @@ class EvalOutputAPI::EvalOutput {
  // data structure. They need to be overridden in the specific instances of the EvalOutput derived
  // classes if needed, while the interfaces above are overridden through VolatileEvalOutput.

-  virtual GPUStorageBuf *create_patch_arrays_buf()
+  virtual gpu::StorageBuf *create_patch_arrays_buf()
  {
    return nullptr;
  }

-  virtual GPUStorageBuf *get_patch_index_buf()
+  virtual gpu::StorageBuf *get_patch_index_buf()
  {
    return nullptr;
  }

-  virtual GPUStorageBuf *get_patch_param_buf()
+  virtual gpu::StorageBuf *get_patch_param_buf()
  {
    return nullptr;
  }
@@ -101,17 +101,17 @@ class EvalOutputAPI::EvalOutput {
    return nullptr;
  }

-  virtual GPUStorageBuf *create_face_varying_patch_array_buf(const int /*face_varying_channel*/)
+  virtual gpu::StorageBuf *create_face_varying_patch_array_buf(const int /*face_varying_channel*/)
  {
    return nullptr;
  }

-  virtual GPUStorageBuf *get_face_varying_patch_index_buf(const int /*face_varying_channel*/)
+  virtual gpu::StorageBuf *get_face_varying_patch_index_buf(const int /*face_varying_channel*/)
  {
    return nullptr;
  }

-  virtual GPUStorageBuf *get_face_varying_patch_param_buf(const int /*face_varying_channel*/)
+  virtual gpu::StorageBuf *get_face_varying_patch_param_buf(const int /*face_varying_channel*/)
  {
    return nullptr;
  }
@@ -15,11 +15,11 @@ using OpenSubdiv::Osd::PatchArrayVector;

namespace blender::opensubdiv {

-static GPUStorageBuf *create_patch_array_buffer(const PatchArrayVector &patch_arrays)
+static gpu::StorageBuf *create_patch_array_buffer(const PatchArrayVector &patch_arrays)
{
  const size_t patch_array_size = sizeof(PatchArray);
  const size_t patch_array_byte_size = patch_array_size * patch_arrays.size();
-  GPUStorageBuf *storage_buf = GPU_storagebuf_create_ex(
+  gpu::StorageBuf *storage_buf = GPU_storagebuf_create_ex(
      patch_array_byte_size, patch_arrays.data(), GPU_USAGE_STATIC, "osd_patch_array");
  return storage_buf;
}
@@ -43,13 +43,13 @@ GpuEvalOutput::GpuEvalOutput(const StencilTable *vertex_stencils,
{
}

-GPUStorageBuf *GpuEvalOutput::create_patch_arrays_buf()
+gpu::StorageBuf *GpuEvalOutput::create_patch_arrays_buf()
{
  GPUPatchTable *patch_table = getPatchTable();
  return create_patch_array_buffer(patch_table->GetPatchArrays());
}

-GPUStorageBuf *GpuEvalOutput::create_face_varying_patch_array_buf(const int face_varying_channel)
+gpu::StorageBuf *GpuEvalOutput::create_face_varying_patch_array_buf(const int face_varying_channel)
{
  GPUPatchTable *patch_table = getFVarPatchTable(face_varying_channel);
  return create_patch_array_buffer(patch_table->GetFVarPatchArrays(face_varying_channel));
@@ -31,14 +31,14 @@ class GpuEvalOutput : public VolatileEvalOutput<GPUVertexBuffer,
                const PatchTable *patch_table,
                EvaluatorCache *evaluator_cache = nullptr);

-  GPUStorageBuf *create_patch_arrays_buf() override;
+  gpu::StorageBuf *create_patch_arrays_buf() override;

-  GPUStorageBuf *get_patch_index_buf() override
+  gpu::StorageBuf *get_patch_index_buf() override
  {
    return getPatchTable()->GetPatchIndexBuffer();
  }

-  GPUStorageBuf *get_patch_param_buf() override
+  gpu::StorageBuf *get_patch_param_buf() override
  {
    return getPatchTable()->GetPatchParamBuffer();
  }
@@ -53,15 +53,15 @@ class GpuEvalOutput : public VolatileEvalOutput<GPUVertexBuffer,
    return getSrcVertexDataBuffer()->get_vertex_buffer();
  }

-  GPUStorageBuf *create_face_varying_patch_array_buf(const int face_varying_channel) override;
+  gpu::StorageBuf *create_face_varying_patch_array_buf(const int face_varying_channel) override;

-  GPUStorageBuf *get_face_varying_patch_index_buf(const int face_varying_channel) override
+  gpu::StorageBuf *get_face_varying_patch_index_buf(const int face_varying_channel) override
  {
    GPUPatchTable *patch_table = getFVarPatchTable(face_varying_channel);
    return patch_table->GetFVarPatchIndexBuffer(face_varying_channel);
  }

-  GPUStorageBuf *get_face_varying_patch_param_buf(const int face_varying_channel) override
+  gpu::StorageBuf *get_face_varying_patch_param_buf(const int face_varying_channel) override
  {
    GPUPatchTable *patch_table = getFVarPatchTable(face_varying_channel);
    return patch_table->GetFVarPatchParamBuffer(face_varying_channel);
@@ -352,17 +352,17 @@ void EvalOutputAPI::getPatchMap(blender::gpu::VertBuf *patch_map_handles,
  memcpy(buffer_nodes.data(), quadtree.data(), sizeof(PatchMap::QuadNode) * quadtree.size());
}

-GPUStorageBuf *EvalOutputAPI::create_patch_arrays_buf()
+gpu::StorageBuf *EvalOutputAPI::create_patch_arrays_buf()
{
  return implementation_->create_patch_arrays_buf();
}

-GPUStorageBuf *EvalOutputAPI::get_patch_index_buf()
+gpu::StorageBuf *EvalOutputAPI::get_patch_index_buf()
{
  return implementation_->get_patch_index_buf();
}

-GPUStorageBuf *EvalOutputAPI::get_patch_param_buf()
+gpu::StorageBuf *EvalOutputAPI::get_patch_param_buf()
{
  return implementation_->get_patch_param_buf();
}
@@ -377,17 +377,17 @@ gpu::VertBuf *EvalOutputAPI::get_source_data_buf()
  return implementation_->get_source_data_buf();
}

-GPUStorageBuf *EvalOutputAPI::create_face_varying_patch_array_buf(const int face_varying_channel)
+gpu::StorageBuf *EvalOutputAPI::create_face_varying_patch_array_buf(const int face_varying_channel)
{
  return implementation_->create_face_varying_patch_array_buf(face_varying_channel);
}

-GPUStorageBuf *EvalOutputAPI::get_face_varying_patch_index_buf(const int face_varying_channel)
+gpu::StorageBuf *EvalOutputAPI::get_face_varying_patch_index_buf(const int face_varying_channel)
{
  return implementation_->get_face_varying_patch_index_buf(face_varying_channel);
}

-GPUStorageBuf *EvalOutputAPI::get_face_varying_patch_param_buf(const int face_varying_channel)
+gpu::StorageBuf *EvalOutputAPI::get_face_varying_patch_param_buf(const int face_varying_channel)
{
  return implementation_->get_face_varying_patch_param_buf(face_varying_channel);
}
@@ -48,14 +48,14 @@ using OpenSubdiv::Osd::PatchArrayVector;

namespace blender::opensubdiv {

-template<class T> GPUStorageBuf *create_buffer(std::vector<T> const &src, const char *name)
+template<class T> gpu::StorageBuf *create_buffer(std::vector<T> const &src, const char *name)
{
  if (src.empty()) {
    return nullptr;
  }

  const size_t buffer_size = src.size() * sizeof(T);
-  GPUStorageBuf *storage_buffer = GPU_storagebuf_create_ex(
+  gpu::StorageBuf *storage_buffer = GPU_storagebuf_create_ex(
      buffer_size, &src.at(0), GPU_USAGE_STATIC, name);

  return storage_buffer;
@@ -88,7 +88,7 @@ GPUStencilTableSSBO::GPUStencilTableSSBO(LimitStencilTable const *limitStencilTa
  }
}

-static void storage_buffer_free(GPUStorageBuf **buffer)
+static void storage_buffer_free(gpu::StorageBuf **buffer)
{
  if (*buffer) {
    GPU_storagebuf_free(*buffer);
@@ -195,12 +195,12 @@ bool GPUComputeEvaluator::EvalStencils(gpu::VertBuf *srcBuffer,
                                       BufferDescriptor const &duDesc,
                                       gpu::VertBuf *dvBuffer,
                                       BufferDescriptor const &dvDesc,
-                                       GPUStorageBuf *sizesBuffer,
-                                       GPUStorageBuf *offsetsBuffer,
-                                       GPUStorageBuf *indicesBuffer,
-                                       GPUStorageBuf *weightsBuffer,
-                                       GPUStorageBuf *duWeightsBuffer,
-                                       GPUStorageBuf *dvWeightsBuffer,
+                                       gpu::StorageBuf *sizesBuffer,
+                                       gpu::StorageBuf *offsetsBuffer,
+                                       gpu::StorageBuf *indicesBuffer,
+                                       gpu::StorageBuf *weightsBuffer,
+                                       gpu::StorageBuf *duWeightsBuffer,
+                                       gpu::StorageBuf *dvWeightsBuffer,
                                       int start,
                                       int end) const
{
@@ -267,8 +267,8 @@ bool GPUComputeEvaluator::EvalPatches(gpu::VertBuf *srcBuffer,
                                      int numPatchCoords,
                                      gpu::VertBuf *patchCoordsBuffer,
                                      const PatchArrayVector &patchArrays,
-                                      GPUStorageBuf *patchIndexBuffer,
-                                      GPUStorageBuf *patchParamsBuffer)
+                                      gpu::StorageBuf *patchIndexBuffer,
+                                      gpu::StorageBuf *patchParamsBuffer)
{
  if (_patchKernel.shader == nullptr) {
    return false;
@@ -45,39 +45,39 @@ class GPUStencilTableSSBO {
  ~GPUStencilTableSSBO();

  // interfaces needed for GLSLComputeKernel
-  GPUStorageBuf *GetSizesBuffer() const
+  gpu::StorageBuf *GetSizesBuffer() const
  {
    return sizes_buf;
  }
-  GPUStorageBuf *GetOffsetsBuffer() const
+  gpu::StorageBuf *GetOffsetsBuffer() const
  {
    return offsets_buf;
  }
-  GPUStorageBuf *GetIndicesBuffer() const
+  gpu::StorageBuf *GetIndicesBuffer() const
  {
    return indices_buf;
  }
-  GPUStorageBuf *GetWeightsBuffer() const
+  gpu::StorageBuf *GetWeightsBuffer() const
  {
    return weights_buf;
  }
-  GPUStorageBuf *GetDuWeightsBuffer() const
+  gpu::StorageBuf *GetDuWeightsBuffer() const
  {
    return du_weights_buf;
  }
-  GPUStorageBuf *GetDvWeightsBuffer() const
+  gpu::StorageBuf *GetDvWeightsBuffer() const
  {
    return dv_weights_buf;
  }
-  GPUStorageBuf *GetDuuWeightsBuffer() const
+  gpu::StorageBuf *GetDuuWeightsBuffer() const
  {
    return duu_weights_buf;
  }
-  GPUStorageBuf *GetDuvWeightsBuffer() const
+  gpu::StorageBuf *GetDuvWeightsBuffer() const
  {
    return duv_weights_buf;
  }
-  GPUStorageBuf *GetDvvWeightsBuffer() const
+  gpu::StorageBuf *GetDvvWeightsBuffer() const
  {
    return dvv_weights_buf;
  }
@@ -87,15 +87,15 @@ class GPUStencilTableSSBO {
  }

 private:
-  GPUStorageBuf *sizes_buf = nullptr;
-  GPUStorageBuf *offsets_buf = nullptr;
-  GPUStorageBuf *indices_buf = nullptr;
-  GPUStorageBuf *weights_buf = nullptr;
-  GPUStorageBuf *du_weights_buf = nullptr;
-  GPUStorageBuf *dv_weights_buf = nullptr;
-  GPUStorageBuf *duu_weights_buf = nullptr;
-  GPUStorageBuf *duv_weights_buf = nullptr;
-  GPUStorageBuf *dvv_weights_buf = nullptr;
+  gpu::StorageBuf *sizes_buf = nullptr;
+  gpu::StorageBuf *offsets_buf = nullptr;
+  gpu::StorageBuf *indices_buf = nullptr;
+  gpu::StorageBuf *weights_buf = nullptr;
+  gpu::StorageBuf *du_weights_buf = nullptr;
+  gpu::StorageBuf *dv_weights_buf = nullptr;
+  gpu::StorageBuf *duu_weights_buf = nullptr;
+  gpu::StorageBuf *duv_weights_buf = nullptr;
+  gpu::StorageBuf *dvv_weights_buf = nullptr;
  int _numStencils;
};
@@ -428,12 +428,12 @@ class GPUComputeEvaluator {
                    OpenSubdiv::Osd::BufferDescriptor const &duDesc,
                    gpu::VertBuf *dvBuffer,
                    OpenSubdiv::Osd::BufferDescriptor const &dvDesc,
-                    GPUStorageBuf *sizesBuffer,
-                    GPUStorageBuf *offsetsBuffer,
-                    GPUStorageBuf *indicesBuffer,
-                    GPUStorageBuf *weightsBuffer,
-                    GPUStorageBuf *duWeightsBuffer,
-                    GPUStorageBuf *dvWeightsBuffer,
+                    gpu::StorageBuf *sizesBuffer,
+                    gpu::StorageBuf *offsetsBuffer,
+                    gpu::StorageBuf *indicesBuffer,
+                    gpu::StorageBuf *weightsBuffer,
+                    gpu::StorageBuf *duWeightsBuffer,
+                    gpu::StorageBuf *dvWeightsBuffer,
                    int start,
                    int end) const;
@@ -735,8 +735,8 @@ class GPUComputeEvaluator {
                   int numPatchCoords,
                   gpu::VertBuf *patchCoordsBuffer,
                   const OpenSubdiv::Osd::PatchArrayVector &patchArrays,
-                   GPUStorageBuf *patchIndexBuffer,
-                   GPUStorageBuf *patchParamsBuffer);
+                   gpu::StorageBuf *patchIndexBuffer,
+                   gpu::StorageBuf *patchParamsBuffer);

  /// \brief Generic limit eval function. This function has a same
  /// signature as other device kernels have so that it can be called
@@ -1381,7 +1381,7 @@ class GPUComputeEvaluator {
  } _patchKernel;

  int _workGroupSize;
-  GPUStorageBuf *_patchArraysSSBO = nullptr;
+  gpu::StorageBuf *_patchArraysSSBO = nullptr;

  int GetDispatchSize(int count) const;
@@ -21,7 +21,7 @@ GPUPatchTable *GPUPatchTable::Create(PatchTable const *far_patch_table, void * /
  return nullptr;
}

-static void discard_buffer(GPUStorageBuf **buffer)
+static void discard_buffer(gpu::StorageBuf **buffer)
{
  if (*buffer != nullptr) {
    GPU_storagebuf_free(*buffer);
@@ -29,10 +29,10 @@ static void discard_buffer(GPUStorageBuf **buffer)
  }
}

-static void discard_list(std::vector<GPUStorageBuf *> &buffers)
+static void discard_list(std::vector<gpu::StorageBuf *> &buffers)
{
  while (!buffers.empty()) {
-    GPUStorageBuf *buffer = buffers.back();
+    gpu::StorageBuf *buffer = buffers.back();
    buffers.pop_back();
    GPU_storagebuf_free(buffer);
  }
@@ -31,13 +31,13 @@ class GPUPatchTable : private NonCopyable<GPUPatchTable> {
  }

  /// Returns the GL index buffer containing the patch control vertices
-  GPUStorageBuf *GetPatchIndexBuffer() const
+  gpu::StorageBuf *GetPatchIndexBuffer() const
  {
    return _patchIndexBuffer;
  }

  /// Returns the GL index buffer containing the patch parameter
-  GPUStorageBuf *GetPatchParamBuffer() const
+  gpu::StorageBuf *GetPatchParamBuffer() const
  {
    return _patchParamBuffer;
  }
@@ -49,7 +49,7 @@ class GPUPatchTable : private NonCopyable<GPUPatchTable> {
  }

  /// Returns the GL index buffer containing the varying control vertices
-  GPUStorageBuf *GetVaryingPatchIndexBuffer() const
+  gpu::StorageBuf *GetVaryingPatchIndexBuffer() const
  {
    return _varyingIndexBuffer;
  }
@@ -67,13 +67,13 @@ class GPUPatchTable : private NonCopyable<GPUPatchTable> {
  }

  /// Returns the GL index buffer containing face-varying control vertices
-  GPUStorageBuf *GetFVarPatchIndexBuffer(int fvarChannel = 0) const
+  gpu::StorageBuf *GetFVarPatchIndexBuffer(int fvarChannel = 0) const
  {
    return _fvarIndexBuffers[fvarChannel];
  }

  /// Returns the GL index buffer containing face-varying patch params
-  GPUStorageBuf *GetFVarPatchParamBuffer(int fvarChannel = 0) const
+  gpu::StorageBuf *GetFVarPatchParamBuffer(int fvarChannel = 0) const
  {
    return _fvarParamBuffers[fvarChannel];
  }
@@ -86,15 +86,15 @@ class GPUPatchTable : private NonCopyable<GPUPatchTable> {

  PatchArrayVector _patchArrays;

-  GPUStorageBuf *_patchIndexBuffer = nullptr;
-  GPUStorageBuf *_patchParamBuffer = nullptr;
+  gpu::StorageBuf *_patchIndexBuffer = nullptr;
+  gpu::StorageBuf *_patchParamBuffer = nullptr;

  PatchArrayVector _varyingPatchArrays;
-  GPUStorageBuf *_varyingIndexBuffer = nullptr;
+  gpu::StorageBuf *_varyingIndexBuffer = nullptr;

  std::vector<PatchArrayVector> _fvarPatchArrays;
-  std::vector<GPUStorageBuf *> _fvarIndexBuffers;
-  std::vector<GPUStorageBuf *> _fvarParamBuffers;
+  std::vector<gpu::StorageBuf *> _fvarIndexBuffers;
+  std::vector<gpu::StorageBuf *> _fvarParamBuffers;
};

} // namespace blender::opensubdiv
@@ -139,13 +139,13 @@ class EvalOutputAPI {
                   int *patches_are_triangular);

  // Copy the patch arrays buffer used by OpenSubDiv for the source data to the given buffer.
-  GPUStorageBuf *create_patch_arrays_buf();
+  gpu::StorageBuf *create_patch_arrays_buf();

  // Wrap the patch index buffer used by OpenSubDiv for the source data with the given buffer.
-  GPUStorageBuf *get_patch_index_buf();
+  gpu::StorageBuf *get_patch_index_buf();

  // Wrap the patch param buffer used by OpenSubDiv for the source data with the given buffer.
-  GPUStorageBuf *get_patch_param_buf();
+  gpu::StorageBuf *get_patch_param_buf();

  // Wrap the buffer used by OpenSubDiv for the source data with the given buffer.
  gpu::VertBuf *get_source_buf();
@@ -155,15 +155,15 @@ class EvalOutputAPI {

  // Copy the patch arrays buffer used by OpenSubDiv for the face varying channel with the given
  // buffer.
-  GPUStorageBuf *create_face_varying_patch_array_buf(const int face_varying_channel);
+  gpu::StorageBuf *create_face_varying_patch_array_buf(const int face_varying_channel);

  // Wrap the patch index buffer used by OpenSubDiv for the face varying channel with the given
  // buffer.
-  GPUStorageBuf *get_face_varying_patch_index_buf(const int face_varying_channel);
+  gpu::StorageBuf *get_face_varying_patch_index_buf(const int face_varying_channel);

  // Wrap the patch param buffer used by OpenSubDiv for the face varying channel with the given
  // buffer.
-  GPUStorageBuf *get_face_varying_patch_param_buf(const int face_varying_channel);
+  gpu::StorageBuf *get_face_varying_patch_param_buf(const int face_varying_channel);

  // Wrap the buffer used by OpenSubDiv for the face varying channel with the given buffer.
  gpu::VertBuf *get_face_varying_source_buf(const int face_varying_channel);
Submodule lib/macos_arm64 updated: fb94c1b78b...ce2a40b21f
@@ -172,18 +172,18 @@ void KeyingScreen::compute_gpu(Context &context,
    marker_positions.append(float2(0.0f));
  }

-  GPUStorageBuf *positions_ssbo = GPU_storagebuf_create_ex(marker_positions.size() *
-                                                               sizeof(float2),
-                                                           marker_positions.data(),
-                                                           GPU_USAGE_STATIC,
-                                                           "Marker Positions");
+  gpu::StorageBuf *positions_ssbo = GPU_storagebuf_create_ex(marker_positions.size() *
+                                                                 sizeof(float2),
+                                                             marker_positions.data(),
+                                                             GPU_USAGE_STATIC,
+                                                             "Marker Positions");
  const int positions_ssbo_location = GPU_shader_get_ssbo_binding(shader, "marker_positions");
  GPU_storagebuf_bind(positions_ssbo, positions_ssbo_location);

-  GPUStorageBuf *colors_ssbo = GPU_storagebuf_create_ex(marker_colors.size() * sizeof(float4),
-                                                        marker_colors.data(),
-                                                        GPU_USAGE_STATIC,
-                                                        "Marker Colors");
+  gpu::StorageBuf *colors_ssbo = GPU_storagebuf_create_ex(marker_colors.size() * sizeof(float4),
+                                                          marker_colors.data(),
+                                                          GPU_USAGE_STATIC,
+                                                          "Marker Colors");
  const int colors_ssbo_location = GPU_shader_get_ssbo_binding(shader, "marker_colors");
  GPU_storagebuf_bind(colors_ssbo, colors_ssbo_location);
@@ -210,7 +210,7 @@ class UniformCommon : public DataBuffer<T, len, false>, NonMovable, NonCopyable
template<typename T, int64_t len, bool device_only>
class StorageCommon : public DataBuffer<T, len, false>, NonMovable, NonCopyable {
 protected:
-  GPUStorageBuf *ssbo_;
+  gpu::StorageBuf *ssbo_;

#ifndef NDEBUG
  const char *name_ = typeid(T).name();
@@ -255,12 +255,12 @@ class StorageCommon : public DataBuffer<T, len, false>, NonMovable, NonCopyable
    GPU_storagebuf_read(ssbo_, this->data_);
  }

-  operator GPUStorageBuf *() const
+  operator gpu::StorageBuf *() const
  {
    return ssbo_;
  }
  /* To be able to use it with DRW_shgroup_*_ref(). */
-  GPUStorageBuf **operator&()
+  gpu::StorageBuf **operator&()
  {
    return &ssbo_;
  }
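As the hunk above shows, the draw-manager convenience wrapper keeps working unchanged: StorageCommon owns the SSBO and hands it out through an implicit conversion, whose target type is now the concrete class. A condensed sketch of that pattern (member and operator names follow the diff; the class name and namespace here are hypothetical, and the DataBuffer template machinery is omitted):

namespace blender::draw { /* Assumed namespace for this sketch. */

class StorageCommonSketch {
 protected:
  gpu::StorageBuf *ssbo_ = nullptr;

 public:
  /* Implicit conversion lets the wrapper be passed directly to the
   * GPU_storagebuf_*() functions and to PassBase::bind_ssbo(). */
  operator gpu::StorageBuf *() const
  {
    return ssbo_;
  }

  /* Taking the address yields a gpu::StorageBuf **, for reference-style
   * bindings (DRW_shgroup_*_ref()) that re-read the pointer at draw time. */
  gpu::StorageBuf **operator&()
  {
    return &ssbo_;
  }
};

} // namespace blender::draw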
@@ -980,9 +980,9 @@ void draw_subdiv_extract_pos(const DRWSubdivCache &cache, gpu::VertBuf *pos, gpu
    src_extra_buffer = evaluator->eval_output->get_source_data_buf();
  }

-  GPUStorageBuf *patch_arrays_buffer = evaluator->eval_output->create_patch_arrays_buf();
-  GPUStorageBuf *patch_index_buffer = evaluator->eval_output->get_patch_index_buf();
-  GPUStorageBuf *patch_param_buffer = evaluator->eval_output->get_patch_param_buf();
+  gpu::StorageBuf *patch_arrays_buffer = evaluator->eval_output->create_patch_arrays_buf();
+  gpu::StorageBuf *patch_index_buffer = evaluator->eval_output->get_patch_index_buf();
+  gpu::StorageBuf *patch_param_buffer = evaluator->eval_output->get_patch_param_buf();

  gpu::Shader *shader = DRW_shader_subdiv_get(orco ? SubdivShaderType::PATCH_EVALUATION_ORCO :
                                                     SubdivShaderType::PATCH_EVALUATION);
@@ -1039,11 +1039,11 @@ void draw_subdiv_extract_uvs(const DRWSubdivCache &cache,
  int src_buffer_offset = evaluator->eval_output->get_face_varying_source_offset(
      face_varying_channel);

-  GPUStorageBuf *patch_arrays_buffer = evaluator->eval_output->create_face_varying_patch_array_buf(
+  gpu::StorageBuf *patch_arrays_buffer =
+      evaluator->eval_output->create_face_varying_patch_array_buf(face_varying_channel);
+  gpu::StorageBuf *patch_index_buffer = evaluator->eval_output->get_face_varying_patch_index_buf(
      face_varying_channel);
-  GPUStorageBuf *patch_index_buffer = evaluator->eval_output->get_face_varying_patch_index_buf(
-      face_varying_channel);
-  GPUStorageBuf *patch_param_buffer = evaluator->eval_output->get_face_varying_patch_param_buf(
+  gpu::StorageBuf *patch_param_buffer = evaluator->eval_output->get_face_varying_patch_param_buf(
      face_varying_channel);

  gpu::Shader *shader = DRW_shader_subdiv_get(SubdivShaderType::PATCH_EVALUATION_FVAR);
@@ -1246,9 +1246,9 @@ void draw_subdiv_build_fdots_buffers(const DRWSubdivCache &cache,
  OpenSubdiv_Evaluator *evaluator = subdiv->evaluator;

  gpu::VertBuf *src_buffer = evaluator->eval_output->get_source_buf();
-  GPUStorageBuf *patch_arrays_buffer = evaluator->eval_output->create_patch_arrays_buf();
-  GPUStorageBuf *patch_index_buffer = evaluator->eval_output->get_patch_index_buf();
-  GPUStorageBuf *patch_param_buffer = evaluator->eval_output->get_patch_param_buf();
+  gpu::StorageBuf *patch_arrays_buffer = evaluator->eval_output->create_patch_arrays_buf();
+  gpu::StorageBuf *patch_index_buffer = evaluator->eval_output->get_patch_index_buf();
+  gpu::StorageBuf *patch_param_buffer = evaluator->eval_output->get_patch_param_buf();

  gpu::Shader *shader = DRW_shader_subdiv_get(
      fdots_nor ? SubdivShaderType::PATCH_EVALUATION_FACE_DOTS_WITH_NORMALS :
@@ -55,7 +55,7 @@ struct RecordingState {
  DRWState pipeline_state = DRW_STATE_NO_DRAW;
  int clip_plane_count = 0;
  /** Used for gl_BaseInstance workaround. */
-  GPUStorageBuf *resource_id_buf = nullptr;
+  gpu::StorageBuf *resource_id_buf = nullptr;
  /** Used for pass simple resource ID. Starts at 1 as 0 is the identity handle. */
  int instance_offset = 1;
@@ -181,8 +181,8 @@ struct ResourceBind {
     * debug info. */
    gpu::UniformBuf *uniform_buf;
    gpu::UniformBuf **uniform_buf_ref;
-    GPUStorageBuf *storage_buf;
-    GPUStorageBuf **storage_buf_ref;
+    gpu::StorageBuf *storage_buf;
+    gpu::StorageBuf **storage_buf_ref;
    /** NOTE: Texture is used for both Sampler and Image binds. */
    gpu::Texture *texture;
    gpu::Texture **texture_ref;
@@ -198,9 +198,9 @@ struct ResourceBind {
      : slot(slot_), is_reference(false), type(Type::UniformBuf), uniform_buf(res){};
  ResourceBind(int slot_, gpu::UniformBuf **res)
      : slot(slot_), is_reference(true), type(Type::UniformBuf), uniform_buf_ref(res){};
-  ResourceBind(int slot_, GPUStorageBuf *res)
+  ResourceBind(int slot_, gpu::StorageBuf *res)
      : slot(slot_), is_reference(false), type(Type::StorageBuf), storage_buf(res){};
-  ResourceBind(int slot_, GPUStorageBuf **res)
+  ResourceBind(int slot_, gpu::StorageBuf **res)
      : slot(slot_), is_reference(true), type(Type::StorageBuf), storage_buf_ref(res){};
  ResourceBind(int slot_, gpu::UniformBuf *res, Type /*type*/)
      : slot(slot_), is_reference(false), type(Type::UniformAsStorageBuf), uniform_buf(res){};
@@ -412,7 +412,7 @@ struct DrawMulti {

struct DrawIndirect {
  gpu::Batch *batch;
-  GPUStorageBuf **indirect_buf;
+  gpu::StorageBuf **indirect_buf;
  ResourceIndex res_index;

  void execute(RecordingState &state) const;
@@ -436,7 +436,7 @@ struct Dispatch {
};

struct DispatchIndirect {
-  GPUStorageBuf **indirect_buf;
+  gpu::StorageBuf **indirect_buf;

  void execute(RecordingState &state) const;
  std::string serialize() const;
@@ -54,7 +54,7 @@ void DebugDraw::reset()
  gpu_draw_buf_used = false;
}

-GPUStorageBuf *DebugDraw::gpu_draw_buf_get()
+gpu::StorageBuf *DebugDraw::gpu_draw_buf_get()
{
#ifdef WITH_DRAW_DEBUG
  gpu_draw_buf_used = true;
@@ -106,7 +106,7 @@ class DebugDraw {
  void display_to_view(View &view);

  /** Get GPU debug draw buffer. Can return nullptr if WITH_DRAW_DEBUG is not enabled. */
-  GPUStorageBuf *gpu_draw_buf_get();
+  gpu::StorageBuf *gpu_draw_buf_get();

  void acquire()
  {
@@ -147,7 +147,7 @@ void Manager::end_sync()

void Manager::debug_bind()
{
-  GPUStorageBuf *gpu_buf = DebugDraw::get().gpu_draw_buf_get();
+  gpu::StorageBuf *gpu_buf = DebugDraw::get().gpu_draw_buf_get();
  if (gpu_buf == nullptr) {
    return;
  }
@@ -360,10 +360,10 @@ class PassBase {
  void bind_texture(int slot, gpu::Texture **texture, GPUSamplerState state = sampler_auto);
  void bind_texture(int slot, gpu::VertBuf *buffer);
  void bind_texture(int slot, gpu::VertBuf **buffer);
-  void bind_ssbo(const char *name, GPUStorageBuf *buffer);
-  void bind_ssbo(const char *name, GPUStorageBuf **buffer);
-  void bind_ssbo(int slot, GPUStorageBuf *buffer);
-  void bind_ssbo(int slot, GPUStorageBuf **buffer);
+  void bind_ssbo(const char *name, gpu::StorageBuf *buffer);
+  void bind_ssbo(const char *name, gpu::StorageBuf **buffer);
+  void bind_ssbo(int slot, gpu::StorageBuf *buffer);
+  void bind_ssbo(int slot, gpu::StorageBuf **buffer);
  void bind_ssbo(const char *name, gpu::UniformBuf *buffer);
  void bind_ssbo(const char *name, gpu::UniformBuf **buffer);
  void bind_ssbo(int slot, gpu::UniformBuf *buffer);
@@ -1194,7 +1194,7 @@ template<class T> inline int PassBase<T>::push_constant_offset(const char *name)
  return GPU_shader_get_uniform(shader_, name);
}

-template<class T> inline void PassBase<T>::bind_ssbo(const char *name, GPUStorageBuf *buffer)
+template<class T> inline void PassBase<T>::bind_ssbo(const char *name, gpu::StorageBuf *buffer)
{
  BLI_assert(buffer != nullptr);
  this->bind_ssbo(GPU_shader_get_ssbo_binding(shader_, name), buffer);
@@ -1269,7 +1269,7 @@ template<class T> inline void PassBase<T>::bind_image(const char *name, gpu::Tex
  this->bind_image(GPU_shader_get_sampler_binding(shader_, name), image);
}

-template<class T> inline void PassBase<T>::bind_ssbo(int slot, GPUStorageBuf *buffer)
+template<class T> inline void PassBase<T>::bind_ssbo(int slot, gpu::StorageBuf *buffer)
{
  BLI_assert(buffer != nullptr);
  create_command(Type::ResourceBind).resource_bind = {slot, buffer};
@@ -1348,7 +1348,7 @@ template<class T> inline void PassBase<T>::bind_image(int slot, gpu::Texture *im
  create_command(Type::ResourceBind).resource_bind = {slot, as_image(image)};
}

-template<class T> inline void PassBase<T>::bind_ssbo(const char *name, GPUStorageBuf **buffer)
+template<class T> inline void PassBase<T>::bind_ssbo(const char *name, gpu::StorageBuf **buffer)
{
  BLI_assert(buffer != nullptr);
  this->bind_ssbo(GPU_shader_get_ssbo_binding(shader_, name), buffer);
@@ -1375,7 +1375,7 @@ template<class T> inline void PassBase<T>::bind_image(const char *name, gpu::Tex
  this->bind_image(GPU_shader_get_sampler_binding(shader_, name), image);
}

-template<class T> inline void PassBase<T>::bind_ssbo(int slot, GPUStorageBuf **buffer)
+template<class T> inline void PassBase<T>::bind_ssbo(int slot, gpu::StorageBuf **buffer)
{
  BLI_assert(buffer != nullptr);
@@ -85,7 +85,7 @@ class Batch {
  /** nullptr if element list not needed */
  blender::gpu::IndexBuf *elem;
  /** Resource ID attribute workaround. */
-  GPUStorageBuf *resource_id_buf;
+  blender::gpu::StorageBuf *resource_id_buf;
  /** Number of vertices to draw for procedural drawcalls. */
  int32_t procedural_vertices;
  /** Bookkeeping. */
@@ -98,8 +98,8 @@ class Batch {
  virtual ~Batch() = default;

  virtual void draw(int v_first, int v_count, int i_first, int i_count) = 0;
-  virtual void draw_indirect(GPUStorageBuf *indirect_buf, intptr_t offset) = 0;
-  virtual void multi_draw_indirect(GPUStorageBuf *indirect_buf,
+  virtual void draw_indirect(blender::gpu::StorageBuf *indirect_buf, intptr_t offset) = 0;
+  virtual void multi_draw_indirect(blender::gpu::StorageBuf *indirect_buf,
                                   int count,
                                   intptr_t offset,
                                   intptr_t stride) = 0;
@@ -266,7 +266,8 @@ bool GPU_batch_vertbuf_has(const blender::gpu::Batch *batch,
 * on some hardware / platform.
 * \note Only to be used by draw manager.
 */
-void GPU_batch_resource_id_buf_set(blender::gpu::Batch *batch, GPUStorageBuf *resource_id_buf);
+void GPU_batch_resource_id_buf_set(blender::gpu::Batch *batch,
+                                   blender::gpu::StorageBuf *resource_id_buf);

/** \} */
@@ -392,7 +393,7 @@ void GPU_batch_draw_advanced(blender::gpu::Batch *batch,
                             int instance_count);

/**
- * Issue a single draw call using arguments sourced from a #GPUStorageBuf.
+ * Issue a single draw call using arguments sourced from a #blender::gpu::StorageBuf.
 * The argument are expected to be valid for the type of geometry contained by this
 * #blender::gpu::Batch (index or non-indexed).
 *
@@ -403,11 +404,11 @@ void GPU_batch_draw_advanced(blender::gpu::Batch *batch,
 * https://registry.khronos.org/OpenGL-Refpages/gl4/html/glDrawArraysIndirect.xhtml
 */
void GPU_batch_draw_indirect(blender::gpu::Batch *batch,
-                             GPUStorageBuf *indirect_buf,
+                             blender::gpu::StorageBuf *indirect_buf,
                             intptr_t offset);

/**
- * Issue \a count draw calls using arguments sourced from a #GPUStorageBuf.
+ * Issue \a count draw calls using arguments sourced from a #blender::gpu::StorageBuf.
 * The \a stride (in bytes) control the spacing between each command description.
 * The argument are expected to be valid for the type of geometry contained by this
 * #blender::gpu::Batch (index or non-indexed).
@@ -419,7 +420,7 @@ void GPU_batch_draw_indirect(blender::gpu::Batch *batch,
 * https://registry.khronos.org/OpenGL-Refpages/gl4/html/glMultiDrawArraysIndirect.xhtml
 */
void GPU_batch_multi_draw_indirect(blender::gpu::Batch *batch,
-                                   GPUStorageBuf *indirect_buf,
+                                   blender::gpu::StorageBuf *indirect_buf,
                                   int count,
                                   intptr_t offset,
                                   intptr_t stride);
@@ -49,5 +49,5 @@ void GPU_compute_dispatch(
 */
void GPU_compute_dispatch_indirect(
    blender::gpu::Shader *shader,
-    GPUStorageBuf *indirect_buf,
+    blender::gpu::StorageBuf *indirect_buf,
    const blender::gpu::shader::SpecializationConstants *constants_state = nullptr);
@@ -17,43 +17,44 @@
#include "GPU_texture.hh"
#include "GPU_vertex_buffer.hh"

-/** Opaque type hiding blender::gpu::StorageBuf. */
-struct GPUStorageBuf;
+namespace blender::gpu {
+class StorageBuf;
+}  // namespace blender::gpu

-GPUStorageBuf *GPU_storagebuf_create_ex(size_t size,
-                                        const void *data,
-                                        GPUUsageType usage,
-                                        const char *name);
+blender::gpu::StorageBuf *GPU_storagebuf_create_ex(size_t size,
+                                                   const void *data,
+                                                   GPUUsageType usage,
+                                                   const char *name);

#define GPU_storagebuf_create(size) \
  GPU_storagebuf_create_ex(size, nullptr, GPU_USAGE_DYNAMIC, __func__);

-void GPU_storagebuf_free(GPUStorageBuf *ssbo);
+void GPU_storagebuf_free(blender::gpu::StorageBuf *ssbo);

-void GPU_storagebuf_update(GPUStorageBuf *ssbo, const void *data);
+void GPU_storagebuf_update(blender::gpu::StorageBuf *ssbo, const void *data);

-void GPU_storagebuf_bind(GPUStorageBuf *ssbo, int slot);
-void GPU_storagebuf_unbind(GPUStorageBuf *ssbo);
+void GPU_storagebuf_bind(blender::gpu::StorageBuf *ssbo, int slot);
+void GPU_storagebuf_unbind(blender::gpu::StorageBuf *ssbo);
/**
 * Resets the internal slot usage tracking. But there is no guarantee that
 * this actually undo the bindings for the next draw call. Only has effect when G_DEBUG_GPU is set.
 */
void GPU_storagebuf_debug_unbind_all();

-void GPU_storagebuf_clear_to_zero(GPUStorageBuf *ssbo);
+void GPU_storagebuf_clear_to_zero(blender::gpu::StorageBuf *ssbo);

/**
 * Clear the content of the buffer using the given #clear_value. #clear_value will be used as a
 * repeatable pattern of 32bits.
 */
-void GPU_storagebuf_clear(GPUStorageBuf *ssbo, uint32_t clear_value);
+void GPU_storagebuf_clear(blender::gpu::StorageBuf *ssbo, uint32_t clear_value);

/**
 * Explicitly sync updated storage buffer contents back to host within the GPU command stream. This
 * ensures any changes made by the GPU are visible to the host.
 * NOTE: This command is only valid for host-visible storage buffers.
 */
-void GPU_storagebuf_sync_to_host(GPUStorageBuf *ssbo);
+void GPU_storagebuf_sync_to_host(blender::gpu::StorageBuf *ssbo);

/**
 * Read back content of the buffer to CPU for inspection.
@@ -66,7 +67,7 @@ void GPU_storagebuf_sync_to_host(GPUStorageBuf *ssbo);
 * Otherwise, this command is synchronized against this call and will stall the CPU until the
 * buffer content can be read by the host.
 */
-void GPU_storagebuf_read(GPUStorageBuf *ssbo, void *data);
+void GPU_storagebuf_read(blender::gpu::StorageBuf *ssbo, void *data);

/**
 * \brief Copy a part of a vertex buffer to a storage buffer.
@@ -77,7 +78,7 @@ void GPU_storagebuf_read(GPUStorageBuf *ssbo, void *data);
 * \param src_offset: where to start copying from (in bytes).
 * \param copy_size: byte size of the segment to copy.
 */
-void GPU_storagebuf_copy_sub_from_vertbuf(GPUStorageBuf *ssbo,
+void GPU_storagebuf_copy_sub_from_vertbuf(blender::gpu::StorageBuf *ssbo,
                                          blender::gpu::VertBuf *src,
                                          uint dst_offset,
                                          uint src_offset,
@@ -87,4 +88,4 @@ void GPU_storagebuf_copy_sub_from_vertbuf(GPUStorageBuf *ssbo,
 * Ensure the ssbo is ready to be used as an indirect buffer in `GPU_batch_draw_indirect`.
 * NOTE: Internally, this is only required for the OpenGL backend.
 */
-void GPU_storagebuf_sync_as_indirect_buffer(GPUStorageBuf *ssbo);
+void GPU_storagebuf_sync_as_indirect_buffer(blender::gpu::StorageBuf *ssbo);
@@ -20,8 +20,8 @@ class DummyBatch : public Batch {
            int /*instance_count*/) override
  {
  }
-  void draw_indirect(GPUStorageBuf * /*indirect_buf*/, intptr_t /*offset*/) override {}
-  void multi_draw_indirect(GPUStorageBuf * /*indirect_buf*/,
+  void draw_indirect(StorageBuf * /*indirect_buf*/, intptr_t /*offset*/) override {}
+  void multi_draw_indirect(StorageBuf * /*indirect_buf*/,
                           int /*count*/,
                           intptr_t /*offset*/,
                           intptr_t /*stride*/) override
@@ -230,7 +230,7 @@ bool GPU_batch_vertbuf_has(const Batch *batch, const VertBuf *vertex_buf)
  return false;
}

-void GPU_batch_resource_id_buf_set(Batch *batch, GPUStorageBuf *resource_id_buf)
+void GPU_batch_resource_id_buf_set(Batch *batch, blender::gpu::StorageBuf *resource_id_buf)
{
  BLI_assert(resource_id_buf);
  batch->flag |= GPU_BATCH_DIRTY;
@@ -508,7 +508,7 @@ void GPU_batch_draw_advanced(
  batch->draw(vertex_first, vertex_count, instance_first, instance_count);
}

-void GPU_batch_draw_indirect(Batch *batch, GPUStorageBuf *indirect_buf, intptr_t offset)
+void GPU_batch_draw_indirect(Batch *batch, blender::gpu::StorageBuf *indirect_buf, intptr_t offset)
{
  BLI_assert(batch != nullptr);
  BLI_assert(indirect_buf != nullptr);
@@ -518,8 +518,11 @@ void GPU_batch_draw_indirect(Batch *batch, GPUStorageBuf *indirect_buf, intptr_t
  batch->draw_indirect(indirect_buf, offset);
}

-void GPU_batch_multi_draw_indirect(
-    Batch *batch, GPUStorageBuf *indirect_buf, int count, intptr_t offset, intptr_t stride)
+void GPU_batch_multi_draw_indirect(Batch *batch,
+                                   blender::gpu::StorageBuf *indirect_buf,
+                                   int count,
+                                   intptr_t offset,
+                                   intptr_t stride)
{
  BLI_assert(batch != nullptr);
  BLI_assert(indirect_buf != nullptr);
@@ -23,7 +23,7 @@ void GPU_compute_dispatch(blender::gpu::Shader *shader,

void GPU_compute_dispatch_indirect(
    blender::gpu::Shader *shader,
-    GPUStorageBuf *indirect_buf_,
+    blender::gpu::StorageBuf *indirect_buf_,
    const blender::gpu::shader::SpecializationConstants *constants_state)
{
  blender::gpu::GPUBackend &gpu_backend = *blender::gpu::GPUBackend::get();
@@ -61,7 +61,7 @@ class Context {
  int context_id = 0;

  /* Used as a stack. Each render_begin/end pair will push pop from the stack. */
-  Vector<GPUStorageBuf *> printf_buf;
+  Vector<StorageBuf *> printf_buf;

  /** Dummy VBO to feed the procedural batches. */
  VertBuf *dummy_vbo = nullptr;
@@ -330,8 +330,8 @@ void printf_begin(Context *ctx)
  if (!shader::gpu_shader_dependency_has_printf()) {
    return;
  }
-  GPUStorageBuf *printf_buf = GPU_storagebuf_create(GPU_SHADER_PRINTF_MAX_CAPACITY *
-                                                    sizeof(uint32_t));
+  StorageBuf *printf_buf = GPU_storagebuf_create(GPU_SHADER_PRINTF_MAX_CAPACITY *
+                                                 sizeof(uint32_t));
  GPU_storagebuf_clear_to_zero(printf_buf);
  ctx->printf_buf.append(printf_buf);
}
@@ -344,7 +344,7 @@ void printf_end(Context *ctx)
  if (ctx->printf_buf.is_empty()) {
    return;
  }
-  GPUStorageBuf *printf_buf = ctx->printf_buf.pop_last();
+  StorageBuf *printf_buf = ctx->printf_buf.pop_last();

  Vector<uint32_t> data(GPU_SHADER_PRINTF_MAX_CAPACITY);
  GPU_storagebuf_read(printf_buf, data.data());
@@ -48,10 +48,10 @@ StorageBuf::~StorageBuf()

using namespace blender::gpu;

-GPUStorageBuf *GPU_storagebuf_create_ex(size_t size,
-                                        const void *data,
-                                        GPUUsageType usage,
-                                        const char *name)
+blender::gpu::StorageBuf *GPU_storagebuf_create_ex(size_t size,
+                                                   const void *data,
+                                                   GPUUsageType usage,
+                                                   const char *name)
{
  StorageBuf *ssbo = GPUBackend::get()->storagebuf_alloc(size, usage, name);
  /* Direct init. */
@@ -65,27 +65,27 @@ GPUStorageBuf *GPU_storagebuf_create_ex(size_t size,
    ssbo->update(uninitialized_data.data());
  }

-  return wrap(ssbo);
+  return ssbo;
}

-void GPU_storagebuf_free(GPUStorageBuf *ssbo)
+void GPU_storagebuf_free(blender::gpu::StorageBuf *ssbo)
{
-  delete unwrap(ssbo);
+  delete ssbo;
}

-void GPU_storagebuf_update(GPUStorageBuf *ssbo, const void *data)
+void GPU_storagebuf_update(blender::gpu::StorageBuf *ssbo, const void *data)
{
-  unwrap(ssbo)->update(data);
+  ssbo->update(data);
}

-void GPU_storagebuf_bind(GPUStorageBuf *ssbo, int slot)
+void GPU_storagebuf_bind(blender::gpu::StorageBuf *ssbo, int slot)
{
-  unwrap(ssbo)->bind(slot);
+  ssbo->bind(slot);
}

-void GPU_storagebuf_unbind(GPUStorageBuf *ssbo)
+void GPU_storagebuf_unbind(blender::gpu::StorageBuf *ssbo)
{
-  unwrap(ssbo)->unbind();
+  ssbo->unbind();
}

void GPU_storagebuf_debug_unbind_all()
@@ -93,38 +93,38 @@ void GPU_storagebuf_debug_unbind_all()
  Context::get()->debug_unbind_all_ssbo();
}

-void GPU_storagebuf_clear_to_zero(GPUStorageBuf *ssbo)
+void GPU_storagebuf_clear_to_zero(blender::gpu::StorageBuf *ssbo)
{
  GPU_storagebuf_clear(ssbo, 0);
}

-void GPU_storagebuf_clear(GPUStorageBuf *ssbo, uint32_t clear_value)
+void GPU_storagebuf_clear(blender::gpu::StorageBuf *ssbo, uint32_t clear_value)
{
-  unwrap(ssbo)->clear(clear_value);
+  ssbo->clear(clear_value);
}

-void GPU_storagebuf_copy_sub_from_vertbuf(GPUStorageBuf *ssbo,
+void GPU_storagebuf_copy_sub_from_vertbuf(blender::gpu::StorageBuf *ssbo,
                                          blender::gpu::VertBuf *src,
                                          uint dst_offset,
                                          uint src_offset,
                                          uint copy_size)
{
-  unwrap(ssbo)->copy_sub(src, dst_offset, src_offset, copy_size);
+  ssbo->copy_sub(src, dst_offset, src_offset, copy_size);
}

-void GPU_storagebuf_sync_to_host(GPUStorageBuf *ssbo)
+void GPU_storagebuf_sync_to_host(blender::gpu::StorageBuf *ssbo)
{
-  unwrap(ssbo)->async_flush_to_host();
+  ssbo->async_flush_to_host();
}

-void GPU_storagebuf_read(GPUStorageBuf *ssbo, void *data)
+void GPU_storagebuf_read(blender::gpu::StorageBuf *ssbo, void *data)
{
-  unwrap(ssbo)->read(data);
+  ssbo->read(data);
}

-void GPU_storagebuf_sync_as_indirect_buffer(GPUStorageBuf *ssbo)
+void GPU_storagebuf_sync_as_indirect_buffer(blender::gpu::StorageBuf *ssbo)
{
-  unwrap(ssbo)->sync_as_indirect_buffer();
+  ssbo->sync_as_indirect_buffer();
}

/** \} */
@@ -10,10 +10,9 @@

#include "BLI_sys_types.h"

-struct GPUStorageBuf;
-
namespace blender::gpu {

+class StorageBuf;
class VertBuf;

#ifndef NDEBUG
@@ -49,20 +48,6 @@ class StorageBuf {
  virtual void sync_as_indirect_buffer() = 0;
};

-/* Syntactic sugar. */
-static inline GPUStorageBuf *wrap(StorageBuf *storage_buf)
-{
-  return reinterpret_cast<GPUStorageBuf *>(storage_buf);
-}
-static inline StorageBuf *unwrap(GPUStorageBuf *storage_buf)
-{
-  return reinterpret_cast<StorageBuf *>(storage_buf);
-}
-static inline const StorageBuf *unwrap(const GPUStorageBuf *storage_buf)
-{
-  return reinterpret_cast<const StorageBuf *>(storage_buf);
-}
-
#undef DEBUG_NAME_LEN

} // namespace blender::gpu
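The hunk above deletes the glue that made the opaque handle work: a forward-declared struct GPUStorageBuf in the public header, bridged to the real class by inline reinterpret_casts. For reference, the removed pattern looked like this (names verbatim from the diff, reflowed into one self-contained sketch):

/* Opaque public handle, removed by this commit. */
struct GPUStorageBuf;

namespace blender::gpu {

class StorageBuf; /* The real implementation class. */

/* "Syntactic sugar": the C-style API trafficked in GPUStorageBuf * and the
 * implementation cast at the module boundary. */
static inline GPUStorageBuf *wrap(StorageBuf *storage_buf)
{
  return reinterpret_cast<GPUStorageBuf *>(storage_buf);
}
static inline StorageBuf *unwrap(GPUStorageBuf *storage_buf)
{
  return reinterpret_cast<StorageBuf *>(storage_buf);
}

} // namespace blender::gpu

With the class exposed directly, every wrap()/unwrap() call site collapses to a plain pointer, as the gpu_storage_buffer.cc hunks above show.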
@@ -81,8 +81,8 @@ class MTLBatch : public Batch {
  ~MTLBatch() override = default;

  void draw(int v_first, int v_count, int i_first, int i_count) override;
-  void draw_indirect(GPUStorageBuf *indirect_buf, intptr_t offset) override;
-  void multi_draw_indirect(GPUStorageBuf * /*indirect_buf*/,
+  void draw_indirect(StorageBuf *indirect_buf, intptr_t offset) override;
+  void multi_draw_indirect(StorageBuf * /*indirect_buf*/,
                           int /*count*/,
                           intptr_t /*offset*/,
                           intptr_t /*stride*/) override
@@ -115,7 +115,7 @@ class MTLBatch : public Batch {

 private:
  void draw_advanced(int v_first, int v_count, int i_first, int i_count);
-  void draw_advanced_indirect(GPUStorageBuf *indirect_buf, intptr_t offset);
+  void draw_advanced_indirect(StorageBuf *indirect_buf, intptr_t offset);
  int prepare_vertex_binding(MTLVertBuf *verts,
                             MTLRenderPipelineStateDescriptor &desc,
                             const MTLShaderInterface *interface,
@@ -37,7 +37,7 @@ void MTLBatch::draw(int v_first, int v_count, int i_first, int i_count)
  this->draw_advanced(v_first, v_count, i_first, i_count);
}

-void MTLBatch::draw_indirect(GPUStorageBuf *indirect_buf, intptr_t offset)
+void MTLBatch::draw_indirect(StorageBuf *indirect_buf, intptr_t offset)
{
  this->draw_advanced_indirect(indirect_buf, offset);
}
@@ -649,7 +649,7 @@ void MTLBatch::draw_advanced(int v_first, int v_count, int i_first, int i_count)
  this->unbind(rec);
}

-void MTLBatch::draw_advanced_indirect(GPUStorageBuf *indirect_buf, intptr_t offset)
+void MTLBatch::draw_advanced_indirect(StorageBuf *indirect_buf, intptr_t offset)
{
  /* Setup RenderPipelineState for batch. */
  MTLContext *ctx = MTLContext::get();
@@ -663,7 +663,7 @@ void MTLBatch::draw_advanced_indirect(GPUStorageBuf *indirect_buf, intptr_t offs
  }

  /* Fetch indirect buffer Metal handle. */
-  MTLStorageBuf *mtlssbo = static_cast<MTLStorageBuf *>(unwrap(indirect_buf));
+  MTLStorageBuf *mtlssbo = static_cast<MTLStorageBuf *>(indirect_buf);
  id<MTLBuffer> mtl_indirect_buf = mtlssbo->get_metal_buffer();
  BLI_assert(mtl_indirect_buf != nil);
  if (mtl_indirect_buf == nil) {
@@ -1300,7 +1300,7 @@ bool MTLContext::ensure_buffer_bindings(
    }
  }

-  /* Bind Global GPUStorageBuf's */
+  /* Bind Global StorageBuf's */
  /* Iterate through expected SSBOs in the shader interface, and check if the globally bound ones
   * match. This is used to support the gpu_uniformbuffer module, where the uniform data is global,
   * and not owned by the shader instance. */
@@ -270,12 +270,12 @@ void GLBatch::draw(int v_first, int v_count, int i_first, int i_count)
  }
}

-void GLBatch::draw_indirect(GPUStorageBuf *indirect_buf, intptr_t offset)
+void GLBatch::draw_indirect(blender::gpu::StorageBuf *indirect_buf, intptr_t offset)
{
  GL_CHECK_RESOURCES("Batch");

  this->bind();
-  dynamic_cast<GLStorageBuf *>(unwrap(indirect_buf))->bind_as(GL_DRAW_INDIRECT_BUFFER);
+  dynamic_cast<GLStorageBuf *>(indirect_buf)->bind_as(GL_DRAW_INDIRECT_BUFFER);

  GLenum gl_type = to_gl(prim_type);
  if (elem) {
@@ -290,7 +290,7 @@ void GLBatch::draw_indirect(GPUStorageBuf *indirect_buf, intptr_t offset)
  glBindBuffer(GL_DRAW_INDIRECT_BUFFER, 0);
}

-void GLBatch::multi_draw_indirect(GPUStorageBuf *indirect_buf,
+void GLBatch::multi_draw_indirect(blender::gpu::StorageBuf *indirect_buf,
                                  int count,
                                  intptr_t offset,
                                  intptr_t stride)
@@ -298,7 +298,7 @@ void GLBatch::multi_draw_indirect(GPUStorageBuf *indirect_buf,
  GL_CHECK_RESOURCES("Batch");

  this->bind();
-  dynamic_cast<GLStorageBuf *>(unwrap(indirect_buf))->bind_as(GL_DRAW_INDIRECT_BUFFER);
+  dynamic_cast<GLStorageBuf *>(indirect_buf)->bind_as(GL_DRAW_INDIRECT_BUFFER);

  GLenum gl_type = to_gl(prim_type);
  if (elem) {
@@ -91,8 +91,8 @@ class GLBatch : public Batch {

 public:
  void draw(int v_first, int v_count, int i_first, int i_count) override;
-  void draw_indirect(GPUStorageBuf *indirect_buf, intptr_t offset) override;
-  void multi_draw_indirect(GPUStorageBuf *indirect_buf,
+  void draw_indirect(StorageBuf *indirect_buf, intptr_t offset) override;
+  void multi_draw_indirect(StorageBuf *indirect_buf,
                           int count,
                           intptr_t offset,
                           intptr_t stride) override;
@@ -115,7 +115,7 @@ void GLVertArray::update_bindings(const GLuint vao,
    component_len = 2;
  }
  if (input) {
-    dynamic_cast<GLStorageBuf *>(unwrap(batch->resource_id_buf))->bind_as(GL_ARRAY_BUFFER);
+    dynamic_cast<GLStorageBuf *>(batch->resource_id_buf)->bind_as(GL_ARRAY_BUFFER);
    glEnableVertexAttribArray(input->location);
    glVertexAttribDivisor(input->location, 1);
    glVertexAttribIPointer(
@@ -35,7 +35,7 @@ static void test_buffer_texture()
                    GPU_shader_get_sampler_binding(shader, "bufferTexture"));

  /* Construct SSBO. */
-  GPUStorageBuf *ssbo = GPU_storagebuf_create_ex(
+  StorageBuf *ssbo = GPU_storagebuf_create_ex(
      4 * sizeof(float), nullptr, GPU_USAGE_STATIC, __func__);
  GPU_storagebuf_bind(ssbo, GPU_shader_get_ssbo_binding(shader, "data_out"));
@@ -83,7 +83,7 @@ static void test_compute_indirect()
  uint4 commands[1] = {
      {SIZE, SIZE, 1, 0},
  };
-  GPUStorageBuf *compute_commands = GPU_storagebuf_create_ex(
+  StorageBuf *compute_commands = GPU_storagebuf_create_ex(
      sizeof(commands), &commands, GPU_USAGE_STATIC, __func__);

  /* Dispatch compute task. */
@@ -18,7 +18,7 @@

namespace blender::gpu::tests {
struct CallData {
-  GPUStorageBuf *ssbo = nullptr;
+  StorageBuf *ssbo = nullptr;
  Vector<float> data;

  float float_in;
@@ -206,7 +206,7 @@ static void test_shader_compute_ssbo()
  GPU_shader_bind(shader);

  /* Construct SSBO. */
-  GPUStorageBuf *ssbo = GPU_storagebuf_create_ex(
+  StorageBuf *ssbo = GPU_storagebuf_create_ex(
      SIZE * sizeof(uint32_t), nullptr, GPU_USAGE_DEVICE_ONLY, __func__);
  GPU_storagebuf_bind(ssbo, GPU_shader_get_ssbo_binding(shader, "data_out"));
@@ -24,7 +24,7 @@ namespace blender::gpu::tests {

struct ShaderSpecializationConst {
  gpu::Shader *shader = nullptr;
-  GPUStorageBuf *ssbo = nullptr;
+  StorageBuf *ssbo = nullptr;
  Vector<int> data;

  float float_in;
@@ -27,8 +27,7 @@ static Vector<int32_t> test_data()

static void test_storage_buffer_create_update_read()
{
-  GPUStorageBuf *ssbo = GPU_storagebuf_create_ex(
-      SIZE_IN_BYTES, nullptr, GPU_USAGE_STATIC, __func__);
+  StorageBuf *ssbo = GPU_storagebuf_create_ex(SIZE_IN_BYTES, nullptr, GPU_USAGE_STATIC, __func__);
  EXPECT_NE(ssbo, nullptr);

  /* Upload some dummy data. */
@@ -52,8 +51,7 @@ GPU_TEST(storage_buffer_create_update_read);

static void test_storage_buffer_clear_zero()
{
-  GPUStorageBuf *ssbo = GPU_storagebuf_create_ex(
-      SIZE_IN_BYTES, nullptr, GPU_USAGE_STATIC, __func__);
+  StorageBuf *ssbo = GPU_storagebuf_create_ex(SIZE_IN_BYTES, nullptr, GPU_USAGE_STATIC, __func__);
  EXPECT_NE(ssbo, nullptr);

  /* Upload some dummy data. */
@@ -77,8 +75,7 @@ GPU_TEST(storage_buffer_clear_zero);

static void test_storage_buffer_clear()
{
-  GPUStorageBuf *ssbo = GPU_storagebuf_create_ex(
-      SIZE_IN_BYTES, nullptr, GPU_USAGE_STATIC, __func__);
+  StorageBuf *ssbo = GPU_storagebuf_create_ex(SIZE_IN_BYTES, nullptr, GPU_USAGE_STATIC, __func__);
  EXPECT_NE(ssbo, nullptr);

  GPU_storagebuf_clear(ssbo, 157255);
@@ -100,8 +97,7 @@ GPU_TEST(storage_buffer_clear);

static void test_storage_buffer_clear_byte_pattern()
{
-  GPUStorageBuf *ssbo = GPU_storagebuf_create_ex(
-      SIZE_IN_BYTES, nullptr, GPU_USAGE_STATIC, __func__);
+  StorageBuf *ssbo = GPU_storagebuf_create_ex(SIZE_IN_BYTES, nullptr, GPU_USAGE_STATIC, __func__);
  EXPECT_NE(ssbo, nullptr);

  /* Tests a different clear command on Metal. */
@@ -124,8 +120,7 @@ GPU_TEST(storage_buffer_clear_byte_pattern);

static void test_storage_buffer_copy_from_vertex_buffer()
{
-  GPUStorageBuf *ssbo = GPU_storagebuf_create_ex(
-      SIZE_IN_BYTES, nullptr, GPU_USAGE_STATIC, __func__);
+  StorageBuf *ssbo = GPU_storagebuf_create_ex(SIZE_IN_BYTES, nullptr, GPU_USAGE_STATIC, __func__);
  EXPECT_NE(ssbo, nullptr);

  /* Create vertex buffer. */
@@ -72,12 +72,12 @@ void VKBatch::draw(int vertex_first, int vertex_count, int instance_first, int i
  }
}

-void VKBatch::draw_indirect(GPUStorageBuf *indirect_buf, intptr_t offset)
+void VKBatch::draw_indirect(StorageBuf *indirect_buf, intptr_t offset)
{
  multi_draw_indirect(indirect_buf, 1, offset, 0);
}

-void VKBatch::multi_draw_indirect(GPUStorageBuf *indirect_buf,
+void VKBatch::multi_draw_indirect(StorageBuf *indirect_buf,
                                  const int count,
                                  const intptr_t offset,
                                  const intptr_t stride)
@@ -17,8 +17,8 @@ namespace blender::gpu {
class VKBatch : public Batch {
 public:
  void draw(int vertex_first, int vertex_count, int instance_first, int instance_count) override;
-  void draw_indirect(GPUStorageBuf *indirect_buf, intptr_t offset) override;
-  void multi_draw_indirect(GPUStorageBuf *indirect_buf,
+  void draw_indirect(StorageBuf *indirect_buf, intptr_t offset) override;
+  void multi_draw_indirect(StorageBuf *indirect_buf,
                           int count,
                           intptr_t offset,
                           intptr_t stride) override;
@@ -526,7 +526,7 @@ class VectorBlurOperation : public NodeOperation {
  void execute_gpu()
  {
    Result max_tile_velocity = this->compute_max_tile_velocity();
-    GPUStorageBuf *tile_indirection_buffer = this->dilate_max_velocity(max_tile_velocity);
+    gpu::StorageBuf *tile_indirection_buffer = this->dilate_max_velocity(max_tile_velocity);
    this->compute_motion_blur(max_tile_velocity, tile_indirection_buffer);
    max_tile_velocity.release();
    GPU_storagebuf_free(tile_indirection_buffer);
@@ -564,7 +564,7 @@ class VectorBlurOperation : public NodeOperation {
   * the output will be an indirection buffer that points to a particular tile in the original max
   * tile velocity image. This is done as a form of performance optimization, see the shader for
   * more information. */
-  GPUStorageBuf *dilate_max_velocity(Result &max_tile_velocity)
+  gpu::StorageBuf *dilate_max_velocity(Result &max_tile_velocity)
  {
    gpu::Shader *shader = context().get_shader("compositor_motion_blur_max_velocity_dilate");
    GPU_shader_bind(shader);
@@ -577,7 +577,7 @@ class VectorBlurOperation : public NodeOperation {
   * composed of blocks of 32, we get 16k / 32 = 512. So the table is 512x512, but we store two
   * tables for the previous and next velocities, so we double that. */
    const int size = sizeof(uint32_t) * 512 * 512 * 2;
-    GPUStorageBuf *tile_indirection_buffer = GPU_storagebuf_create_ex(
+    gpu::StorageBuf *tile_indirection_buffer = GPU_storagebuf_create_ex(
        size, nullptr, GPU_USAGE_DEVICE_ONLY, __func__);
    GPU_storagebuf_clear_to_zero(tile_indirection_buffer);
    const int slot = GPU_shader_get_ssbo_binding(shader, "tile_indirection_buf");
@@ -592,7 +592,7 @@ class VectorBlurOperation : public NodeOperation {
    return tile_indirection_buffer;
  }

-  void compute_motion_blur(Result &max_tile_velocity, GPUStorageBuf *tile_indirection_buffer)
+  void compute_motion_blur(Result &max_tile_velocity, gpu::StorageBuf *tile_indirection_buffer)
  {
    gpu::Shader *shader = context().get_shader("compositor_motion_blur");
    GPU_shader_bind(shader);