2023-08-16 00:20:26 +10:00
|
|
|
/* SPDX-FileCopyrightText: 2005 Blender Authors
|
2023-05-31 16:19:06 +02:00
|
|
|
*
|
|
|
|
|
* SPDX-License-Identifier: GPL-2.0-or-later */
|
2015-12-06 21:20:19 +01:00
|
|
|
|
2019-02-18 08:08:12 +11:00
|
|
|
/** \file
|
|
|
|
|
* \ingroup gpu
|
2023-02-12 17:40:13 +01:00
|
|
|
*
|
2025-08-11 09:34:28 +02:00
|
|
|
* A #blender::gpu::Shader is a container for backend specific shader program.
|
2015-12-06 21:20:19 +01:00
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
#pragma once
|
|
|
|
|
|
2025-02-27 19:20:33 +01:00
|
|
|
#include <mutex>
|
2024-11-01 20:00:31 +01:00
|
|
|
#include <optional>
|
|
|
|
|
|
2024-06-05 18:45:57 +02:00
|
|
|
#include "BLI_span.hh"
|
2024-11-01 20:00:31 +01:00
|
|
|
#include "BLI_string_ref.hh"
|
2024-06-05 18:45:57 +02:00
|
|
|
#include "BLI_vector.hh"
|
2024-11-01 20:00:31 +01:00
|
|
|
|
2024-06-07 18:45:31 +02:00
|
|
|
#include "GPU_common_types.hh"
|
2024-03-23 01:24:18 +01:00
|
|
|
#include "GPU_shader_builtin.hh"
|
2015-12-06 21:20:19 +01:00
|
|
|
|
2024-03-24 16:38:30 +01:00
|
|
|
namespace blender::gpu {

/* Forward declarations; full definitions live in the GPU module implementation. */
class VertBuf;
class Shader;

}  // namespace blender::gpu
|
2015-12-06 21:20:19 +01:00
|
|
|
|
2022-01-17 14:45:22 +01:00
|
|
|
/** Opaque type hiding #blender::gpu::shader::ShaderCreateInfo */
|
2024-03-23 01:24:18 +01:00
|
|
|
struct GPUShaderCreateInfo;
|
2015-12-06 21:20:19 +01:00
|
|
|
|
2024-02-27 13:41:31 +01:00
|
|
|
/* Hardware limit is 16. Position attribute is always needed so we reduce to 15.
|
2023-02-12 17:40:13 +01:00
|
|
|
* This makes sure the GPUVertexFormat name buffer does not overflow. */
|
2024-10-31 15:18:29 +01:00
|
|
|
constexpr static int GPU_MAX_ATTR = 15;
|
2023-02-12 17:40:13 +01:00
|
|
|
|
|
|
|
|
/* Determined by the maximum uniform buffer size divided by chunk size. */
|
2024-10-31 15:18:29 +01:00
|
|
|
constexpr static int GPU_MAX_UNIFORM_ATTR = 8;
|
2023-02-12 17:40:13 +01:00
|
|
|
|
|
|
|
|
/* -------------------------------------------------------------------- */
|
|
|
|
|
/** \name Creation
|
|
|
|
|
* \{ */
|
|
|
|
|
|
2025-04-16 18:49:21 +02:00
|
|
|
/**
|
|
|
|
|
* Preprocess a raw GLSL source to adhere to our backend compatible shader language.
|
|
|
|
|
* Needed if the string was not part of our build system and is used in a #GPUShaderCreateInfo.
|
|
|
|
|
*/
|
|
|
|
|
std::string GPU_shader_preprocess_source(blender::StringRefNull original);
|
|
|
|
|
|
2023-02-12 17:40:13 +01:00
|
|
|
/**
|
|
|
|
|
* Create a shader using the given #GPUShaderCreateInfo.
|
2024-03-23 01:24:18 +01:00
|
|
|
* Can return a null pointer if compilation fails.
|
2023-02-12 17:40:13 +01:00
|
|
|
*/
|
2025-08-11 09:34:28 +02:00
|
|
|
blender::gpu::Shader *GPU_shader_create_from_info(const GPUShaderCreateInfo *_info);
|
2023-02-12 17:40:13 +01:00
|
|
|
|
2024-10-07 12:54:10 +02:00
|
|
|
/**
|
|
|
|
|
* Same as GPU_shader_create_from_info but will run preprocessor on source strings.
|
|
|
|
|
*/
|
2025-08-11 09:34:28 +02:00
|
|
|
blender::gpu::Shader *GPU_shader_create_from_info_python(const GPUShaderCreateInfo *_info);
|
2024-10-07 12:54:10 +02:00
|
|
|
|
2023-02-12 17:40:13 +01:00
|
|
|
/**
|
|
|
|
|
* Create a shader using a named #GPUShaderCreateInfo registered at startup.
|
|
|
|
|
* These are declared inside `*_info.hh` files using the `GPU_SHADER_CREATE_INFO()` macro.
|
|
|
|
|
* They are also expected to have been flagged using `do_static_compilation`.
|
2024-03-23 01:24:18 +01:00
|
|
|
* Can return a null pointer if compilation fails.
|
2023-02-12 17:40:13 +01:00
|
|
|
*/
|
2025-08-11 09:34:28 +02:00
|
|
|
blender::gpu::Shader *GPU_shader_create_from_info_name(const char *info_name);
|
2023-02-12 17:40:13 +01:00
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* Fetch a named #GPUShaderCreateInfo registered at startup.
|
|
|
|
|
* These are declared inside `*_info.hh` files using the `GPU_SHADER_CREATE_INFO()` macro.
|
2024-03-23 01:24:18 +01:00
|
|
|
* Can return a null pointer if no match is found.
|
2023-02-12 17:40:13 +01:00
|
|
|
*/
|
|
|
|
|
const GPUShaderCreateInfo *GPU_shader_create_info_get(const char *info_name);
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* Error checking for user created shaders.
|
|
|
|
|
 * \return true if create info is valid.
|
|
|
|
|
*/
|
|
|
|
|
bool GPU_shader_create_info_check_error(const GPUShaderCreateInfo *_info, char r_error[128]);
|
|
|
|
|
|
2025-05-29 19:43:02 +02:00
|
|
|
/** Scheduling priority for asynchronous (batched) shader compilation requests. */
enum class CompilationPriority { Low, Medium, High };
|
|
|
|
|
|
2024-06-05 18:45:57 +02:00
|
|
|
using BatchHandle = int64_t;
|
|
|
|
|
/**
|
|
|
|
|
* Request the creation of multiple shaders at once, allowing the backend to use multithreaded
|
|
|
|
|
* compilation. Returns a handle that can be used to poll if all shaders have been compiled, and to
|
|
|
|
|
* retrieve the compiled shaders.
|
2024-11-01 18:57:17 +01:00
|
|
|
* NOTE: This function is asynchronous on OpenGL, but it's blocking on Vulkan.
|
2024-06-05 18:45:57 +02:00
|
|
|
* WARNING: The GPUShaderCreateInfo pointers should be valid until `GPU_shader_batch_finalize` has
|
|
|
|
|
* returned.
|
|
|
|
|
*/
|
2025-05-29 19:43:02 +02:00
|
|
|
BatchHandle GPU_shader_batch_create_from_infos(
|
|
|
|
|
blender::Span<const GPUShaderCreateInfo *> infos,
|
|
|
|
|
CompilationPriority priority = CompilationPriority::High);
|
2024-06-05 18:45:57 +02:00
|
|
|
/**
|
|
|
|
|
* Returns true if all the shaders from the batch have finished their compilation.
|
|
|
|
|
*/
|
|
|
|
|
bool GPU_shader_batch_is_ready(BatchHandle handle);
|
|
|
|
|
/**
|
|
|
|
|
* Retrieve the compiled shaders, in the same order as the `GPUShaderCreateInfo`s.
|
|
|
|
|
* If the compilation has not finished yet, this call will block the thread until all the shaders
|
|
|
|
|
* are ready.
|
|
|
|
|
* Shaders with compilation errors are returned as null pointers.
|
|
|
|
|
* WARNING: The handle will be invalidated by this call, you can't request the same batch twice.
|
|
|
|
|
*/
|
2025-08-11 09:34:28 +02:00
|
|
|
blender::Vector<blender::gpu::Shader *> GPU_shader_batch_finalize(BatchHandle &handle);
|
2025-05-12 19:54:03 +02:00
|
|
|
/**
|
|
|
|
|
* Cancel the compilation of the batch.
|
|
|
|
|
* WARNING: The handle will be invalidated by this call.
|
|
|
|
|
*/
|
|
|
|
|
void GPU_shader_batch_cancel(BatchHandle &handle);
|
2025-07-31 20:06:27 +02:00
|
|
|
/**
|
|
|
|
|
* Returns true if there's any batch still being compiled.
|
|
|
|
|
*/
|
|
|
|
|
bool GPU_shader_batch_is_compiling();
|
2025-05-22 17:53:22 +02:00
|
|
|
/**
|
|
|
|
|
* Wait until all the requested batches have been compiled.
|
|
|
|
|
*/
|
|
|
|
|
void GPU_shader_batch_wait_for_all();
|
2024-06-05 18:45:57 +02:00
|
|
|
|
2023-02-12 17:40:13 +01:00
|
|
|
/** \} */
|
|
|
|
|
|
|
|
|
|
/* -------------------------------------------------------------------- */
|
|
|
|
|
/** \name Free
|
|
|
|
|
* \{ */
|
|
|
|
|
|
2025-08-11 09:34:28 +02:00
|
|
|
void GPU_shader_free(blender::gpu::Shader *shader);
|
2023-02-12 17:40:13 +01:00
|
|
|
|
|
|
|
|
/** \} */
|
|
|
|
|
|
|
|
|
|
/* -------------------------------------------------------------------- */
|
|
|
|
|
/** \name Binding
|
|
|
|
|
* \{ */
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* Set the given shader as active shader for the active GPU context.
|
|
|
|
|
* It replaces any already bound shader.
|
|
|
|
|
* All following draw-calls and dispatches will use this shader.
|
|
|
|
|
* Uniform functions need to have the shader bound in order to work. (TODO: until we use
|
|
|
|
|
* glProgramUniform)
|
|
|
|
|
*/
|
2025-05-19 17:42:55 +02:00
|
|
|
void GPU_shader_bind(
|
2025-08-11 09:34:28 +02:00
|
|
|
blender::gpu::Shader *shader,
|
2025-05-19 17:42:55 +02:00
|
|
|
const blender::gpu::shader::SpecializationConstants *constants_state = nullptr);
|
2023-02-12 17:40:13 +01:00
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* Unbind the active shader.
|
|
|
|
|
* \note this is a no-op in release builds. But it make sense to actually do it in user land code
|
|
|
|
|
* to detect incorrect API usage.
|
|
|
|
|
*/
|
2024-03-23 01:24:18 +01:00
|
|
|
void GPU_shader_unbind();
|
2023-02-12 17:40:13 +01:00
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* Return the currently bound shader to the active GPU context.
|
2024-03-23 01:24:18 +01:00
|
|
|
* \return null pointer if no shader is bound of if no context is active.
|
2023-02-12 17:40:13 +01:00
|
|
|
*/
|
2025-08-11 09:34:28 +02:00
|
|
|
blender::gpu::Shader *GPU_shader_get_bound();
|
2023-02-12 17:40:13 +01:00
|
|
|
|
|
|
|
|
/** \} */
|
|
|
|
|
|
|
|
|
|
/* -------------------------------------------------------------------- */
|
|
|
|
|
/** \name Debugging introspection API.
|
|
|
|
|
* \{ */
|
|
|
|
|
|
2025-08-11 09:34:28 +02:00
|
|
|
const char *GPU_shader_get_name(blender::gpu::Shader *shader);
|
2023-02-12 17:40:13 +01:00
|
|
|
|
|
|
|
|
/** \} */
|
|
|
|
|
|
|
|
|
|
/* -------------------------------------------------------------------- */
|
|
|
|
|
/** \name Uniform API.
|
|
|
|
|
* \{ */
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* Returns binding point location.
|
2023-02-12 22:52:27 +01:00
|
|
|
* Binding location are given to be set at shader compile time and immutable.
|
2023-02-12 17:40:13 +01:00
|
|
|
*/
|
2025-08-11 09:34:28 +02:00
|
|
|
int GPU_shader_get_ubo_binding(blender::gpu::Shader *shader, const char *name);
|
|
|
|
|
int GPU_shader_get_ssbo_binding(blender::gpu::Shader *shader, const char *name);
|
|
|
|
|
int GPU_shader_get_sampler_binding(blender::gpu::Shader *shader, const char *name);
|
2023-02-12 17:40:13 +01:00
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* Returns uniform location.
|
|
|
|
|
* If cached, it is faster than querying the interface for each uniform assignment.
|
|
|
|
|
*/
|
2025-08-11 09:34:28 +02:00
|
|
|
int GPU_shader_get_uniform(blender::gpu::Shader *shader, const char *name);
|
2023-02-12 17:40:13 +01:00
|
|
|
|
2023-12-28 05:34:38 +01:00
|
|
|
/**
|
|
|
|
|
* Returns specialization constant location.
|
|
|
|
|
*/
|
2025-08-11 09:34:28 +02:00
|
|
|
int GPU_shader_get_constant(blender::gpu::Shader *shader, const char *name);
|
2023-12-28 05:34:38 +01:00
|
|
|
|
2023-02-12 17:40:13 +01:00
|
|
|
/**
|
|
|
|
|
* Sets a generic push constant (a.k.a. uniform).
|
|
|
|
|
* \a length and \a array_size should match the create info push_constant declaration.
|
|
|
|
|
*/
|
2023-02-12 23:39:48 +01:00
|
|
|
void GPU_shader_uniform_float_ex(
|
2025-08-11 09:34:28 +02:00
|
|
|
blender::gpu::Shader *shader, int location, int length, int array_size, const float *value);
|
2023-02-12 23:39:48 +01:00
|
|
|
void GPU_shader_uniform_int_ex(
|
2025-08-11 09:34:28 +02:00
|
|
|
blender::gpu::Shader *shader, int location, int length, int array_size, const int *value);
|
2023-02-12 17:40:13 +01:00
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* Sets a generic push constant (a.k.a. uniform).
|
|
|
|
|
* \a length and \a array_size should match the create info push_constant declaration.
|
|
|
|
|
* These functions need to have the shader bound in order to work. (TODO: until we use
|
|
|
|
|
* glProgramUniform)
|
|
|
|
|
*/
|
2025-08-11 09:34:28 +02:00
|
|
|
void GPU_shader_uniform_1i(blender::gpu::Shader *sh, const char *name, int value);
|
|
|
|
|
void GPU_shader_uniform_1b(blender::gpu::Shader *sh, const char *name, bool value);
|
|
|
|
|
void GPU_shader_uniform_1f(blender::gpu::Shader *sh, const char *name, float value);
|
|
|
|
|
void GPU_shader_uniform_2f(blender::gpu::Shader *sh, const char *name, float x, float y);
|
|
|
|
|
void GPU_shader_uniform_3f(blender::gpu::Shader *sh, const char *name, float x, float y, float z);
|
|
|
|
|
void GPU_shader_uniform_4f(
|
|
|
|
|
blender::gpu::Shader *sh, const char *name, float x, float y, float z, float w);
|
|
|
|
|
void GPU_shader_uniform_2fv(blender::gpu::Shader *sh, const char *name, const float data[2]);
|
|
|
|
|
void GPU_shader_uniform_3fv(blender::gpu::Shader *sh, const char *name, const float data[3]);
|
|
|
|
|
void GPU_shader_uniform_4fv(blender::gpu::Shader *sh, const char *name, const float data[4]);
|
|
|
|
|
void GPU_shader_uniform_2iv(blender::gpu::Shader *sh, const char *name, const int data[2]);
|
|
|
|
|
void GPU_shader_uniform_3iv(blender::gpu::Shader *sh, const char *name, const int data[3]);
|
|
|
|
|
void GPU_shader_uniform_mat4(blender::gpu::Shader *sh, const char *name, const float data[4][4]);
|
|
|
|
|
void GPU_shader_uniform_mat3_as_mat4(blender::gpu::Shader *sh,
|
|
|
|
|
const char *name,
|
|
|
|
|
const float data[3][3]);
|
|
|
|
|
void GPU_shader_uniform_1f_array(blender::gpu::Shader *sh,
|
|
|
|
|
const char *name,
|
|
|
|
|
int len,
|
|
|
|
|
const float *val);
|
|
|
|
|
void GPU_shader_uniform_2fv_array(blender::gpu::Shader *sh,
|
|
|
|
|
const char *name,
|
|
|
|
|
int len,
|
|
|
|
|
const float (*val)[2]);
|
|
|
|
|
void GPU_shader_uniform_4fv_array(blender::gpu::Shader *sh,
|
|
|
|
|
const char *name,
|
|
|
|
|
int len,
|
|
|
|
|
const float (*val)[4]);
|
2023-02-12 17:40:13 +01:00
|
|
|
|
|
|
|
|
/** \} */
|
|
|
|
|
|
|
|
|
|
/* -------------------------------------------------------------------- */
|
|
|
|
|
/** \name Attribute API.
|
|
|
|
|
*
|
|
|
|
|
* Used to create #GPUVertexFormat from the shader's vertex input layout.
|
|
|
|
|
* \{ */
|
|
|
|
|
|
2025-08-11 09:34:28 +02:00
|
|
|
uint GPU_shader_get_attribute_len(const blender::gpu::Shader *shader);
|
|
|
|
|
uint GPU_shader_get_ssbo_input_len(const blender::gpu::Shader *shader);
|
|
|
|
|
int GPU_shader_get_attribute(const blender::gpu::Shader *shader, const char *name);
|
|
|
|
|
bool GPU_shader_get_attribute_info(const blender::gpu::Shader *shader,
|
2023-02-12 17:40:13 +01:00
|
|
|
int attr_location,
|
|
|
|
|
char r_name[256],
|
|
|
|
|
int *r_type);
|
2025-08-11 09:34:28 +02:00
|
|
|
bool GPU_shader_get_ssbo_input_info(const blender::gpu::Shader *shader,
|
|
|
|
|
int ssbo_location,
|
|
|
|
|
char r_name[256]);
|
2023-02-12 17:40:13 +01:00
|
|
|
|
|
|
|
|
/** \} */
|
|
|
|
|
|
2023-12-28 05:34:38 +01:00
|
|
|
/* -------------------------------------------------------------------- */
|
|
|
|
|
/** \name Specialization API.
|
|
|
|
|
*
|
|
|
|
|
* Used to allow specialization constants.
|
|
|
|
|
* IMPORTANT: All constants must be specified before binding a shader that needs specialization.
|
|
|
|
|
* Otherwise, it will produce undefined behavior.
|
|
|
|
|
* \{ */
|
|
|
|
|
|
2025-05-19 17:42:55 +02:00
|
|
|
/* Return the default constants.
|
|
|
|
|
* All constants available for this shader should fit the returned structure. */
|
|
|
|
|
const blender::gpu::shader::SpecializationConstants &GPU_shader_get_default_constant_state(
|
2025-08-11 09:34:28 +02:00
|
|
|
blender::gpu::Shader *sh);
|
2023-12-28 05:34:38 +01:00
|
|
|
|
2024-06-20 18:02:44 +02:00
|
|
|
using SpecializationBatchHandle = int64_t;
|
|
|
|
|
|
2024-06-07 18:45:31 +02:00
|
|
|
/** A shader paired with one set of specialization constant values to pre-compile. */
struct ShaderSpecialization {
  /* Shader to specialize. */
  blender::gpu::Shader *shader;
  /* Constant values defining this specialization variant. */
  blender::gpu::shader::SpecializationConstants constants;
};
|
|
|
|
|
|
2024-06-20 18:02:44 +02:00
|
|
|
/**
|
|
|
|
|
* Request the compilation of multiple specialization constant variations at once,
|
2024-10-01 09:59:33 +10:00
|
|
|
* allowing the backend to use multi-threaded compilation.
|
2024-06-20 18:02:44 +02:00
|
|
|
* Returns a handle that can be used to poll if all variations have been compiled.
|
2024-09-30 11:21:28 +02:00
|
|
|
* A NULL handle indicates no compilation of any variant was possible (likely due to
|
|
|
|
|
 * some state not being currently available) and so no batch was created. Compilation
|
2024-10-01 09:59:33 +10:00
|
|
|
* of the specialized variant will instead occur at draw/dispatch time.
|
2024-09-30 11:21:28 +02:00
|
|
|
* NOTE: This function is asynchronous on OpenGL and Metal and a no-op on Vulkan.
|
2024-06-20 18:02:44 +02:00
|
|
|
* Batches are processed one by one in FIFO order.
|
|
|
|
|
* WARNING: Binding a specialization before the batch finishes will fail.
|
|
|
|
|
*/
|
|
|
|
|
SpecializationBatchHandle GPU_shader_batch_specializations(
|
2025-05-29 19:43:02 +02:00
|
|
|
blender::Span<ShaderSpecialization> specializations,
|
|
|
|
|
CompilationPriority priority = CompilationPriority::High);
|
2024-06-20 18:02:44 +02:00
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* Returns true if all the specializations from the batch have finished their compilation.
|
|
|
|
|
* NOTE: Polling this function is required for the compilation process to keep progressing.
|
|
|
|
|
* WARNING: Invalidates the handle if it returns true.
|
|
|
|
|
*/
|
|
|
|
|
bool GPU_shader_batch_specializations_is_ready(SpecializationBatchHandle &handle);
|
2024-06-07 18:45:31 +02:00
|
|
|
|
2025-05-12 19:54:03 +02:00
|
|
|
/**
|
|
|
|
|
* Cancel the specialization batch.
|
|
|
|
|
* WARNING: The handle will be invalidated by this call.
|
|
|
|
|
*/
|
|
|
|
|
void GPU_shader_batch_specializations_cancel(SpecializationBatchHandle &handle);
|
|
|
|
|
|
2023-12-28 05:34:38 +01:00
|
|
|
/** \} */
|
|
|
|
|
|
2023-02-12 17:40:13 +01:00
|
|
|
/* -------------------------------------------------------------------- */
|
|
|
|
|
/** \name Legacy API
|
|
|
|
|
*
|
|
|
|
|
* All of this section is deprecated and should be ported to use the API described above.
|
|
|
|
|
* \{ */
|
|
|
|
|
|
2023-02-14 21:51:03 +01:00
|
|
|
/**
|
|
|
|
|
* Shader cache warming.
|
|
|
|
|
* For each shader, rendering APIs perform a two-step compilation:
|
|
|
|
|
*
|
|
|
|
|
* * The first stage is Front-End compilation which only needs to be performed once, and generates
|
|
|
|
|
* a portable intermediate representation. This happens during `gpu::Shader::finalize()`.
|
|
|
|
|
*
|
|
|
|
|
* * The second is Back-End compilation which compiles a device-specific executable shader
|
|
|
|
|
* program. This compilation requires some contextual pipeline state which is baked into the
|
|
|
|
|
* executable shader source, producing a Pipeline State Object (PSO). In OpenGL, backend
|
|
|
|
|
* compilation happens in the background, within the driver, but can still incur runtime stutters.
|
|
|
|
|
* In Metal/Vulkan, PSOs are compiled explicitly. These are currently resolved within the backend
|
|
|
|
|
* based on the current pipeline state and can incur runtime stalls when they occur.
|
|
|
|
|
*
|
|
|
|
|
* Shader Cache warming uses the specified parent shader set using `GPU_shader_set_parent(..)` as a
|
|
|
|
|
* template reference for pre-compiling Render Pipeline State Objects (PSOs) outside of the main
|
|
|
|
|
* render pipeline.
|
|
|
|
|
*
|
|
|
|
|
* PSOs require descriptors containing information on the render state for a given shader, which
|
|
|
|
|
* includes input vertex data layout and output pixel formats, along with some state such as
|
2023-02-15 13:11:14 +11:00
|
|
|
* blend mode and color output masks. As this state information is usually consistent between
|
2023-02-14 21:51:03 +01:00
|
|
|
* similar draws, we can assign a parent shader and use this shader's cached pipeline state's to
|
|
|
|
|
* prime compilations.
|
|
|
|
|
*
|
|
|
|
|
* Shaders do not necessarily have to be similar in functionality to be used as a parent, so long
|
2023-02-15 13:11:14 +11:00
|
|
|
 * as the #GPUVertFormat and #GPUFrameBuffer which they are used with remain the same.
|
|
|
|
|
* Other bindings such as textures, uniforms and UBOs are all assigned independently as dynamic
|
|
|
|
|
* state.
|
2023-02-14 21:51:03 +01:00
|
|
|
*
|
|
|
|
|
* This function should be called asynchronously, mitigating the impact of run-time stuttering from
|
|
|
|
|
* dynamic compilation of PSOs during normal rendering.
|
|
|
|
|
*
|
|
|
|
|
 * \param shader: The shader whose cache to warm.
|
|
|
|
|
* \param limit: The maximum number of PSOs to compile within a call. Specifying
|
|
|
|
|
* a limit <= 0 will compile a PSO for all cached PSOs in the parent shader. */
|
2025-08-11 09:34:28 +02:00
|
|
|
void GPU_shader_warm_cache(blender::gpu::Shader *shader, int limit);
|
2023-02-14 21:51:03 +01:00
|
|
|
|
|
|
|
|
/* We expect the parent shader to be compiled and already have some cached PSOs when being assigned
|
|
|
|
|
* as a reference. Ensure the parent shader still exists when `GPU_shader_cache_warm(..)` is
|
|
|
|
|
* called. */
|
2025-08-11 09:34:28 +02:00
|
|
|
void GPU_shader_set_parent(blender::gpu::Shader *shader, blender::gpu::Shader *parent);
|
2023-02-14 21:51:03 +01:00
|
|
|
|
2023-02-12 17:40:13 +01:00
|
|
|
/**
|
|
|
|
|
* Indexed commonly used uniform name for faster lookup into the uniform cache.
|
|
|
|
|
*/
|
2024-03-23 01:24:18 +01:00
|
|
|
enum GPUUniformBuiltin {
  GPU_UNIFORM_MODEL = 0, /* mat4 ModelMatrix */
  GPU_UNIFORM_VIEW, /* mat4 ViewMatrix */
  GPU_UNIFORM_MODELVIEW, /* mat4 ModelViewMatrix */
  GPU_UNIFORM_PROJECTION, /* mat4 ProjectionMatrix */
  GPU_UNIFORM_VIEWPROJECTION, /* mat4 ViewProjectionMatrix */
  GPU_UNIFORM_MVP, /* mat4 ModelViewProjectionMatrix */

  GPU_UNIFORM_MODEL_INV, /* mat4 ModelMatrixInverse */
  GPU_UNIFORM_VIEW_INV, /* mat4 ViewMatrixInverse */
  GPU_UNIFORM_MODELVIEW_INV, /* mat4 ModelViewMatrixInverse */
  GPU_UNIFORM_PROJECTION_INV, /* mat4 ProjectionMatrixInverse */
  GPU_UNIFORM_VIEWPROJECTION_INV, /* mat4 ViewProjectionMatrixInverse */

  GPU_UNIFORM_NORMAL, /* mat3 NormalMatrix */
  GPU_UNIFORM_CLIPPLANES, /* vec4 WorldClipPlanes[] */

  GPU_UNIFORM_COLOR, /* vec4 color */
  GPU_UNIFORM_BASE_INSTANCE, /* int baseInstance */
  GPU_UNIFORM_RESOURCE_CHUNK, /* int resourceChunk */
  GPU_UNIFORM_RESOURCE_ID, /* int resourceId */
  GPU_UNIFORM_SRGB_TRANSFORM, /* bool srgbTarget */
};
/* Number of builtin uniforms: last enumerator + 1. Keep in sync with the enum above. */
#define GPU_NUM_UNIFORMS (GPU_UNIFORM_SRGB_TRANSFORM + 1)
|
2020-08-20 13:05:22 +02:00
|
|
|
|
2023-02-14 10:29:48 +11:00
|
|
|
/**
|
|
|
|
|
* TODO: To be moved as private API. Not really used outside of gpu_matrix.cc and doesn't really
|
|
|
|
|
* offer a noticeable performance boost.
|
|
|
|
|
*/
|
2025-08-11 09:34:28 +02:00
|
|
|
int GPU_shader_get_builtin_uniform(blender::gpu::Shader *shader, int builtin);
|
2023-02-12 17:40:13 +01:00
|
|
|
|
2024-02-15 08:13:44 +01:00
|
|
|
/**
|
2024-02-16 14:26:46 +11:00
|
|
|
* Compile all statically defined shaders and print a report to the console.
|
2024-02-15 08:13:44 +01:00
|
|
|
*
|
|
|
|
|
* This is used for platform support, where bug reports can list all failing shaders.
|
|
|
|
|
*/
|
|
|
|
|
void GPU_shader_compile_static();
|
|
|
|
|
|
2024-09-16 14:03:14 +02:00
|
|
|
void GPU_shader_cache_dir_clear_old();
|
|
|
|
|
|
2024-02-16 14:26:46 +11:00
|
|
|
/** DEPRECATED: Use hard-coded buffer location instead. */
|
2024-03-23 01:24:18 +01:00
|
|
|
enum GPUUniformBlockBuiltin {
  GPU_UNIFORM_BLOCK_VIEW = 0, /* viewBlock */
  GPU_UNIFORM_BLOCK_MODEL, /* modelBlock */
  GPU_UNIFORM_BLOCK_INFO, /* infoBlock */

  /* NOTE(review): the DRW_* entries look draw-manager specific — confirm against the draw module. */
  GPU_UNIFORM_BLOCK_DRW_VIEW,
  GPU_UNIFORM_BLOCK_DRW_MODEL,
  GPU_UNIFORM_BLOCK_DRW_INFOS,
  GPU_UNIFORM_BLOCK_DRW_CLIPPING,

  GPU_NUM_UNIFORM_BLOCKS, /* Special value, denotes number of builtin uniforms block. */
};
|
2020-08-20 13:05:22 +02:00
|
|
|
|
2021-12-09 20:01:47 +11:00
|
|
|
/** DEPRECATED: Kept only because of Python GPU API. */
|
2025-08-11 09:34:28 +02:00
|
|
|
int GPU_shader_get_uniform_block(blender::gpu::Shader *shader, const char *name);
|
2015-12-06 21:20:19 +01:00
|
|
|
|
2023-02-12 17:40:13 +01:00
|
|
|
/** \} */
|
2024-12-11 00:43:32 +01:00
|
|
|
|
|
|
|
|
/** Free the shader (if non-null) and reset the variable to nullptr. */
#define GPU_SHADER_FREE_SAFE(shader) \
  do { \
    if (shader != nullptr) { \
      GPU_shader_free(shader); \
      shader = nullptr; \
    } \
  } while (0)
|
2025-02-27 19:20:33 +01:00
|
|
|
|
|
|
|
|
#include "BLI_utility_mixins.hh"
|
|
|
|
|
#include <atomic>
|
|
|
|
|
#include <mutex>
|
|
|
|
|
|
|
|
|
|
namespace blender::gpu {
|
|
|
|
|
|
2025-08-11 09:34:28 +02:00
|
|
|
/* blender::gpu::Shader wrapper that makes compilation threadsafe.
|
2025-02-27 19:20:33 +01:00
|
|
|
* The compilation is deferred until the first get() call.
|
|
|
|
|
* Concurrently using the shader from multiple threads is still unsafe. */
|
|
|
|
|
class StaticShader : NonCopyable {
|
|
|
|
|
private:
|
|
|
|
|
std::string info_name_;
|
2025-08-11 09:34:28 +02:00
|
|
|
std::atomic<blender::gpu::Shader *> shader_ = nullptr;
|
|
|
|
|
/* TODO: Failed compilation detection should be supported by the blender::gpu::Shader API. */
|
2025-04-09 20:05:29 +02:00
|
|
|
std::atomic<bool> failed_ = false;
|
2025-02-27 19:20:33 +01:00
|
|
|
std::mutex mutex_;
|
2025-05-30 00:15:10 +02:00
|
|
|
/* Handle for async compilation. */
|
|
|
|
|
BatchHandle compilation_handle_ = 0;
|
2025-02-27 19:20:33 +01:00
|
|
|
|
|
|
|
|
void move(StaticShader &&other)
|
|
|
|
|
{
|
|
|
|
|
std::scoped_lock lock1(mutex_);
|
|
|
|
|
std::scoped_lock lock2(other.mutex_);
|
|
|
|
|
BLI_assert(shader_ == nullptr && info_name_.empty());
|
|
|
|
|
std::swap(info_name_, other.info_name_);
|
|
|
|
|
/* No std::swap support for atomics. */
|
|
|
|
|
shader_.exchange(other.shader_.exchange(shader_));
|
|
|
|
|
failed_.exchange(other.failed_.exchange(failed_));
|
2025-06-03 17:39:33 +02:00
|
|
|
std::swap(compilation_handle_, other.compilation_handle_);
|
2025-02-27 19:20:33 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
public:
|
|
|
|
|
StaticShader(std::string info_name) : info_name_(info_name) {}
|
|
|
|
|
|
|
|
|
|
StaticShader() = default;
|
|
|
|
|
StaticShader(StaticShader &&other)
|
|
|
|
|
{
|
|
|
|
|
move(std::move(other));
|
|
|
|
|
}
|
|
|
|
|
StaticShader &operator=(StaticShader &&other)
|
|
|
|
|
{
|
|
|
|
|
move(std::move(other));
|
|
|
|
|
return *this;
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
~StaticShader()
|
|
|
|
|
{
|
2025-05-30 00:15:10 +02:00
|
|
|
if (compilation_handle_) {
|
|
|
|
|
GPU_shader_batch_cancel(compilation_handle_);
|
|
|
|
|
}
|
2025-02-27 19:20:33 +01:00
|
|
|
GPU_SHADER_FREE_SAFE(shader_);
|
|
|
|
|
}
|
|
|
|
|
|
2025-05-30 00:15:10 +02:00
|
|
|
/* Schedule the shader to be compile in a worker thread. */
|
|
|
|
|
void ensure_compile_async()
|
|
|
|
|
{
|
2025-05-30 15:20:58 +02:00
|
|
|
if (is_ready()) {
|
2025-05-30 00:15:10 +02:00
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
std::scoped_lock lock(mutex_);
|
|
|
|
|
|
2025-05-30 15:20:58 +02:00
|
|
|
if (compilation_handle_) {
|
|
|
|
|
if (GPU_shader_batch_is_ready(compilation_handle_)) {
|
|
|
|
|
shader_ = GPU_shader_batch_finalize(compilation_handle_)[0];
|
|
|
|
|
failed_ = shader_ == nullptr;
|
|
|
|
|
}
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
2025-05-30 00:15:10 +02:00
|
|
|
if (!shader_ && !failed_ && !compilation_handle_) {
|
|
|
|
|
BLI_assert(!info_name_.empty());
|
|
|
|
|
const GPUShaderCreateInfo *create_info = GPU_shader_create_info_get(info_name_.c_str());
|
|
|
|
|
compilation_handle_ = GPU_shader_batch_create_from_infos({&create_info, 1});
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
bool is_ready()
|
|
|
|
|
{
|
2025-05-30 15:20:58 +02:00
|
|
|
return shader_ || failed_;
|
2025-05-30 00:15:10 +02:00
|
|
|
}
|
|
|
|
|
|
2025-08-11 09:34:28 +02:00
|
|
|
blender::gpu::Shader *get()
|
2025-02-27 19:20:33 +01:00
|
|
|
{
|
2025-05-30 15:20:58 +02:00
|
|
|
if (is_ready()) {
|
2025-02-27 19:20:33 +01:00
|
|
|
return shader_;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
std::scoped_lock lock(mutex_);
|
|
|
|
|
|
|
|
|
|
if (!shader_ && !failed_) {
|
2025-05-30 00:15:10 +02:00
|
|
|
if (compilation_handle_) {
|
|
|
|
|
shader_ = GPU_shader_batch_finalize(compilation_handle_)[0];
|
|
|
|
|
}
|
|
|
|
|
else {
|
|
|
|
|
BLI_assert(!info_name_.empty());
|
|
|
|
|
shader_ = GPU_shader_create_from_info_name(info_name_.c_str());
|
|
|
|
|
}
|
2025-05-30 15:19:10 +02:00
|
|
|
failed_ = shader_ == nullptr;
|
2025-02-27 19:20:33 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return shader_;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* For batch compiled shaders. */
|
|
|
|
|
/* TODO: Find a better way to handle this. */
|
2025-08-11 09:34:28 +02:00
|
|
|
void set(blender::gpu::Shader *shader)
|
2025-02-27 19:20:33 +01:00
|
|
|
{
|
|
|
|
|
std::scoped_lock lock(mutex_);
|
|
|
|
|
BLI_assert(shader_ == nullptr);
|
|
|
|
|
shader_ = shader;
|
|
|
|
|
}
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
/* Thread-safe container for StaticShader cache classes.
|
|
|
|
|
* The class instance creation is deferred until the first get() call. */
|
|
|
|
|
template<typename T> class StaticShaderCache {
  /* Lazily-created instance. Atomic so the fast path in get() can read it lock-free. */
  std::atomic<T *> cache_ = nullptr;
  /* Serializes creation (get) and destruction (release) of the instance. */
  std::mutex mutex_;

 public:
  ~StaticShaderCache()
  {
    /* release() must have been called before destruction. */
    BLI_assert(cache_ == nullptr);
  }

  /* Return the cache instance, constructing it on first call.
   * Uses double-checked locking: lock-free when already created.
   * \param constructor_args: Forwarded to T's constructor on first creation only. */
  template<typename... Args> T &get(Args &&...constructor_args)
  {
    if (cache_) {
      return *cache_;
    }

    std::lock_guard lock(mutex_);

    /* Re-check under the lock: another thread may have created it meanwhile. */
    if (cache_ == nullptr) {
      cache_ = new T(std::forward<Args>(constructor_args)...);
    }
    return *cache_;
  }

  /* Destroy the cache instance (if any). Safe to call multiple times. */
  void release()
  {
    if (!cache_) {
      return;
    }

    std::lock_guard lock(mutex_);

    /* Re-check under the lock: another thread may have released it meanwhile. */
    if (cache_) {
      delete cache_;
      cache_ = nullptr;
    }
  }

  /* Acquire the cache mutex; the guard is returned by value (guaranteed elision). */
  std::lock_guard<std::mutex> lock_guard()
  {
    return std::lock_guard(mutex_);
  }
};
|
|
|
|
|
|
|
|
|
|
} // namespace blender::gpu
|