Draw: Add default material fallback detection

Detect material passes that are functionally equivalent to the default
Material's passes, and skip compiling them.

Pull Request: https://projects.blender.org/blender/blender/pulls/121137
This commit is contained in:
Miguel Pozo
2024-04-30 19:44:06 +02:00
parent e581ba077d
commit a8f8745dcb
7 changed files with 119 additions and 36 deletions

View File

@@ -190,9 +190,7 @@ MaterialPass MaterialModule::material_pass_get(Object *ob,
}
case GPU_MAT_QUEUED:
queued_shaders_count++;
blender_mat = (is_volume) ? BKE_material_default_volume() : BKE_material_default_surface();
matpass.gpumat = inst_.shaders.material_shader_get(
blender_mat, blender_mat->nodetree, pipeline_type, geometry_type, false);
matpass.gpumat = inst_.shaders.material_default_shader_get(pipeline_type, geometry_type);
break;
case GPU_MAT_FAILED:
default:

View File

@@ -764,6 +764,59 @@ static void codegen_callback(void *thunk, GPUMaterial *mat, GPUCodegenOutput *co
reinterpret_cast<ShaderModule *>(thunk)->material_create_info_ammend(mat, codegen);
}
/* Return true when this material's pass is functionally identical to the pass the
 * default Material would produce, so its compilation can be skipped and the default
 * shader used instead. */
static bool can_use_default_cb(GPUMaterial *mat)
{
  using namespace blender::gpu::shader;

  eMaterialPipeline pipeline;
  eMaterialGeometry geometry;
  eMaterialDisplacement displacement;
  bool shadows_transparent;
  material_type_from_shader_uuid(
      GPU_material_uuid_get(mat), pipeline, geometry, displacement, shadows_transparent);

  /* Only shadow passes and pre-passes can fall back to the default shader. */
  const bool is_shadow = (pipeline == eMaterialPipeline::MAT_PIPE_SHADOW);
  const bool is_prepass = ELEM(pipeline,
                               eMaterialPipeline::MAT_PIPE_PREPASS_DEFERRED,
                               eMaterialPipeline::MAT_PIPE_PREPASS_DEFERRED_VELOCITY,
                               eMaterialPipeline::MAT_PIPE_PREPASS_OVERLAP,
                               eMaterialPipeline::MAT_PIPE_PREPASS_FORWARD,
                               eMaterialPipeline::MAT_PIPE_PREPASS_FORWARD_VELOCITY,
                               eMaterialPipeline::MAT_PIPE_PREPASS_PLANAR);
  if (!is_shadow && !is_prepass) {
    return false;
  }

  /* Vertex displacement (anything but bump) changes the rendered geometry,
   * so the default pass is never equivalent. */
  if (GPU_material_has_displacement_output(mat) &&
      displacement != eMaterialDisplacement::MAT_DISPLACEMENT_BUMP)
  {
    return false;
  }

  const bool is_transparent = GPU_material_flag_get(mat, GPU_MATFLAG_TRANSPARENT);
  if (is_shadow) {
    /* Transparency only affects the shadow pass when transparent shadows are enabled. */
    return !(is_transparent && shadows_transparent);
  }
  /* Pre-pass: any transparency diverges from the default. */
  return !is_transparent;
}
/* Return the GPUMaterial built from the default Material (surface or volume,
 * depending on the pipeline) for the given pipeline/geometry combination. */
GPUMaterial *ShaderModule::material_default_shader_get(eMaterialPipeline pipeline_type,
                                                       eMaterialGeometry geometry_type)
{
  const bool is_volume = ELEM(pipeline_type, MAT_PIPE_VOLUME_MATERIAL, MAT_PIPE_VOLUME_OCCUPANCY);
  ::Material *default_mat = is_volume ? BKE_material_default_volume() :
                                        BKE_material_default_surface();
  const uint64_t shader_uuid = shader_uuid_from_material_type(
      pipeline_type,
      geometry_type,
      to_displacement_type(default_mat->displacement_method),
      default_mat->blend_flag);
  /* Compilation is not deferred (`deferred = false`) for the default shader. */
  return DRW_shader_from_material(default_mat,
                                  default_mat->nodetree,
                                  GPU_MAT_EEVEE,
                                  shader_uuid,
                                  is_volume,
                                  false,
                                  codegen_callback,
                                  this);
}
GPUMaterial *ShaderModule::material_shader_get(::Material *blender_mat,
bNodeTree *nodetree,
eMaterialPipeline pipeline_type,
@@ -777,14 +830,21 @@ GPUMaterial *ShaderModule::material_shader_get(::Material *blender_mat,
uint64_t shader_uuid = shader_uuid_from_material_type(
pipeline_type, geometry_type, displacement_type, blender_mat->blend_flag);
return DRW_shader_from_material(blender_mat,
nodetree,
GPU_MAT_EEVEE,
shader_uuid,
is_volume,
deferred_compilation,
codegen_callback,
this);
GPUMaterial *mat = DRW_shader_from_material(blender_mat,
nodetree,
GPU_MAT_EEVEE,
shader_uuid,
is_volume,
deferred_compilation,
codegen_callback,
this,
can_use_default_cb);
if (GPU_material_status(mat) == GPU_MAT_USE_DEFAULT) {
mat = material_default_shader_get(pipeline_type, geometry_type);
}
return mat;
}
GPUMaterial *ShaderModule::world_shader_get(::World *blender_world,

View File

@@ -165,6 +165,8 @@ class ShaderModule {
~ShaderModule();
GPUShader *static_shader_get(eShaderType shader_type);
GPUMaterial *material_default_shader_get(eMaterialPipeline pipeline_type,
eMaterialGeometry geometry_type);
GPUMaterial *material_shader_get(::Material *blender_mat,
bNodeTree *nodetree,
eMaterialPipeline pipeline_type,

View File

@@ -306,14 +306,16 @@ GPUMaterial *DRW_shader_from_world(World *wo,
bool deferred,
GPUCodegenCallbackFn callback,
void *thunk);
GPUMaterial *DRW_shader_from_material(Material *ma,
bNodeTree *ntree,
eGPUMaterialEngine engine,
const uint64_t shader_id,
const bool is_volume_shader,
bool deferred,
GPUCodegenCallbackFn callback,
void *thunk);
GPUMaterial *DRW_shader_from_material(
Material *ma,
bNodeTree *ntree,
eGPUMaterialEngine engine,
const uint64_t shader_id,
const bool is_volume_shader,
bool deferred,
GPUCodegenCallbackFn callback,
void *thunk,
GPUMaterialCanUseDefaultCallbackFn can_use_default_cb = nullptr);
void DRW_shader_queue_optimize_material(GPUMaterial *mat);
void DRW_shader_free(GPUShader *shader);
#define DRW_SHADER_FREE_SAFE(shader) \

View File

@@ -258,6 +258,8 @@ static void drw_deferred_shader_add(GPUMaterial *mat, bool deferred)
return;
}
BLI_assert(GPU_material_status(mat) != GPU_MAT_USE_DEFAULT);
/* Do not defer the compilation if we are rendering for an image.
 * Deferred rendering is only possible when `evil_C` is available. */
if (DST.draw_ctx.evil_C == nullptr || DRW_state_is_image_render() || !USE_DEFERRED_COMPILATION) {
@@ -294,6 +296,8 @@ static void drw_deferred_shader_add(GPUMaterial *mat, bool deferred)
static void drw_register_shader_vlattrs(GPUMaterial *mat)
{
BLI_assert(GPU_material_status(mat) != GPU_MAT_USE_DEFAULT);
const ListBase *attrs = GPU_material_layer_attributes(mat);
if (!attrs) {
@@ -536,7 +540,8 @@ GPUMaterial *DRW_shader_from_material(Material *ma,
const bool is_volume_shader,
bool deferred,
GPUCodegenCallbackFn callback,
void *thunk)
void *thunk,
GPUMaterialCanUseDefaultCallbackFn can_use_default_cb)
{
Scene *scene = (Scene *)DEG_get_original_id(&DST.draw_ctx.scene->id);
GPUMaterial *mat = GPU_material_from_nodetree(scene,
@@ -549,7 +554,12 @@ GPUMaterial *DRW_shader_from_material(Material *ma,
is_volume_shader,
false,
callback,
thunk);
thunk,
can_use_default_cb);
if (GPU_material_status(mat) == GPU_MAT_USE_DEFAULT) {
return mat;
}
drw_register_shader_vlattrs(mat);
@@ -565,6 +575,8 @@ GPUMaterial *DRW_shader_from_material(Material *ma,
void DRW_shader_queue_optimize_material(GPUMaterial *mat)
{
BLI_assert(GPU_material_status(mat) != GPU_MAT_USE_DEFAULT);
/* Do not perform deferred optimization if performing render.
* De-queue any queued optimization jobs. */
if (DRW_state_is_image_render()) {

View File

@@ -116,6 +116,7 @@ enum eGPUMaterialStatus {
GPU_MAT_CREATED,
GPU_MAT_QUEUED,
GPU_MAT_SUCCESS,
GPU_MAT_USE_DEFAULT,
};
/* GPU_MAT_OPTIMIZATION_SKIP for cases where we do not
@@ -146,6 +147,8 @@ struct GPUCodegenOutput {
};
using GPUCodegenCallbackFn = void (*)(void *thunk, GPUMaterial *mat, GPUCodegenOutput *codegen);
/* Should return true if the pass is functionally equivalent to the default Material one. */
using GPUMaterialCanUseDefaultCallbackFn = bool (*)(GPUMaterial *mat);
GPUNodeLink *GPU_constant(const float *num);
GPUNodeLink *GPU_uniform(const float *num);
@@ -237,17 +240,19 @@ enum eGPUMaterialEngine {
GPU_MAT_COMPOSITOR,
};
GPUMaterial *GPU_material_from_nodetree(Scene *scene,
Material *ma,
bNodeTree *ntree,
ListBase *gpumaterials,
const char *name,
eGPUMaterialEngine engine,
uint64_t shader_uuid,
bool is_volume_shader,
bool is_lookdev,
GPUCodegenCallbackFn callback,
void *thunk);
GPUMaterial *GPU_material_from_nodetree(
Scene *scene,
Material *ma,
bNodeTree *ntree,
ListBase *gpumaterials,
const char *name,
eGPUMaterialEngine engine,
uint64_t shader_uuid,
bool is_volume_shader,
bool is_lookdev,
GPUCodegenCallbackFn callback,
void *thunk,
GPUMaterialCanUseDefaultCallbackFn can_use_default_cb = nullptr);
void GPU_material_compile(GPUMaterial *mat);
void GPU_material_free_single(GPUMaterial *material);

View File

@@ -831,7 +831,8 @@ GPUMaterial *GPU_material_from_nodetree(Scene *scene,
bool is_volume_shader,
bool is_lookdev,
GPUCodegenCallbackFn callback,
void *thunk)
void *thunk,
GPUMaterialCanUseDefaultCallbackFn can_use_default_cb)
{
/* Search if this material is not already compiled. */
LISTBASE_FOREACH (LinkData *, link, gpumaterials) {
@@ -862,10 +863,13 @@ GPUMaterial *GPU_material_from_nodetree(Scene *scene,
bNodeTree *localtree = ntreeLocalize(ntree);
ntreeGPUMaterialNodes(localtree, mat);
gpu_material_ramp_texture_build(mat);
gpu_material_sky_texture_build(mat);
if (can_use_default_cb && can_use_default_cb(mat)) {
mat->status = GPU_MAT_USE_DEFAULT;
}
else {
gpu_material_ramp_texture_build(mat);
gpu_material_sky_texture_build(mat);
{
/* Create source code and search pass cache for an already compiled version. */
mat->pass = GPU_generate_pass(mat, &mat->graph, engine, callback, thunk, false);