EEVEE-Next: Support ShaderToRGBA on Dithered materials
This adds a new hybrid mode where the light and light-probe textures are bound to the gbuffer pass and used by shaders that use ShaderToRGBA. The trade-off is that these shaders have fewer texture slots available, but still two more than Blended materials. These materials correctly write to the AOVs and render passes. This is not too invasive in terms of implementation. The only quirk is that we needed another fragment shader to simplify the dependencies on the resources. If we ever get pre-processor support in our include/require system, this could be cleaned up.
This commit is contained in:
@@ -597,6 +597,7 @@ set(GLSL_SRC
|
||||
engines/eevee_next/shaders/eevee_surf_deferred_frag.glsl
|
||||
engines/eevee_next/shaders/eevee_surf_depth_frag.glsl
|
||||
engines/eevee_next/shaders/eevee_surf_forward_frag.glsl
|
||||
engines/eevee_next/shaders/eevee_surf_hybrid_frag.glsl
|
||||
engines/eevee_next/shaders/eevee_surf_lib.glsl
|
||||
engines/eevee_next/shaders/eevee_surf_occupancy_frag.glsl
|
||||
engines/eevee_next/shaders/eevee_surf_shadow_frag.glsl
|
||||
|
||||
@@ -143,6 +143,9 @@ static inline eClosureBits shader_closure_bits_from_flag(const GPUMaterial *gpum
|
||||
if (GPU_material_flag_get(gpumat, GPU_MATFLAG_AO)) {
|
||||
closure_bits |= CLOSURE_AMBIENT_OCCLUSION;
|
||||
}
|
||||
if (GPU_material_flag_get(gpumat, GPU_MATFLAG_SHADER_TO_RGBA)) {
|
||||
closure_bits |= CLOSURE_SHADER_TO_RGBA;
|
||||
}
|
||||
return closure_bits;
|
||||
}
|
||||
|
||||
|
||||
@@ -464,8 +464,21 @@ void DeferredLayer::begin_sync()
|
||||
inst_.hiz_buffer.bind_resources(gbuffer_ps_);
|
||||
inst_.cryptomatte.bind_resources(gbuffer_ps_);
|
||||
|
||||
/* Bind light resources for the NPR materials that get rendered first.
|
||||
* Non-NPR shaders will override these resource bindings. */
|
||||
inst_.lights.bind_resources(gbuffer_ps_);
|
||||
inst_.shadows.bind_resources(gbuffer_ps_);
|
||||
inst_.reflection_probes.bind_resources(gbuffer_ps_);
|
||||
inst_.irradiance_cache.bind_resources(gbuffer_ps_);
|
||||
|
||||
DRWState state = DRW_STATE_WRITE_COLOR | DRW_STATE_DEPTH_EQUAL;
|
||||
|
||||
gbuffer_single_sided_hybrid_ps_ = &gbuffer_ps_.sub("DoubleSided");
|
||||
gbuffer_single_sided_hybrid_ps_->state_set(state | DRW_STATE_CULL_BACK);
|
||||
|
||||
gbuffer_double_sided_hybrid_ps_ = &gbuffer_ps_.sub("SingleSided");
|
||||
gbuffer_double_sided_hybrid_ps_->state_set(state);
|
||||
|
||||
gbuffer_double_sided_ps_ = &gbuffer_ps_.sub("DoubleSided");
|
||||
gbuffer_double_sided_ps_->state_set(state);
|
||||
|
||||
@@ -600,9 +613,15 @@ PassMain::Sub *DeferredLayer::material_add(::Material *blender_mat, GPUMaterial
|
||||
eClosureBits closure_bits = shader_closure_bits_from_flag(gpumat);
|
||||
closure_bits_ |= closure_bits;
|
||||
|
||||
PassMain::Sub *pass = (blender_mat->blend_flag & MA_BL_CULL_BACKFACE) ?
|
||||
gbuffer_single_sided_ps_ :
|
||||
gbuffer_double_sided_ps_;
|
||||
bool has_shader_to_rgba = (closure_bits & CLOSURE_SHADER_TO_RGBA) != 0;
|
||||
bool backface_culling = (blender_mat->blend_flag & MA_BL_CULL_BACKFACE) != 0;
|
||||
|
||||
PassMain::Sub *pass = (has_shader_to_rgba) ?
|
||||
((backface_culling) ? gbuffer_single_sided_hybrid_ps_ :
|
||||
gbuffer_double_sided_hybrid_ps_) :
|
||||
((backface_culling) ? gbuffer_single_sided_ps_ :
|
||||
gbuffer_double_sided_ps_);
|
||||
|
||||
return &pass->sub(GPU_material_get_name(gpumat));
|
||||
}
|
||||
|
||||
|
||||
@@ -175,6 +175,10 @@ struct DeferredLayerBase {
|
||||
PassMain::Sub *prepass_double_sided_moving_ps_ = nullptr;
|
||||
|
||||
PassMain gbuffer_ps_ = {"Shading"};
|
||||
/* Shaders that use the ClosureToRGBA node need to be rendered first.
|
||||
* Consider them hybrid forward and deferred. */
|
||||
PassMain::Sub *gbuffer_single_sided_hybrid_ps_ = nullptr;
|
||||
PassMain::Sub *gbuffer_double_sided_hybrid_ps_ = nullptr;
|
||||
PassMain::Sub *gbuffer_single_sided_ps_ = nullptr;
|
||||
PassMain::Sub *gbuffer_double_sided_ps_ = nullptr;
|
||||
|
||||
|
||||
@@ -641,7 +641,12 @@ void ShaderModule::material_create_info_ammend(GPUMaterial *gpumat, GPUCodegenOu
|
||||
info.additional_info("eevee_surf_capture");
|
||||
break;
|
||||
case MAT_PIPE_DEFERRED:
|
||||
info.additional_info("eevee_surf_deferred");
|
||||
if (GPU_material_flag_get(gpumat, GPU_MATFLAG_SHADER_TO_RGBA)) {
|
||||
info.additional_info("eevee_surf_deferred_hybrid");
|
||||
}
|
||||
else {
|
||||
info.additional_info("eevee_surf_deferred");
|
||||
}
|
||||
break;
|
||||
case MAT_PIPE_FORWARD:
|
||||
info.additional_info("eevee_surf_forward");
|
||||
|
||||
@@ -1187,6 +1187,7 @@ enum eClosureBits : uint32_t {
|
||||
CLOSURE_HOLDOUT = (1u << 10u),
|
||||
CLOSURE_VOLUME = (1u << 11u),
|
||||
CLOSURE_AMBIENT_OCCLUSION = (1u << 12u),
|
||||
CLOSURE_SHADER_TO_RGBA = (1u << 13u),
|
||||
};
|
||||
|
||||
enum GBufferMode : uint32_t {
|
||||
|
||||
@@ -0,0 +1,153 @@
|
||||
/* SPDX-FileCopyrightText: 2022-2023 Blender Authors
|
||||
*
|
||||
* SPDX-License-Identifier: GPL-2.0-or-later */
|
||||
|
||||
/**
 * Hybrid deferred/forward evaluation: the gbuffer is written as in the
 * deferred pass, but lights and light-probes are also evaluated so that
 * ShaderToRGBA nodes can output a shaded color.
 *
 * Outputs shading parameters per pixel using a randomized set of BSDFs.
 * Some render-passes are written during this pass.
 */
|
||||
|
||||
#pragma BLENDER_REQUIRE(draw_view_lib.glsl)
|
||||
#pragma BLENDER_REQUIRE(eevee_gbuffer_lib.glsl)
|
||||
#pragma BLENDER_REQUIRE(common_hair_lib.glsl)
|
||||
#pragma BLENDER_REQUIRE(eevee_ambient_occlusion_lib.glsl)
|
||||
#pragma BLENDER_REQUIRE(eevee_surf_lib.glsl)
|
||||
#pragma BLENDER_REQUIRE(eevee_light_eval_lib.glsl)
|
||||
#pragma BLENDER_REQUIRE(eevee_lightprobe_eval_lib.glsl)
|
||||
#pragma BLENDER_REQUIRE(eevee_nodetree_lib.glsl)
|
||||
#pragma BLENDER_REQUIRE(eevee_sampling_lib.glsl)
|
||||
|
||||
/**
 * Evaluate direct lighting and light-probes for the closures gathered so far
 * and return the resulting shaded color (premultiplied by closure weights).
 * Called by ShaderToRGBA nodes during this hybrid gbuffer pass.
 * Resets the closure weights afterwards so the next closure tree starts clean.
 */
vec4 closure_to_rgba(Closure cl)
{
  /* NOTE(review): removed unused locals `refraction_light` and `shadow`;
   * refraction is not evaluated by this function. */
  vec3 diffuse_light = vec3(0.0);
  vec3 reflection_light = vec3(0.0);

  /* View-space depth of the shading point along the view forward axis. */
  float vPz = dot(drw_view_forward(), g_data.P) - dot(drw_view_forward(), drw_view_position());
  vec3 V = drw_world_incident_vector(g_data.P);

  /* Stack of two closures: [0] diffuse, [1] reflection
   * (matches LIGHT_CLOSURE_EVAL_COUNT = 2 of the hybrid create-info). */
  ClosureLightStack stack;

  ClosureLight cl_diff;
  cl_diff.N = g_diffuse_data.N;
  cl_diff.ltc_mat = LTC_LAMBERT_MAT;
  cl_diff.type = LIGHT_DIFFUSE;
  stack.cl[0] = cl_diff;

  ClosureLight cl_refl;
  cl_refl.N = g_reflection_data.N;
  cl_refl.ltc_mat = LTC_GGX_MAT(dot(g_reflection_data.N, V), g_reflection_data.roughness);
  cl_refl.type = LIGHT_SPECULAR;
  stack.cl[1] = cl_refl;

  float thickness = 0.01; /* TODO(fclem) thickness. */
  light_eval(stack, g_data.P, g_data.Ng, V, vPz, thickness);

  vec2 noise_probe = interlieved_gradient_noise(gl_FragCoord.xy, vec2(0, 1), vec2(0.0));
  LightProbeSample samp = lightprobe_load(g_data.P, g_data.Ng, V);

  diffuse_light += stack.cl[0].light_shadowed;
  diffuse_light += lightprobe_eval(samp, g_diffuse_data, g_data.P, V, noise_probe);

  reflection_light += stack.cl[1].light_shadowed;
  reflection_light += lightprobe_eval(samp, g_reflection_data, g_data.P, V, noise_probe);

  vec4 out_color;
  out_color.rgb = g_emission;
  out_color.rgb += g_diffuse_data.color * g_diffuse_data.weight * diffuse_light;
  out_color.rgb += g_reflection_data.color * g_reflection_data.weight * reflection_light;

  /* Alpha from the average transmittance of the closure tree. */
  out_color.a = saturate(1.0 - average(g_transmittance));

  /* Reset for the next closure tree. */
  closure_weights_reset();

  return out_color;
}
|
||||
|
||||
void main()
{
  /* Clear AOVs first. In case the material renders to them. */
  clear_aovs();

  init_globals();

  /* Per-pixel blue noise seeds the stochastic closure selection. */
  float noise = utility_tx_fetch(utility_tx, gl_FragCoord.xy, UTIL_BLUE_NOISE_LAYER).r;
  g_closure_rand = fract(noise + sampling_rng_1D_get(SAMPLING_CLOSURE));

  fragment_displacement();

  /* Evaluate the material node-tree; fills the g_* closure globals. */
  nodetree_surface();

  g_holdout = saturate(g_holdout);

  float thickness = nodetree_thickness();

  /* Pre-multiply each closure color by its weight before packing. */
  g_diffuse_data.color *= g_diffuse_data.weight;
  g_reflection_data.color *= g_reflection_data.weight;
  g_refraction_data.color *= g_refraction_data.weight;

  /* Weighted average of closure normals; falls back to the geometric normal
   * when no BSDF has any weight. */
  /* TODO(fclem): This feels way too complex for what is it. */
  bool has_any_bsdf_weight = g_diffuse_data.weight != 0.0 || g_reflection_data.weight != 0.0 ||
                             g_refraction_data.weight != 0.0;
  vec3 out_normal = has_any_bsdf_weight ? vec3(0.0) : g_data.N;
  out_normal += g_diffuse_data.N * g_diffuse_data.weight;
  out_normal += g_reflection_data.N * g_reflection_data.weight;
  out_normal += g_refraction_data.N * g_refraction_data.weight;
  out_normal = safe_normalize(out_normal);

  vec3 specular_color = g_reflection_data.color + g_refraction_data.color;

  /* ----- Render Passes output ----- */

  ivec2 out_texel = ivec2(gl_FragCoord.xy);
#ifdef MAT_RENDER_PASS_SUPPORT /* Needed because node_tree isn't present in test shaders. */
  /* Some render pass can be written during the gbuffer pass. Light passes are written later. */
  if (imageSize(rp_cryptomatte_img).x > 1) {
    vec4 cryptomatte_output = vec4(
        cryptomatte_object_buf[resource_id], node_tree.crypto_hash, 0.0);
    imageStore(rp_cryptomatte_img, out_texel, cryptomatte_output);
  }
  output_renderpass_color(uniform_buf.render_pass.normal_id, vec4(out_normal, 1.0));
  output_renderpass_color(uniform_buf.render_pass.position_id, vec4(g_data.P, 1.0));
  output_renderpass_color(uniform_buf.render_pass.diffuse_color_id,
                          vec4(g_diffuse_data.color, 1.0));
  output_renderpass_color(uniform_buf.render_pass.specular_color_id, vec4(specular_color, 1.0));
  output_renderpass_color(uniform_buf.render_pass.emission_id, vec4(g_emission, 1.0));
#endif

  /* ----- GBuffer output ----- */

  GBufferDataPacked gbuf = gbuffer_pack(
      g_diffuse_data, g_reflection_data, g_refraction_data, out_normal, thickness);

  /* Output header and first closure using frame-buffer attachment. */
  out_gbuf_header = gbuf.header;
  out_gbuf_color = gbuf.color[0];
  out_gbuf_closure = gbuf.closure[0];

  /* Output remaining closures using image store. */
  /* NOTE: The image view starts at layer 1, so the destination layer is `closure_index - 1`. */
  if (gbuffer_header_unpack(gbuf.header, 1) != GBUF_NONE) {
    imageStore(out_gbuf_color_img, ivec3(out_texel, 1 - 1), gbuf.color[1]);
    imageStore(out_gbuf_closure_img, ivec3(out_texel, 1 - 1), gbuf.closure[1]);
  }
  if (gbuffer_header_unpack(gbuf.header, 2) != GBUF_NONE) {
    imageStore(out_gbuf_color_img, ivec3(out_texel, 2 - 1), gbuf.color[2]);
    imageStore(out_gbuf_closure_img, ivec3(out_texel, 2 - 1), gbuf.closure[2]);
  }
  if (gbuffer_header_unpack(gbuf.header, 3) != GBUF_NONE) {
    /* No color for SSS. */
    imageStore(out_gbuf_closure_img, ivec3(out_texel, 3 - 1), gbuf.closure[3]);
  }

  /* ----- Radiance output ----- */

  /* Only output emission during the gbuffer pass. */
  out_radiance = vec4(g_emission, 0.0);
  out_radiance.rgb *= 1.0 - g_holdout;
  out_radiance.a = g_holdout;
}
|
||||
@@ -148,7 +148,7 @@ GPU_SHADER_CREATE_INFO(eevee_cryptomatte_out)
|
||||
.storage_buf(CRYPTOMATTE_BUF_SLOT, Qualifier::READ, "vec2", "cryptomatte_object_buf[]")
|
||||
.image_out(RBUFS_CRYPTOMATTE_SLOT, Qualifier::WRITE, GPU_RGBA32F, "rp_cryptomatte_img");
|
||||
|
||||
/* NOTE(review): removed the stale `GPU_SHADER_CREATE_INFO(eevee_surf_deferred)`
 * opener left over from the pre-patch code; the plain and hybrid variants now
 * both build on this shared base. */
GPU_SHADER_CREATE_INFO(eevee_surf_deferred_base)
    .define("MAT_DEFERRED")
    /* NOTE: This removes the possibility of using gl_FragDepth. */
    .early_fragment_test(true)
|
||||
@@ -161,7 +161,6 @@ GPU_SHADER_CREATE_INFO(eevee_surf_deferred)
|
||||
* limitation of the number of images we can bind on a single shader. */
|
||||
.image_array_out(GBUF_CLOSURE_SLOT, Qualifier::WRITE, GPU_RGBA16, "out_gbuf_closure_img")
|
||||
.image_array_out(GBUF_COLOR_SLOT, Qualifier::WRITE, GPU_RGB10_A2, "out_gbuf_color_img")
|
||||
.fragment_source("eevee_surf_deferred_frag.glsl")
|
||||
.additional_info("eevee_global_ubo",
|
||||
"eevee_utility_texture",
|
||||
/* Added at runtime because of test shaders not having `node_tree`. */
|
||||
@@ -170,6 +169,18 @@ GPU_SHADER_CREATE_INFO(eevee_surf_deferred)
|
||||
"eevee_sampling_data",
|
||||
"eevee_hiz_data");
|
||||
|
||||
/* Plain deferred surface shader: gbuffer output only. */
GPU_SHADER_CREATE_INFO(eevee_surf_deferred)
    .fragment_source("eevee_surf_deferred_frag.glsl")
    .additional_info("eevee_surf_deferred_base");

/* Hybrid variant for materials using the ShaderToRGBA node: additionally binds
 * light, light-probe and shadow data so lighting can be evaluated during the
 * gbuffer pass. LIGHT_CLOSURE_EVAL_COUNT is 2 (diffuse + reflection). */
GPU_SHADER_CREATE_INFO(eevee_surf_deferred_hybrid)
    .fragment_source("eevee_surf_hybrid_frag.glsl")
    .define("LIGHT_CLOSURE_EVAL_COUNT", "2")
    .additional_info("eevee_surf_deferred_base",
                     "eevee_light_data",
                     "eevee_lightprobe_data",
                     "eevee_shadow_data");
|
||||
|
||||
GPU_SHADER_CREATE_INFO(eevee_surf_forward)
|
||||
.define("MAT_FORWARD")
|
||||
/* Early fragment test is needed for render passes support for forward surfaces. */
|
||||
|
||||
Reference in New Issue
Block a user