Cleanup: EEVEE-Next: Remove some TODOs

These changes just move some code around, clean up the
syntax, and make sure to use the appropriate libs.
This commit is contained in:
Clément Foucault
2023-10-13 18:33:08 +02:00
parent ef494b2794
commit 5b1d5fa82f
6 changed files with 101 additions and 81 deletions

View File

@@ -675,6 +675,7 @@ set(GLSL_SRC
intern/shaders/draw_resource_finalize_comp.glsl
intern/shaders/draw_view_finalize_comp.glsl
intern/shaders/draw_view_lib.glsl
intern/shaders/draw_view_reconstruction_lib.glsl
intern/shaders/draw_visibility_comp.glsl
intern/draw_command_shared.hh

View File

@@ -3,62 +3,9 @@
* SPDX-License-Identifier: GPL-2.0-or-later */
#pragma BLENDER_REQUIRE(gpu_shader_math_vector_lib.glsl)
#pragma BLENDER_REQUIRE(draw_view_reconstruction_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_ambient_occlusion_lib.glsl)
/* Similar to https://atyuwen.github.io/posts/normal-reconstruction/.
 * This samples the depth buffer 4 times for each direction to get the most correct
 * implicit normal reconstruction out of the depth buffer. */
/* Compute the view-space position derivative along `offset` by sampling the
 * depth buffer at -2, -1, +1 and +2 texels in that direction, then picking
 * the side least likely to cross a depth discontinuity.
 * `vP` is the view-space position of the center pixel, `depth_center` its
 * raw depth-buffer value. */
vec3 view_position_derivative_from_depth(
    sampler2D depth_tx, ivec2 extent, vec2 uv, ivec2 offset, vec3 vP, float depth_center)
{
  vec4 H;
  H.x = texelFetch(depth_tx, ivec2(uv * vec2(extent)) - offset * 2, 0).r;
  H.y = texelFetch(depth_tx, ivec2(uv * vec2(extent)) - offset, 0).r;
  H.z = texelFetch(depth_tx, ivec2(uv * vec2(extent)) + offset, 0).r;
  H.w = texelFetch(depth_tx, ivec2(uv * vec2(extent)) + offset * 2, 0).r;

  vec2 uv_offset = vec2(offset) / vec2(extent);
  vec2 uv1 = uv - uv_offset * 2.0;
  vec2 uv2 = uv - uv_offset;
  vec2 uv3 = uv + uv_offset;

  /* Fix issue with depth precision. Take even larger diff. */
  vec4 diff = abs(vec4(depth_center, H.yzw) - H.x);
  if (reduce_max(diff) < 2.4e-7 && all(lessThan(diff.xyz, diff.www))) {
    return 0.25 *
           (drw_point_screen_to_view(vec3(uv3, H.w)) - drw_point_screen_to_view(vec3(uv1, H.x)));
  }
  /* Simplified (H.xw + 2.0 * (H.yz - H.xw)) - depth_center */
  vec2 deltas = abs((2.0 * H.yz - H.xw) - depth_center);
  if (deltas.x < deltas.y) {
    return vP - drw_point_screen_to_view(vec3(uv2, H.y));
  }
  else {
    return drw_point_screen_to_view(vec3(uv3, H.z)) - vP;
  }
}
/* TODO(Miguel Pozo): This should be in common_view_lib,
* but moving it there results in dependency hell. */
/* Reconstruct the view-space position `vP` and geometric normal `vNg` of the
 * pixel at `uv` from the depth buffer alone.
 * Returns false when the pixel has background depth (depth == 1.0). */
bool reconstruct_view_position_and_normal_from_depth(
    sampler2D depth_tx, ivec2 extent, vec2 uv, out vec3 vP, out vec3 vNg)
{
  float center_depth = texelFetch(depth_tx, ivec2(uv * vec2(extent)), 0).r;
  vP = drw_point_screen_to_view(vec3(uv, center_depth));

  /* Derivatives along the horizontal and vertical screen axes. */
  vec3 dP_dx = view_position_derivative_from_depth(
      depth_tx, extent, uv, ivec2(1, 0), vP, center_depth);
  vec3 dP_dy = view_position_derivative_from_depth(
      depth_tx, extent, uv, ivec2(0, 1), vP, center_depth);
  vNg = safe_normalize(cross(dP_dx, dP_dy));

  /* Background case. */
  return center_depth != 1.0;
}
void main()
{
ivec2 texel = ivec2(gl_GlobalInvocationID.xy);
@@ -67,29 +14,26 @@ void main()
return;
}
vec2 uv = (vec2(texel) + vec2(0.5)) / vec2(extent);
vec3 vP, vNg;
if (!reconstruct_view_position_and_normal_from_depth(hiz_tx, extent, uv, vP, vNg)) {
SurfaceReconstructResult surf = view_reconstruct_from_depth(hiz_tx, extent, texel);
if (surf.is_background) {
/* Do not trace for background */
imageStore(out_ao_img, ivec3(texel, out_ao_img_layer_index), vec4(0.0));
return;
}
vec3 P = drw_point_view_to_world(vP);
vec3 P = drw_point_view_to_world(surf.vP);
vec3 V = drw_world_incident_vector(P);
vec3 Ng = drw_normal_view_to_world(vNg);
vec3 Ng = drw_normal_view_to_world(surf.vNg);
vec3 N = imageLoad(in_normal_img, ivec3(texel, in_normal_img_layer_index)).xyz;
OcclusionData data = ambient_occlusion_search(
vP, hiz_tx, texel, uniform_buf.ao.distance, 0.0, 8.0);
surf.vP, hiz_tx, texel, uniform_buf.ao.distance, 0.0, 8.0);
float visibility;
float visibility_error_out;
vec3 bent_normal_out;
float unused_visibility_error_out;
vec3 unused_bent_normal_out;
ambient_occlusion_eval(
data, texel, V, N, Ng, 0.0, visibility, visibility_error_out, bent_normal_out);
/* Scale by user factor */
visibility = saturate(visibility);
data, texel, V, N, Ng, 0.0, visibility, unused_visibility_error_out, unused_bent_normal_out);
imageStore(out_ao_img, ivec3(texel, out_ao_img_layer_index), vec4(visibility));
imageStore(out_ao_img, ivec3(texel, out_ao_img_layer_index), vec4(saturate(visibility)));
}

View File

@@ -300,7 +300,6 @@ vec3 lightprobe_eval(
LightProbeSample samp, ClosureReflection reflection, vec3 P, vec3 V, vec2 noise)
{
vec3 L = lightprobe_specular_dominant_dir(reflection.N, V, reflection.roughness);
/* TODO: Right now generate a dependency hell. */
// vec3 L = ray_generate_direction(noise, reflection, V, pdf);
float lod = lightprobe_roughness_to_lod(reflection.roughness);

View File

@@ -2,6 +2,7 @@
*
* SPDX-License-Identifier: GPL-2.0-or-later */
#pragma BLENDER_REQUIRE(gpu_shader_utildefines_lib.glsl)
#pragma BLENDER_REQUIRE(gpu_shader_math_vector_lib.glsl)
vec3 lightprobe_irradiance_grid_sample_position(mat4 grid_local_to_world_mat,
@@ -54,8 +55,7 @@ float lightprobe_planar_score(ProbePlanarData planar, vec3 P, vec3 V, vec3 L)
}
/* Return how much the ray is lined up with the captured ray. */
vec3 R = -reflect(V, planar.normal);
/* TODO: Use saturate (dependency hell). */
return clamp(dot(L, R), 0.0, 1.0);
return saturate(dot(L, R));
}
#ifdef PLANAR_PROBES
@@ -65,8 +65,7 @@ float lightprobe_planar_score(ProbePlanarData planar, vec3 P, vec3 V, vec3 L)
int lightprobe_planar_select(vec3 P, vec3 V, vec3 L)
{
/* Initialize to the score of a camera ray. */
/* TODO: Use saturate (dependency hell). */
float best_score = clamp(dot(L, -V), 0.0, 1.0);
float best_score = saturate(dot(L, -V));
int best_index = -1;
for (int index = 0; index < PLANAR_PROBES_MAX; index++) {

View File

@@ -2,6 +2,7 @@
*
* SPDX-License-Identifier: GPL-2.0-or-later */
#pragma BLENDER_REQUIRE(gpu_shader_utildefines_lib.glsl)
#pragma BLENDER_REQUIRE(gpu_shader_math_matrix_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_shadow_tilemap_lib.glsl)
@@ -33,12 +34,6 @@ float shadow_read_depth_at_tilemap_uv(usampler2DArray atlas_tx,
return uintBitsToFloat(texelFetch(atlas_tx, texel, 0).r);
}
/* TODO(fclem): Use utildef version. Only here to avoid include order hell with common_math_lib. */
/* Decode an "ordered int" back to float: undo the bit mirroring applied to
 * negative values before bit-casting the pattern to float. */
float shadow_orderedIntBitsToFloat(int int_value)
{
  int bits = (int_value < 0) ? (int_value ^ 0x7FFFFFFF) : int_value;
  return intBitsToFloat(bits);
}
struct ShadowEvalResult {
/* Visibility of the light. */
float light_visibilty;
@@ -81,8 +76,8 @@ float shadow_linear_occluder_distance(LightData light,
vec3 lP,
float occluder)
{
float near = shadow_orderedIntBitsToFloat(light.clip_near);
float far = shadow_orderedIntBitsToFloat(light.clip_far);
float near = orderedIntBitsToFloat(light.clip_near);
float far = orderedIntBitsToFloat(light.clip_far);
float occluder_z = (is_directional) ? (occluder * (far - near) + near) :
((near * far) / (occluder * (near - far) + far));
@@ -129,8 +124,8 @@ ShadowEvalResult shadow_directional_sample_get(usampler2DArray atlas_tx,
vec3 lP = P * mat3(light.object_mat);
ShadowCoordinates coord = shadow_directional_coordinates(light, lP);
float clip_near = shadow_orderedIntBitsToFloat(light.clip_near);
float clip_far = shadow_orderedIntBitsToFloat(light.clip_far);
float clip_near = orderedIntBitsToFloat(light.clip_near);
float clip_far = orderedIntBitsToFloat(light.clip_far);
/* Assumed to be non-null. */
float z_range = clip_far - clip_near;
float dist_to_near_plane = -lP.z - clip_near;

View File

@@ -0,0 +1,82 @@
/* SPDX-FileCopyrightText: 2018-2023 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later */
#pragma BLENDER_REQUIRE(draw_view_lib.glsl)
/* Similar to https://atyuwen.github.io/posts/normal-reconstruction/.
 * This samples the depth buffer 4 times for each direction to get the most correct
 * implicit normal reconstruction out of the depth buffer. */
/* Compute the view-space position derivative along `offset` by sampling the
 * depth buffer at -2, -1, +1 and +2 texels in that direction, then picking
 * the side least likely to cross a depth discontinuity.
 * `vP` is the view-space position of `texel`, `depth_center` its raw
 * depth-buffer value. */
vec3 view_position_derivative_from_depth(sampler2D scene_depth_tx,
                                         ivec2 extent,
                                         vec2 uv,
                                         ivec2 texel,
                                         ivec2 offset,
                                         vec3 vP,
                                         float depth_center)
{
  vec4 H;
  H.x = texelFetch(scene_depth_tx, texel - offset * 2, 0).r;
  H.y = texelFetch(scene_depth_tx, texel - offset, 0).r;
  H.z = texelFetch(scene_depth_tx, texel + offset, 0).r;
  H.w = texelFetch(scene_depth_tx, texel + offset * 2, 0).r;

  vec2 uv_offset = vec2(offset) / vec2(extent);
  vec2 uv1 = uv - uv_offset * 2.0;
  vec2 uv2 = uv - uv_offset;
  vec2 uv3 = uv + uv_offset;

  /* Fix issue with depth precision. Take even larger diff. */
  vec4 diff = abs(vec4(depth_center, H.yzw) - H.x);
  if (reduce_max(diff) < 2.4e-7 && all(lessThan(diff.xyz, diff.www))) {
    vec3 P1 = drw_point_screen_to_view(vec3(uv1, H.x));
    vec3 P3 = drw_point_screen_to_view(vec3(uv3, H.w));
    return 0.25 * (P3 - P1);
  }
  /* Simplified (H.xw + 2.0 * (H.yz - H.xw)) - depth_center */
  vec2 deltas = abs((2.0 * H.yz - H.xw) - depth_center);
  if (deltas.x < deltas.y) {
    return vP - drw_point_screen_to_view(vec3(uv2, H.y));
  }
  return drw_point_screen_to_view(vec3(uv3, H.z)) - vP;
}
/* Surface information reconstructed from the depth buffer alone. */
struct SurfaceReconstructResult {
  /* View position. */
  vec3 vP;
  /* View geometric normal. */
  vec3 vNg;
  /* Screen depth [0..1]. Corresponding to the depth buffer value. */
  float depth;
  /* True if the pixel has background depth (depth == 1.0). */
  bool is_background;
};
/**
 * Reconstruct surface information (view position, geometric normal, depth)
 * from the depth buffer, using adjacent pixels to derive the normal.
 * \a extent is the valid region of depth_tx.
 * \a texel is the pixel coordinate [0..extent-1] to reconstruct.
 */
SurfaceReconstructResult view_reconstruct_from_depth(sampler2D scene_depth_tx,
                                                     ivec2 extent,
                                                     ivec2 texel)
{
  SurfaceReconstructResult surf;
  surf.depth = texelFetch(scene_depth_tx, texel, 0).r;
  surf.is_background = (surf.depth == 1.0);

  vec2 uv = (vec2(texel) + vec2(0.5)) / vec2(extent);
  surf.vP = drw_point_screen_to_view(vec3(uv, surf.depth));

  if (surf.is_background) {
    /* No surface to reconstruct: fall back to the view incident vector. */
    surf.vNg = drw_view_incident_vector(surf.vP);
    return surf;
  }

  /* Derivatives along the horizontal and vertical screen axes. */
  vec3 dP_dx = view_position_derivative_from_depth(
      scene_depth_tx, extent, uv, texel, ivec2(1, 0), surf.vP, surf.depth);
  vec3 dP_dy = view_position_derivative_from_depth(
      scene_depth_tx, extent, uv, texel, ivec2(0, 1), surf.vP, surf.depth);
  surf.vNg = safe_normalize(cross(dP_dx, dP_dy));
  return surf;
}