EEVEE: Improve Pixel Size Upscaling

This fixes a few issues and clears up some confusion
in the code.

Note that this changes the behavior of render regions:
they now reduce the internal render size, matching the
new design documentation.

- Data passes now accumulate correctly.
- Adhere to naming conventions for extents, film and render pixels.
- Jitter over final pixels first, before doing random sampling,
  to speed up convergence (see the sketch below).
- Ensure enough samples to cover all the film pixels at least once.
- Always include the four neighboring pixels in case one is nearer.
- Fix projection matrix computation to align overscan pixels.
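
A standalone sketch (not part of the commit; values illustrative) of the coverage
scheme behind the two jitter/sampling bullets above: stepping through the up-scaled
pixel grid with the prime stride used in `Film::pixel_jitter_get` visits every film
pixel exactly once per `scale * scale` samples, which is what guarantees full
coverage before random sampling takes over.

#include <cstdint>
#include <cstdio>

int main()
{
  const int scale = 4; /* Render-pixel to film-pixel ratio (pixel size). */
  const int pixels_per_render_pixel = scale * scale;
  /* Stride as in the patch: co-prime with `pixels_per_render_pixel` for the
   * power-of-two scales EEVEE uses, so the walk is a full permutation. */
  const int prime = (pixels_per_render_pixel / 2) - 1;

  for (uint64_t sample = 0; sample < uint64_t(pixels_per_render_pixel); sample++) {
    const uint64_t index = (sample * prime) % pixels_per_render_pixel;
    printf("sample %2d -> film pixel (%d, %d)\n",
           int(sample), int(index % scale), int(index / scale));
  }
  return 0;
}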

Pull Request: https://projects.blender.org/blender/blender/pulls/124735
Author: Clément Foucault
Date: 2024-07-16 16:59:00 +02:00
Committed by: Clément Foucault
Parent: ca0e1d696a
Commit: c72a1d698e
9 changed files with 224 additions and 102 deletions

View File

@@ -87,6 +87,10 @@ void BKE_camera_params_from_view3d(CameraParams *params,
void BKE_camera_params_compute_viewplane(
CameraParams *params, int winx, int winy, float aspx, float aspy);
/**
* Crop `viewplane` given the current resolution and a pixel region inside the view plane.
*/
void BKE_camera_params_crop_viewplane(rctf *viewplane, int winx, int winy, rcti *region);
/**
* View-plane is assumed to be already computed.
*/

View File

@@ -485,6 +485,18 @@ void BKE_camera_params_compute_viewplane(
params->viewplane = viewplane;
}
void BKE_camera_params_crop_viewplane(rctf *viewplane, int winx, int winy, rcti *region)
{
float pix_size_x = BLI_rctf_size_x(viewplane) / winx;
float pix_size_y = BLI_rctf_size_y(viewplane) / winy;
viewplane->xmin += pix_size_x * region->xmin;
viewplane->ymin += pix_size_y * region->ymin;
viewplane->xmax = viewplane->xmin + pix_size_x * BLI_rcti_size_x(region);
viewplane->ymax = viewplane->ymin + pix_size_y * BLI_rcti_size_y(region);
}
void BKE_camera_params_compute_matrix(CameraParams *params)
{
rctf viewplane = params->viewplane;

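A hedged usage sketch of the new `BKE_camera_params_crop_viewplane` helper, with
`rctf`/`rcti` mocked up so the snippet is self-contained (the literals are
illustrative): cropping a 16:9 view plane to the center quarter of a 1920x1080
frame keeps the per-pixel footprint identical, which is how render regions now
reduce the internal render size.

#include <cstdio>

/* Standalone mock-ups of Blender's rctf/rcti, for illustration only. */
struct rctf { float xmin, xmax, ymin, ymax; };
struct rcti { int xmin, xmax, ymin, ymax; };

/* Same math as BKE_camera_params_crop_viewplane: shift the view-plane
 * origin by whole source pixels, then resize to the region extent. */
static void crop_viewplane(rctf *vp, int winx, int winy, const rcti *region)
{
  const float pix_x = (vp->xmax - vp->xmin) / winx;
  const float pix_y = (vp->ymax - vp->ymin) / winy;
  vp->xmin += pix_x * region->xmin;
  vp->ymin += pix_y * region->ymin;
  vp->xmax = vp->xmin + pix_x * (region->xmax - region->xmin);
  vp->ymax = vp->ymin + pix_y * (region->ymax - region->ymin);
}

int main()
{
  rctf vp = {-1.0f, 1.0f, -0.5625f, 0.5625f}; /* 16:9 view plane. */
  rcti region = {480, 1440, 270, 810};        /* Center quarter of 1920x1080. */
  crop_viewplane(&vp, 1920, 1080, &region);
  printf("cropped: x [%f..%f] y [%f..%f]\n", vp.xmin, vp.xmax, vp.ymin, vp.ymax);
  return 0;
}
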
View File

@@ -10,6 +10,8 @@
#include "DRW_render.hh"
#include "BLI_bounds.hh"
#include "DNA_camera_types.h"
#include "DNA_view3d_types.h"
@@ -90,24 +92,30 @@ void Camera::sync()
CameraData &data = data_;
float2 resolution = float2(inst_.film.display_extent_get());
float2 overscan_margin = float2(overscan_ * math::max(UNPACK2(resolution)));
float2 overscan_resolution = resolution + overscan_margin * 2.0f;
float2 camera_min = overscan_margin;
float2 camera_max = camera_min + resolution;
int2 display_extent = inst_.film.display_extent_get();
int2 film_extent = inst_.film.film_extent_get();
int2 film_offset = inst_.film.film_offset_get();
/* Overscan in film pixels. Not the same as `render_overscan_get`. */
int film_overscan = inst_.film.overscan_pixels_get(overscan_, film_extent);
rcti film_rect;
BLI_rcti_init(&film_rect,
film_offset.x,
film_offset.x + film_extent.x,
film_offset.y,
film_offset.y + film_extent.y);
Bounds<float2> uv_region = {float2(0.0f), float2(display_extent)};
if (inst_.drw_view) {
/* Viewport camera view. */
float2 camera_uv_scale = float2(inst_.rv3d->viewcamtexcofac);
float2 camera_uv_bias = float2(inst_.rv3d->viewcamtexcofac + 2);
float2 camera_region_min = (-camera_uv_bias * resolution) / camera_uv_scale;
float2 camera_region_size = resolution / camera_uv_scale;
camera_min = overscan_margin + camera_region_min;
camera_max = camera_min + camera_region_size;
float2 uv_scale = float4(inst_.rv3d->viewcamtexcofac).xy();
float2 uv_bias = float4(inst_.rv3d->viewcamtexcofac).zw();
/* UV region inside the display extent reference frame. */
uv_region.min = (-uv_bias * float2(display_extent)) / uv_scale;
uv_region.max = uv_region.min + (float2(display_extent) / uv_scale);
}
data.uv_scale = overscan_resolution / (camera_max - camera_min);
data.uv_bias = -camera_min / (camera_max - camera_min);
data.uv_scale = float2(film_extent + film_overscan * 2) / uv_region.size();
data.uv_bias = (float2(film_offset - film_overscan) - uv_region.min) / uv_region.size();
if (inst_.is_baking()) {
/* Any view so that shadows and light culling works during irradiance bake. */
@@ -129,39 +137,46 @@ void Camera::sync()
else if (inst_.drw_view) {
DRW_view_viewmat_get(inst_.drw_view, data.viewmat.ptr(), false);
DRW_view_viewmat_get(inst_.drw_view, data.viewinv.ptr(), true);
if (overscan_ == 0.0f) {
DRW_view_winmat_get(inst_.drw_view, data.winmat.ptr(), false);
CameraParams params;
BKE_camera_params_init(&params);
if (inst_.rv3d->persp == RV3D_CAMOB && DRW_state_is_viewport_image_render()) {
/* We are rendering camera view, no need for pan/zoom params from viewport. */
BKE_camera_params_from_object(&params, camera_eval);
}
else {
CameraParams params;
BKE_camera_params_init(&params);
if (inst_.rv3d->persp == RV3D_CAMOB && DRW_state_is_viewport_image_render()) {
/* We are rendering camera view, no need for pan/zoom params from viewport. */
BKE_camera_params_from_object(&params, camera_eval);
}
else {
BKE_camera_params_from_view3d(&params, inst_.depsgraph, inst_.v3d, inst_.rv3d);
}
BKE_camera_params_compute_viewplane(
&params, UNPACK2(inst_.film.display_extent_get()), 1.0f, 1.0f);
RE_GetWindowMatrixWithOverscan(params.is_ortho,
params.clip_start,
params.clip_end,
params.viewplane,
overscan_,
data.winmat.ptr());
BKE_camera_params_from_view3d(&params, inst_.depsgraph, inst_.v3d, inst_.rv3d);
}
BKE_camera_params_compute_viewplane(&params, UNPACK2(display_extent), 1.0f, 1.0f);
BKE_camera_params_crop_viewplane(&params.viewplane, UNPACK2(display_extent), &film_rect);
RE_GetWindowMatrixWithOverscan(params.is_ortho,
params.clip_start,
params.clip_end,
params.viewplane,
overscan_,
data.winmat.ptr());
}
else if (inst_.render) {
RE_GetCameraModelMatrix(inst_.render->re, camera_eval, data.viewinv.ptr());
data.viewmat = math::invert(data.viewinv);
const Render *re = inst_.render->re;
RE_GetCameraWindow(inst_.render->re, camera_eval, data.winmat.ptr());
if (overscan_ != 0.0f) {
RE_GetCameraWindowWithOverscan(inst_.render->re, overscan_, data.winmat.ptr());
}
RE_GetCameraModelMatrix(re, camera_eval, data.viewinv.ptr());
data.viewmat = math::invert(data.viewinv);
rctf viewplane = re->viewplane;
BKE_camera_params_crop_viewplane(&viewplane, UNPACK2(display_extent), &film_rect);
RE_GetWindowMatrixWithOverscan(this->is_orthographic(),
re->clip_start,
re->clip_end,
viewplane,
overscan_,
data.winmat.ptr());
}
else {
data.viewmat = float4x4::identity();

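To make the new `uv_scale`/`uv_bias` mapping concrete, here is a hypothetical
worked example (standalone, not the engine code): a 960x540 render region at
offset (480, 270) inside a 1920x1080 display, with 5% overscan and no viewport
pan/zoom, so the UV region spans the full display extent and `uv_region.min`
is zero.

#include <algorithm>
#include <cmath>
#include <cstdio>

int main()
{
  const int display_extent[2] = {1920, 1080}; /* Whole viewport/render. */
  const int film_offset[2] = {480, 270};      /* Render-region origin. */
  const int film_extent[2] = {960, 540};      /* Render-region size. */
  const float overscan = 0.05f;

  /* Mirrors Film::overscan_pixels_get over the film extent. */
  const int film_overscan = int(
      std::ceil(overscan * float(std::max(film_extent[0], film_extent[1]))));

  for (int i = 0; i < 2; i++) {
    /* With uv_region.min == 0 and uv_region.size() == display_extent. */
    const float uv_size = float(display_extent[i]);
    const float uv_scale = float(film_extent[i] + film_overscan * 2) / uv_size;
    const float uv_bias = float(film_offset[i] - film_overscan) / uv_size;
    printf("axis %d: uv_scale %.4f, uv_bias %.4f\n", i, uv_scale, uv_bias);
  }
  return 0;
}
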
View File

@@ -212,6 +212,8 @@ static eViewLayerEEVEEPassType enabled_passes(const ViewLayer *view_layer)
void Film::init(const int2 &extent, const rcti *output_rect)
{
using namespace math;
Sampling &sampling = inst_.sampling;
Scene &scene = *inst_.scene;
@@ -246,12 +248,7 @@ void Film::init(const int2 &extent, const rcti *output_rect)
{
data_.scaling_factor = 1;
if (inst_.is_viewport()) {
if (!bool(enabled_passes_ &
(EEVEE_RENDER_PASS_CRYPTOMATTE_ASSET | EEVEE_RENDER_PASS_CRYPTOMATTE_MATERIAL |
EEVEE_RENDER_PASS_CRYPTOMATTE_OBJECT | EEVEE_RENDER_PASS_NORMAL)))
{
data_.scaling_factor = BKE_render_preview_pixel_size(&inst_.scene->r);
}
data_.scaling_factor = BKE_render_preview_pixel_size(&inst_.scene->r);
}
/* Sharpen the LODs (1.5x) to avoid TAA filtering causing over-blur (see #122941). */
data_.texture_lod_bias = 1.0f / (data_.scaling_factor * 1.5f);
@@ -268,13 +265,9 @@ void Film::init(const int2 &extent, const rcti *output_rect)
data_.extent = int2(BLI_rcti_size_x(output_rect), BLI_rcti_size_y(output_rect));
data_.offset = int2(output_rect->xmin, output_rect->ymin);
data_.extent_inv = 1.0f / float2(data_.extent);
data_.render_extent = math::divide_ceil(extent, int2(data_.scaling_factor));
data_.overscan = 0;
if (inst_.camera.overscan() != 0.0f) {
data_.overscan = inst_.camera.overscan() * math::max(UNPACK2(data_.render_extent));
data_.render_extent += data_.overscan * 2;
}
data_.render_extent = divide_ceil(data_.extent, int2(data_.scaling_factor));
data_.overscan = overscan_pixels_get(inst_.camera.overscan(), data_.render_extent);
data_.render_extent += data_.overscan * 2;
/* Disable filtering if sample count is 1. */
data_.filter_radius = (sampling.sample_count() == 1) ? 0.0f :
@@ -562,9 +555,25 @@ float2 Film::pixel_jitter_get() const
/* Jitter the size of a whole pixel. [-0.5..0.5] */
jitter -= 0.5f;
}
/* TODO(fclem): Mixed-resolution rendering: We need to offset to each of the target pixels covered
* by a render pixel, ideally by choosing one randomly using another sampling dimension, or by
* repeating the same sample RNG sequence for each pixel offset. */
if (data_.scaling_factor > 1) {
/* In this case, the jitter sequence is the same for all the film pixels a render pixel
* covers. This allows adding a manual offset to the different film pixels to ensure they get
* appropriate coverage instead of waiting for random sampling to cover the whole area. This
* ensures much faster convergence. */
const int scale = data_.scaling_factor;
const int render_pixel_per_final_pixel = square_i(scale);
/* TODO(fclem): Randomize in a Z-order curve. */
/* For now, just randomize in scan-lines using a prime stride; this works well for the
* scaling factors we support. */
int prime = (render_pixel_per_final_pixel / 2) - 1;
uint64_t index = (inst_.sampling.sample_index() * prime) % render_pixel_per_final_pixel;
int2 pixel_co = int2(index % scale, index / scale);
/* The jitter is applied on render target pixels. Make it proportional to film pixels. */
jitter /= float(scale);
/* Offset from the render pixel center to the center of the film pixel. */
jitter += ((float2(pixel_co) + 0.5f) / scale) - 0.5f;
}
return jitter;
}
@@ -602,13 +611,29 @@ int Film::cryptomatte_layer_max_get() const
void Film::update_sample_table()
{
/* Offset in render target pixels. */
data_.subpixel_offset = pixel_jitter_get();
int filter_radius_ceil = ceilf(data_.filter_radius);
float filter_radius_sqr = square_f(data_.filter_radius);
data_.samples_len = 0;
if (use_box_filter || data_.filter_radius < 0.01f) {
if (data_.scaling_factor > 1) {
/* For this case there might be no valid samples for some pixels.
* Still visit all four neighbors to have the best weight available.
* Note that weight is computed on the GPU as it is different for each sample. */
/* TODO(fclem): Make it work for filters larger than the scaling_factor. */
for (int y = 0; y <= 1; y++) {
for (int x = 0; x <= 1; x++) {
FilmSample &sample = data_.samples[data_.samples_len];
sample.texel = int2(x, y);
sample.weight = -1.0f; /* Computed on GPU. */
data_.samples_len++;
}
}
data_.samples_weight_total = -1.0f; /* Computed on GPU. */
}
else if (use_box_filter || data_.filter_radius < 0.01f) {
/* Disable gather filtering. */
data_.samples[0].texel = int2(0, 0);
data_.samples[0].weight = 1.0f;

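A standalone sketch (illustrative values) of the simplified sizing logic in
`Film::init` above: divide the film extent by the scaling factor, rounding up,
then pad every side with overscan pixels.

#include <algorithm>
#include <cmath>
#include <cstdio>

int main()
{
  const int extent[2] = {1920, 1080}; /* Film extent (render-region size). */
  const int scaling_factor = 2;       /* Viewport "pixel size". */
  const float overscan = 0.05f;

  int render_extent[2];
  for (int i = 0; i < 2; i++) {
    /* divide_ceil: one render pixel covers scaling_factor^2 film pixels. */
    render_extent[i] = (extent[i] + scaling_factor - 1) / scaling_factor;
  }
  /* Same formula as Film::overscan_pixels_get over the render extent. */
  const int overscan_px = int(
      std::ceil(overscan * float(std::max(render_extent[0], render_extent[1]))));
  for (int i = 0; i < 2; i++) {
    render_extent[i] += overscan_px * 2;
  }
  printf("render extent %dx%d with %d overscan pixels per side\n",
         render_extent[0], render_extent[1], overscan_px);
  return 0;
}
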
View File

@@ -28,6 +28,8 @@
#pragma once
#include "BLI_math_vector.hh"
#include "DRW_render.hh"
#include "eevee_shader_shared.hh"
@@ -110,18 +112,40 @@ class Film {
float *read_pass(eViewLayerEEVEEPassType pass_type, int layer_offset);
float *read_aov(ViewLayerAOV *aov);
/** Returns shading views internal resolution. */
/** Returns shading views internal resolution. Includes overscan pixels. */
int2 render_extent_get() const
{
return data_.render_extent;
}
/** Returns final output resolution. */
/** Size and offset of the film (taking the render region into account). */
int2 film_extent_get() const
{
return data_.extent;
}
int2 film_offset_get() const
{
return data_.offset;
}
/** Size of the whole viewport or the render, disregarding the render region. */
int2 display_extent_get() const
{
return display_extent;
}
/** Number of padding pixels around the render target. Included inside `render_extent_get`. */
int render_overscan_get() const
{
return data_.overscan;
}
/** Returns number of overscan pixels for the given parameters. */
static int overscan_pixels_get(float overscan, int2 extent)
{
return math::ceil(max_ff(0.0f, overscan) * math::reduce_max(extent));
}
int scaling_factor_get() const
{
return data_.scaling_factor;

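The static `overscan_pixels_get` makes the distinction noted in `Camera::sync`
("Overscan in film pixels. Not the same as `render_overscan_get`") concrete:
the result depends on which extent it is fed, so the film-domain and
render-domain overscan differ by roughly the scaling factor. A small
standalone illustration (values hypothetical):

#include <algorithm>
#include <cmath>
#include <cstdio>

/* Mirror of Film::overscan_pixels_get, for illustration only. */
static int overscan_pixels_get(float overscan, int extent_x, int extent_y)
{
  return int(std::ceil(std::max(0.0f, overscan) * float(std::max(extent_x, extent_y))));
}

int main()
{
  const float overscan = 0.05f;
  /* Film-domain overscan, as used by Camera::sync. */
  printf("film overscan: %d px\n", overscan_pixels_get(overscan, 960, 540));
  /* Render-domain overscan, as used by Film::init with scaling factor 2. */
  printf("render overscan: %d px\n", overscan_pixels_get(overscan, 480, 270));
  return 0;
}
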
View File

@@ -37,6 +37,15 @@ void Sampling::init(const Scene *scene)
sample_count_ = infinite_sample_count_;
}
if (inst_.is_viewport()) {
/* We can't rely on the film module as it is initialized later. */
int pixel_size = BKE_render_preview_pixel_size(&inst_.scene->r);
if (pixel_size > 1) {
/* Ensure every film pixel is rendered at least once. */
sample_count_ = max_ii(sample_count_, square_i(pixel_size));
}
}
motion_blur_steps_ = !inst_.is_viewport() ? scene->eevee.motion_blur_steps : 1;
sample_count_ = divide_ceil_u(sample_count_, motion_blur_steps_);
@@ -113,7 +122,8 @@ void Sampling::end_sync()
void Sampling::step()
{
{
uint64_t sample_filter = sample_;
/* Repeat the sequence for all pixels that are being up-scaled. */
uint64_t sample_filter = sample_ / square_i(inst_.film.scaling_factor_get());
if (interactive_mode()) {
sample_filter = sample_filter % interactive_sample_aa_;
}

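A sketch (hypothetical, standalone) of the sequence repetition added in
`Sampling::step` above: with `scaling_factor = 2`, samples 0..3 all map to
filter sample 0 while `Film::pixel_jitter_get` walks the four film pixels, so
each film pixel sees the identical jitter sequence.

#include <cstdint>
#include <cstdio>

int main()
{
  const int scaling_factor = 2;
  const int repeat = scaling_factor * scaling_factor;
  for (uint64_t sample = 0; sample < 8; sample++) {
    /* Same division as in Sampling::step: repeat the filter sequence for
     * all the film pixels a render pixel is up-scaled to. */
    const uint64_t sample_filter = sample / repeat;
    printf("sample %d -> filter sequence index %d\n",
           int(sample), int(sample_filter));
  }
  return 0;
}
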
View File

@@ -395,7 +395,7 @@ struct FilmData {
int2 render_extent;
/**
* Sub-pixel offset applied to the window matrix.
NOTE: In final film pixel units.
NOTE: In render target pixel units.
NOTE: Positive values make the view translate in the negative axes direction.
* NOTE: The origin is the center of the lower left film pixel of the area covered by a render
* pixel if using scaled resolution rendering.

View File

@@ -204,36 +204,63 @@ GPUTexture *ShadingView::render_postfx(GPUTexture *input_tx)
void ShadingView::update_view()
{
const Film &film = inst_.film;
float4x4 viewmat = main_view_.viewmat();
float4x4 winmat = main_view_.winmat();
/*
* Mixed resolution rendering. The center of the display pixels must align with the center of
* the render pixels. If they don't align, the winmat needs to be re-projected.
*/
int2 scaling_factor = int2(inst_.film.scaling_factor_get());
int2 display_extent = inst_.film.display_extent_get();
int overscan = inst_.film.get_data().overscan;
int2 rescaled_render_extent = (extent_ - 2 * overscan) * scaling_factor;
if (film.scaling_factor_get() > 1) {
/* This whole section ensures that the render target pixel grid will match the film pixel
* grid. Otherwise the weight computation inside the film accumulation will be wrong. */
if (rescaled_render_extent != display_extent) {
float left;
float right;
float bottom;
float top;
float near;
float far;
const bool is_perspective = main_view_.is_persp();
float left, right, bottom, top, near, far;
projmat_dimensions(winmat.ptr(), &left, &right, &bottom, &top, &near, &far);
float2 scale = (float2(rescaled_render_extent) / float2(display_extent));
right = left + ((right - left) * scale.x);
top = bottom + ((top - bottom) * scale.y);
const float2 bottom_left_with_overscan = float2(left, bottom);
const float2 top_right_with_overscan = float2(right, top);
const float2 render_size_with_overscan = top_right_with_overscan - bottom_left_with_overscan;
if (is_perspective) {
winmat = math::projection::perspective(left, right, bottom, top, near, far);
float2 bottom_left = bottom_left_with_overscan;
float2 top_right = top_right_with_overscan;
float2 render_size = render_size_with_overscan;
float overscan = inst_.camera.overscan();
if (overscan > 0.0f) {
/* Size of overscan on the screen. */
const float max_size_with_overscan = math::reduce_max(render_size);
const float max_size_original = max_size_with_overscan / (1.0f - 2.0f * overscan);
const float overscan_size = (max_size_with_overscan - max_size_original) / 2.0f;
/* Undo overscan to get the initial dimension of the screen. */
bottom_left = bottom_left_with_overscan + overscan_size;
top_right = top_right_with_overscan - overscan_size;
/* Render target size on the screen (without overscan). */
render_size = top_right - bottom_left;
}
/* Final pixel size on the screen. */
const float2 pixel_size = render_size / float2(film.film_extent_get());
/* Render extent in final film pixel unit. */
const int2 render_extent = film.render_extent_get() * film.scaling_factor_get();
const int overscan_pixels = film.render_overscan_get() * film.scaling_factor_get();
const float2 render_bottom_left = bottom_left - pixel_size * float(overscan_pixels);
const float2 render_top_right = render_bottom_left + pixel_size * float2(render_extent);
if (main_view_.is_persp()) {
winmat = math::projection::perspective(render_bottom_left.x,
render_top_right.x,
render_bottom_left.y,
render_top_right.y,
near,
far);
}
else {
winmat = math::projection::orthographic(left, right, bottom, top, near, far);
winmat = math::projection::orthographic(render_bottom_left.x,
render_top_right.x,
render_bottom_left.y,
render_top_right.y,
near,
far);
}
}

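Why the re-projection above is needed, as a standalone sketch (illustrative
values): with up-scaling, `divide_ceil` can make the render target cover
slightly more screen than the film, and a render region can make the film
cover only part of the display; both trigger the
`rescaled_render_extent != display_extent` condition checked above.

#include <cstdio>

int main()
{
  const int film_extent[2] = {1921, 1080}; /* Odd width forces padding. */
  const int scale = 2;
  for (int i = 0; i < 2; i++) {
    const int render = (film_extent[i] + scale - 1) / scale; /* divide_ceil */
    const int rescaled = render * scale;
    printf("axis %d: film %d, render %d, rescaled %d -> %s\n",
           i, film_extent[i], render, rescaled,
           rescaled == film_extent[i] ? "aligned" : "needs re-projection");
  }
  return 0;
}
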
View File

@@ -55,28 +55,32 @@ FilmSample film_sample_get(int sample_n, ivec2 texel_film)
#else
FilmSample film_sample = uniform_buf.film.samples[sample_n];
film_sample.texel += (texel_film + uniform_buf.film.offset) / scaling_factor +
uniform_buf.film.overscan;
/* Use extend on borders. */
film_sample.texel = clamp(film_sample.texel, ivec2(0, 0), uniform_buf.film.render_extent - 1);
/* TODO(fclem): Panoramic projection will need to compute the sample weight in the shader
* instead of precomputing it on CPU. */
if (scaling_factor > 1) {
/* We need to compute the real distance and weight since a sample
* can be used by many final pixels. */
vec2 offset = (vec2(film_sample.texel - uniform_buf.film.overscan) + 0.5 -
uniform_buf.film.subpixel_offset) *
scaling_factor -
(vec2(texel_film + uniform_buf.film.offset) + 0.5);
film_sample.weight = film_filter_weight(uniform_buf.film.filter_radius,
length_squared(offset));
/* We are working in the render pixel region on the film. We use film pixel units. */
vec2 film_coord = 0.5 + vec2(texel_film % scaling_factor);
/* Sample position inside the render pixel region. */
vec2 jittered_sample_coord = (0.5 - uniform_buf.film.subpixel_offset) * float(scaling_factor);
/* Offset the film samples to always sample the 4 nearest neighbors in the render target.
* `film_sample.texel` is set to visit all 4 neighbors in the [0..1] region. */
ivec2 quad_offset = -ivec2(lessThan(film_coord, jittered_sample_coord));
/* Select correct sample depending on which quadrant the film pixel lies. */
film_sample.texel += quad_offset;
jittered_sample_coord += vec2(film_sample.texel * scaling_factor);
float sample_dist_sqr = length_squared(jittered_sample_coord - film_coord);
film_sample.weight = film_filter_weight(uniform_buf.film.filter_radius, sample_dist_sqr);
/* Ensure a minimum weight for each sample to avoid missing data at 4x or 8x up-scaling. */
film_sample.weight = max(film_sample.weight, 1e-8);
}
film_sample.texel += (texel_film / scaling_factor) + uniform_buf.film.overscan;
#endif /* PANORAMIC */
/* Always return a weight above 0 to avoid blind spots between samples. */
film_sample.weight = max(film_sample.weight, 1e-6);
/* Use extend on borders. */
film_sample.texel = clamp(film_sample.texel, ivec2(0, 0), uniform_buf.film.render_extent - 1);
return film_sample;
}
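
A CPU-side sketch (standalone, with a made-up jitter value) of the quadrant
selection in `film_sample_get` above: for each film pixel inside a render
pixel, offset the base texel so that the four visited samples are the 2x2
render samples surrounding that film pixel.

#include <cstdio>

int main()
{
  const int scale = 2;
  const float subpixel_offset[2] = {0.25f, -0.1f}; /* From pixel_jitter_get. */
  for (int y = 0; y < scale; y++) {
    for (int x = 0; x < scale; x++) {
      /* Film pixel center, in film pixel units inside the render pixel. */
      const float film_coord[2] = {0.5f + x, 0.5f + y};
      int quad_offset[2];
      for (int i = 0; i < 2; i++) {
        /* Render sample position, also in film pixel units. */
        const float jittered = (0.5f - subpixel_offset[i]) * scale;
        /* -1 when the sample center lies past the film pixel, else 0,
         * mirroring `-ivec2(lessThan(film_coord, jittered_sample_coord))`. */
        quad_offset[i] = film_coord[i] < jittered ? -1 : 0;
      }
      printf("film pixel (%d,%d) -> base texel offset (%d,%d)\n",
             x, y, quad_offset[0], quad_offset[1]);
    }
  }
  return 0;
}
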
@@ -201,7 +205,7 @@ float film_distance_load(ivec2 texel)
texel = texel % imageSize(in_weight_img).xy;
if (!uniform_buf.film.use_history || use_reprojection) {
return 1.0e16;
return 0.0;
}
return imageLoad(in_weight_img, ivec3(texel, FILM_WEIGHT_LAYER_DISTANCE)).x;
}
@@ -647,7 +651,8 @@ void film_process_data(ivec2 texel_film, out vec4 out_color, out float out_depth
/* Get sample closest to target texel. It is always sample 0. */
FilmSample film_sample = film_sample_get(0, texel_film);
if (use_reprojection || film_sample.weight < film_distance) {
/* Using film weight as distance to the pixel, so the check is inverted. */
if (film_sample.weight > film_distance) {
float depth = texelFetch(depth_tx, film_sample.texel, 0).x;
vec4 vector = velocity_resolve(vector_tx, film_sample.texel, depth);
/* Transform to pixel space, matching Cycles format. */