Realtime Compositor: Implement Defocus node

This patch implements the defocus node for the Realtime Compositor. The
implementation does not match the CPU compositor implementation, but
uses a new formulation that is more physically accurate and consistent
with Blender's render engines.

The existing CPU implementation is questionable: its circle of
confusion calculation, the morphological operations applied to the
CoC radius, the fact that the maximum CoC radius is ignored in the
search kernel, and the threshold parameter used to reduce artifacts
are all problematic. It should therefore be reimplemented to follow
the same, more consistent methodology used here.

EEVEE and Workbench already have a GPU defocus method, which can be
ported to the compositor and used as the preview defocus algorithm,
while this implementation will be updated to be a more accurate method
that produces the same structure as the ported EEVEE implementation.

The new formulation ignores the threshold parameter for now, as well as
the preview parameter.

Pull Request: https://projects.blender.org/blender/blender/pulls/116391
This commit is contained in:
Omar Emara
2023-12-21 12:20:38 +01:00
committed by Omar Emara
parent da78dd47e3
commit e055db6605
10 changed files with 550 additions and 6 deletions

View File

@@ -64,6 +64,7 @@ set(SRC
COM_utilities.hh
algorithms/intern/jump_flooding.cc
algorithms/intern/morphological_blur.cc
algorithms/intern/morphological_distance.cc
algorithms/intern/morphological_distance_feather.cc
algorithms/intern/parallel_reduction.cc
@@ -75,6 +76,7 @@ set(SRC
algorithms/intern/transform.cc
algorithms/COM_algorithm_jump_flooding.hh
algorithms/COM_algorithm_morphological_blur.hh
algorithms/COM_algorithm_morphological_distance.hh
algorithms/COM_algorithm_morphological_distance_feather.hh
algorithms/COM_algorithm_parallel_reduction.hh
@@ -136,6 +138,9 @@ set(GLSL_SRC
shaders/compositor_cryptomatte_image.glsl
shaders/compositor_cryptomatte_matte.glsl
shaders/compositor_cryptomatte_pick.glsl
shaders/compositor_defocus_blur.glsl
shaders/compositor_defocus_radius_from_depth.glsl
shaders/compositor_defocus_radius_from_scale.glsl
shaders/compositor_despeckle.glsl
shaders/compositor_directional_blur.glsl
shaders/compositor_displace.glsl
@@ -173,6 +178,7 @@ set(GLSL_SRC
shaders/compositor_kuwahara_anisotropic_compute_structure_tensor.glsl
shaders/compositor_kuwahara_classic.glsl
shaders/compositor_map_uv.glsl
shaders/compositor_morphological_blur.glsl
shaders/compositor_morphological_distance.glsl
shaders/compositor_morphological_distance_feather.glsl
shaders/compositor_morphological_distance_threshold.glsl
@@ -275,6 +281,7 @@ set(SRC_SHADER_CREATE_INFOS
shaders/infos/compositor_compute_preview_info.hh
shaders/infos/compositor_convert_info.hh
shaders/infos/compositor_cryptomatte_info.hh
shaders/infos/compositor_defocus_info.hh
shaders/infos/compositor_despeckle_info.hh
shaders/infos/compositor_directional_blur_info.hh
shaders/infos/compositor_displace_info.hh
@@ -292,6 +299,7 @@ set(SRC_SHADER_CREATE_INFOS
shaders/infos/compositor_keying_screen_info.hh
shaders/infos/compositor_kuwahara_info.hh
shaders/infos/compositor_map_uv_info.hh
shaders/infos/compositor_morphological_blur_info.hh
shaders/infos/compositor_morphological_distance_feather_info.hh
shaders/infos/compositor_morphological_distance_info.hh
shaders/infos/compositor_morphological_distance_threshold_info.hh

View File

@@ -0,0 +1,40 @@
/* SPDX-FileCopyrightText: 2023 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later */
#pragma once
#include <cstdint>
#include "BLI_math_vector_types.hh"
#include "DNA_scene_types.h"
#include "COM_context.hh"
#include "COM_result.hh"
namespace blender::realtime_compositor {
/* Possible morphological operations to apply, see morphological_blur(). */
enum class MorphologicalBlurOperation : uint8_t {
  /* Dilate by taking the maximum of the original input and the blurred input, which means the
   * whites bleed into the blacks while the blacks don't bleed into the whites. */
  Dilate,
  /* Erode by taking the minimum of the original input and the blurred input, which means the
   * blacks bleed into the whites while the whites don't bleed into the blacks. */
  Erode,
};
/* Applies a morphological blur on input using the given radius and filter type. This essentially
 * applies a standard blur operation, but then takes the maximum or minimum from the original input
 * and blurred input depending on the chosen operation, see the MorphologicalBlurOperation enum for
 * more information. The output is written to the given output result, which will be allocated
 * internally and is thus expected not to be previously allocated.
 *
 * The operation defaults to Dilate, which is what callers that omit the argument rely on, e.g. the
 * Defocus node dilates the defocus radius so in-focus regions are not affected by neighbors. */
void morphological_blur(Context &context,
                        Result &input,
                        Result &output,
                        float2 radius,
                        MorphologicalBlurOperation operation = MorphologicalBlurOperation::Dilate,
                        int filter_type = R_FILTER_GAUSS);
} // namespace blender::realtime_compositor

View File

@@ -0,0 +1,69 @@
/* SPDX-FileCopyrightText: 2023 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later */
#include "BLI_assert.h"
#include "BLI_math_vector_types.hh"
#include "GPU_shader.h"
#include "COM_context.hh"
#include "COM_result.hh"
#include "COM_utilities.hh"
#include "COM_algorithm_morphological_blur.hh"
#include "COM_algorithm_symmetric_separable_blur.hh"
namespace blender::realtime_compositor {
/* Map the morphological operation to the name of the compute shader that implements it. */
static const char *get_shader(MorphologicalBlurOperation operation)
{
  if (operation == MorphologicalBlurOperation::Dilate) {
    return "compositor_morphological_blur_dilate";
  }
  if (operation == MorphologicalBlurOperation::Erode) {
    return "compositor_morphological_blur_erode";
  }

  /* All enum values are handled above, so this is unreachable. */
  BLI_assert_unreachable();
  return nullptr;
}
/* Apply the morphological operator (minimum or maximum) on the input and the blurred input. The
 * output is written to the blurred_input in-place. The shader chosen by get_shader() defines an
 * OPERATOR macro that is either min or max, see compositor_morphological_blur_info.hh. */
static void apply_morphological_operator(Context &context,
                                         Result &input,
                                         Result &blurred_input,
                                         MorphologicalBlurOperation operation)
{
  GPUShader *shader = context.get_shader(get_shader(operation));
  GPU_shader_bind(shader);

  input.bind_as_texture(shader, "input_tx");
  /* Bound with read-write access (the `true` argument) since the shader both reads the blurred
   * value and overwrites it with the combined result. */
  blurred_input.bind_as_image(shader, "blurred_input_img", true);

  /* Dispatch one thread per pixel of the input domain. */
  Domain domain = input.domain();
  compute_dispatch_threads_at_least(shader, domain.size);

  GPU_shader_unbind();
  input.unbind_as_texture();
  blurred_input.unbind_as_image();
}
/* See the corresponding declaration in COM_algorithm_morphological_blur.hh for a description of
 * the algorithm. Only float results are supported. */
void morphological_blur(Context &context,
                        Result &input,
                        Result &output,
                        float2 radius,
                        MorphologicalBlurOperation operation,
                        int filter_type)
{
  BLI_assert(input.type() == ResultType::Float);

  /* Blur the input into the output. This allocates the output internally. */
  symmetric_separable_blur(context, input, output, radius, filter_type);
  /* Combine the original input with the blurred output using min or max, writing the result into
   * the output in-place. */
  apply_morphological_operator(context, input, output, operation);
}
} // namespace blender::realtime_compositor

View File

@@ -0,0 +1,74 @@
/* SPDX-FileCopyrightText: 2023 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later */
#pragma BLENDER_REQUIRE(gpu_shader_common_math_utils.glsl)
#pragma BLENDER_REQUIRE(gpu_shader_compositor_blur_common.glsl)
#pragma BLENDER_REQUIRE(gpu_shader_compositor_texture_utilities.glsl)
/* Given the texel in the range [-radius, radius] in both axis, load the appropriate weight from
 * the weights texture, where the given texel (0, 0) corresponds to the center of the weights
 * texture. Note that we load the weights texture inverted along both directions to maintain the
 * shape of the weights if it was not symmetrical. To understand why inversion makes sense,
 * consider a 1D weights texture whose right half is all ones and whose left half is all zeros.
 * Further, consider that we are blurring a single white pixel on a black background. When
 * computing the value of a pixel that is to the right of the white pixel, the white pixel will be
 * in the left region of the search window, and consequently, without inversion, a zero will be
 * sampled from the left side of the weights texture and the result will be zero. However, what we
 * expect is that pixels to the right of the white pixel will be white, that is, they should sample
 * a weight of 1 from the right side of the weights texture, hence the need for inversion. */
vec4 load_weight(ivec2 texel, float radius)
{
  /* Add the radius to transform the texel into the range [0, radius * 2], with an additional 0.5
   * to sample at the center of the pixels, then divide by the upper bound plus one to transform
   * the texel into the normalized range [0, 1] needed to sample the weights sampler. Finally,
   * invert the texture coordinates by subtracting from 1 to maintain the shape of the weights as
   * mentioned in the function description. */
  return texture(weights_tx, 1.0 - ((vec2(texel) + vec2(radius + 0.5)) / (radius * 2.0 + 1.0)));
}
void main()
{
  ivec2 texel = ivec2(gl_GlobalInvocationID.xy);

  float center_radius = texture_load(radius_tx, texel).x;
  /* NOTE(review): center_color is loaded but never used below; the center pixel is already
   * covered by the (x, y) == (0, 0) iteration of the loop. Candidate for removal -- confirm. */
  vec4 center_color = texture_load(input_tx, texel);

  /* Go over the window of the given search radius and accumulate the colors multiplied by their
   * respective weights as well as the weights themselves, but only if both the x and y distances
   * of the candidate pixel are not larger than both the radius of the center pixel and the radius
   * of the candidate pixel. */
  vec4 accumulated_color = vec4(0.0);
  vec4 accumulated_weight = vec4(0.0);
  for (int y = -search_radius; y <= search_radius; y++) {
    for (int x = -search_radius; x <= search_radius; x++) {
      float candidate_radius = texture_load(radius_tx, texel + ivec2(x, y)).x;

      /* Skip accumulation if either the x or y distances of the candidate pixel are larger than
       * either the center or candidate pixel radius. Note that the max and min functions here
       * denote "either" in the aforementioned description. */
      float radius = min(center_radius, candidate_radius);
      if (max(abs(x), abs(y)) > radius) {
        continue;
      }

      vec4 weight = load_weight(ivec2(x, y), radius);

      vec4 input_color = texture_load(input_tx, texel + ivec2(x, y));
      /* Optionally accumulate in a gamma-corrected space, see gamma_correct_blur_input(). */
      if (gamma_correct) {
        input_color = gamma_correct_blur_input(input_color);
      }

      accumulated_color += input_color * weight;
      accumulated_weight += weight;
    }
  }

  /* Normalize by the accumulated weight, guarding against a zero weight sum. */
  accumulated_color = safe_divide(accumulated_color, accumulated_weight);

  if (gamma_correct) {
    accumulated_color = gamma_uncorrect_blur_output(accumulated_color);
  }

  imageStore(output_img, texel, accumulated_color);
}

View File

@@ -0,0 +1,31 @@
/* SPDX-FileCopyrightText: 2022 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later */
/* Given a depth texture, compute the radius of the circle of confusion in pixels based on equation
* (8) of the paper:
*
* Potmesil, Michael, and Indranil Chakravarty. "A lens and aperture camera model for synthetic
* image generation." ACM SIGGRAPH Computer Graphics 15.3 (1981): 297-305. */
#pragma BLENDER_REQUIRE(gpu_shader_compositor_texture_utilities.glsl)
void main()
{
  ivec2 texel = ivec2(gl_GlobalInvocationID.xy);
  float depth = texture_load(depth_tx, texel).x;

  /* Compute Vu in equation (7): the distance from the lens to the image of the object at the
   * loaded depth. */
  const float distance_to_image_of_object = (focal_length * depth) / (depth - focal_length);

  /* Compute C in equation (8). Notice that the last multiplier was included in the absolute since
   * it is negative when the object distance is less than the focal length, as noted in equation
   * (7). */
  float diameter = abs((distance_to_image_of_object - distance_to_image_of_focus) *
                       (focal_length / (f_stop * distance_to_image_of_object)));

  /* The diameter is in meters, so multiply by the pixels per meter to get a radius in pixels. */
  float radius = (diameter / 2.0) * pixels_per_meter;
  /* Clamp to the user-specified maximum blur radius. */
  imageStore(radius_img, texel, vec4(min(max_radius, radius)));
}

View File

@@ -0,0 +1,12 @@
/* SPDX-FileCopyrightText: 2022 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later */
#pragma BLENDER_REQUIRE(gpu_shader_compositor_texture_utilities.glsl)
void main()
{
  ivec2 texel = ivec2(gl_GlobalInvocationID.xy);
  /* The input is treated as a radius map directly: scale it by the user-specified scale factor
   * and clamp to the user-specified maximum blur radius. */
  float radius = texture_load(radius_tx, texel).x;
  imageStore(radius_img, texel, vec4(min(max_radius, radius * scale)));
}

View File

@@ -0,0 +1,15 @@
/* SPDX-FileCopyrightText: 2023 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later */
#pragma BLENDER_REQUIRE(gpu_shader_compositor_texture_utilities.glsl)
void main()
{
  ivec2 texel = ivec2(gl_GlobalInvocationID.xy);
  /* OPERATOR is a macro defined by the shader create info as either min or max, see
   * compositor_morphological_blur_info.hh. The combined result overwrites the blurred input
   * in-place. */
  float input_value = texture_load(input_tx, texel).x;
  float blurred_value = imageLoad(blurred_input_img, texel).x;
  imageStore(blurred_input_img, texel, vec4(OPERATOR(input_value, blurred_value)));
}

View File

@@ -0,0 +1,37 @@
/* SPDX-FileCopyrightText: 2023 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later */
#include "gpu_shader_create_info.hh"
/* Scales the input radius map directly and clamps to the maximum radius, used when the No
 * Z-Buffer option of the Defocus node is enabled. */
GPU_SHADER_CREATE_INFO(compositor_defocus_radius_from_scale)
    .local_group_size(16, 16)
    .push_constant(Type::FLOAT, "scale")
    .push_constant(Type::FLOAT, "max_radius")
    .sampler(0, ImageType::FLOAT_2D, "radius_tx")
    .image(0, GPU_R16F, Qualifier::WRITE, ImageType::FLOAT_2D, "radius_img")
    .compute_source("compositor_defocus_radius_from_scale.glsl")
    .do_static_compilation(true);

/* Computes the circle of confusion radius in pixels from a depth map using thin lens camera
 * parameters, see compositor_defocus_radius_from_depth.glsl. */
GPU_SHADER_CREATE_INFO(compositor_defocus_radius_from_depth)
    .local_group_size(16, 16)
    .push_constant(Type::FLOAT, "f_stop")
    .push_constant(Type::FLOAT, "max_radius")
    .push_constant(Type::FLOAT, "focal_length")
    .push_constant(Type::FLOAT, "pixels_per_meter")
    .push_constant(Type::FLOAT, "distance_to_image_of_focus")
    .sampler(0, ImageType::FLOAT_2D, "depth_tx")
    .image(0, GPU_R16F, Qualifier::WRITE, ImageType::FLOAT_2D, "radius_img")
    .compute_source("compositor_defocus_radius_from_depth.glsl")
    .do_static_compilation(true);

/* Variable-radius blur weighted by a bokeh kernel, with the per-pixel radius supplied by one of
 * the radius shaders above. */
GPU_SHADER_CREATE_INFO(compositor_defocus_blur)
    .local_group_size(16, 16)
    .push_constant(Type::BOOL, "gamma_correct")
    .push_constant(Type::INT, "search_radius")
    .sampler(0, ImageType::FLOAT_2D, "input_tx")
    .sampler(1, ImageType::FLOAT_2D, "weights_tx")
    .sampler(2, ImageType::FLOAT_2D, "radius_tx")
    .image(0, GPU_RGBA16F, Qualifier::WRITE, ImageType::FLOAT_2D, "output_img")
    .compute_source("compositor_defocus_blur.glsl")
    .do_static_compilation(true);

View File

@@ -0,0 +1,21 @@
/* SPDX-FileCopyrightText: 2023 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later */
#include "gpu_shader_create_info.hh"
/* Shared base for the morphological blur variants below: reads the original input as a texture
 * and the blurred input as a read-write image, which is overwritten with the combined result. */
GPU_SHADER_CREATE_INFO(compositor_morphological_blur_shared)
    .local_group_size(16, 16)
    .sampler(0, ImageType::FLOAT_2D, "input_tx")
    .image(0, GPU_R16F, Qualifier::READ_WRITE, ImageType::FLOAT_2D, "blurred_input_img")
    .compute_source("compositor_morphological_blur.glsl");

/* Dilate: combine by taking the maximum of the original and blurred values. */
GPU_SHADER_CREATE_INFO(compositor_morphological_blur_dilate)
    .additional_info("compositor_morphological_blur_shared")
    .define("OPERATOR(x, y)", "max(x, y)")
    .do_static_compilation(true);

/* Erode: combine by taking the minimum of the original and blurred values. */
GPU_SHADER_CREATE_INFO(compositor_morphological_blur_erode)
    .additional_info("compositor_morphological_blur_shared")
    .define("OPERATOR(x, y)", "min(x, y)")
    .do_static_compilation(true);

View File

@@ -8,12 +8,21 @@
#include <climits>
#include "DNA_camera_types.h"
#include "DNA_object_types.h"
#include "DNA_scene_types.h"
#include "BKE_camera.h"
#include "RNA_access.hh"
#include "UI_interface.hh"
#include "UI_resources.hh"
#include "COM_algorithm_morphological_blur.hh"
#include "COM_bokeh_kernel.hh"
#include "COM_node_operation.hh"
#include "COM_utilities.hh"
#include "node_composite_util.hh"
@@ -21,10 +30,15 @@
namespace blender::nodes::node_composite_defocus_cc {
NODE_STORAGE_FUNCS(NodeDefocus)
/* Declare the sockets of the Defocus node. The image drives the domain with highest priority,
 * followed by the Z input. */
static void cmp_node_defocus_declare(NodeDeclarationBuilder &b)
{
  b.add_input<decl::Color>("Image")
      .default_value({1.0f, 1.0f, 1.0f, 1.0f})
      .compositor_domain_priority(0);
  b.add_input<decl::Float>("Z").default_value(1.0f).min(0.0f).max(1.0f).compositor_domain_priority(
      1);
  b.add_output<decl::Color>("Image");
}
@@ -92,8 +106,233 @@ class DefocusOperation : public NodeOperation {
/* Compute the per-pixel defocus radius, then blur the image with a bokeh-shaped variable-radius
 * blur driven by that radius. Single values are passed through unchanged since a blur has no
 * effect on a constant image. */
void execute() override
{
  Result &input = get_input("Image");
  Result &output = get_result("Image");
  if (input.is_single_value()) {
    input.pass_through(output);
    return;
  }

  Result radius = compute_defocus_radius();

  const int maximum_defocus_radius = compute_maximum_defocus_radius();

  /* The special zero value indicates a circle, in which case, the roundness should be set to
   * 1, and the number of sides can be anything and is arbitrarily set to 3. */
  const bool is_circle = node_storage(bnode()).bktype == 0;
  const int2 kernel_size = int2(maximum_defocus_radius * 2);
  const int sides = is_circle ? 3 : node_storage(bnode()).bktype;
  const float rotation = node_storage(bnode()).rotation;
  const float roundness = is_circle ? 1.0f : 0.0f;
  const BokehKernel &bokeh_kernel = context().cache_manager().bokeh_kernels.get(
      context(), kernel_size, sides, rotation, roundness, 0.0f, 0.0f);

  GPUShader *shader = context().get_shader("compositor_defocus_blur");
  GPU_shader_bind(shader);

  GPU_shader_uniform_1b(shader, "gamma_correct", node_storage(bnode()).gamco);
  GPU_shader_uniform_1i(shader, "search_radius", maximum_defocus_radius);

  input.bind_as_texture(shader, "input_tx");
  radius.bind_as_texture(shader, "radius_tx");

  bokeh_kernel.bind_as_texture(shader, "weights_tx");
  /* Enable bilinear filtering since the blur shader samples the kernel at fractional
   * coordinates. */
  GPU_texture_filter_mode(bokeh_kernel.texture(), true);

  const Domain domain = compute_domain();
  output.allocate_texture(domain);
  output.bind_as_image(shader, "output_img");

  compute_dispatch_threads_at_least(shader, domain.size);

  GPU_shader_unbind();
  input.unbind_as_texture();
  radius.unbind_as_texture();
  bokeh_kernel.unbind_as_texture();
  output.unbind_as_image();
  radius.release();
}
/* Compute the per-pixel defocus radius, either by scaling the Z input directly when the No
 * Z-Buffer option is enabled, or from the depth input using the camera parameters otherwise. */
Result compute_defocus_radius()
{
  const bool use_direct_scale = node_storage(bnode()).no_zbuf;
  return use_direct_scale ? compute_defocus_radius_from_scale() :
                            compute_defocus_radius_from_depth();
}
/* Compute the defocus radius by directly scaling the Z input by the user-specified scale and
 * clamping to the maximum blur radius, used when the No Z-Buffer option is enabled. Returns a
 * newly allocated temporary result that the caller is expected to release. */
Result compute_defocus_radius_from_scale()
{
  GPUShader *shader = context().get_shader("compositor_defocus_radius_from_scale");
  GPU_shader_bind(shader);

  GPU_shader_uniform_1f(shader, "scale", node_storage(bnode()).scale);
  GPU_shader_uniform_1f(shader, "max_radius", node_storage(bnode()).maxblur);

  Result &input_radius = get_input("Z");
  input_radius.bind_as_texture(shader, "radius_tx");

  Result output_radius = context().create_temporary_result(ResultType::Float);
  const Domain domain = input_radius.domain();
  output_radius.allocate_texture(domain);
  output_radius.bind_as_image(shader, "radius_img");

  compute_dispatch_threads_at_least(shader, domain.size);

  GPU_shader_unbind();
  input_radius.unbind_as_texture();
  output_radius.unbind_as_image();

  return output_radius;
}
/* Compute the defocus radius from the Z depth input using the circle of confusion model of the
 * camera, then dilate it by the maximum possible defocus radius. Returns a newly allocated
 * temporary result that the caller is expected to release. */
Result compute_defocus_radius_from_depth()
{
  GPUShader *shader = context().get_shader("compositor_defocus_radius_from_depth");
  GPU_shader_bind(shader);

  const float distance_to_image_of_focus = compute_distance_to_image_of_focus();
  GPU_shader_uniform_1f(shader, "f_stop", get_f_stop());
  GPU_shader_uniform_1f(shader, "focal_length", get_focal_length());
  GPU_shader_uniform_1f(shader, "max_radius", node_storage(bnode()).maxblur);
  GPU_shader_uniform_1f(shader, "pixels_per_meter", compute_pixels_per_meter());
  GPU_shader_uniform_1f(shader, "distance_to_image_of_focus", distance_to_image_of_focus);

  Result &input_depth = get_input("Z");
  input_depth.bind_as_texture(shader, "depth_tx");

  Result output_radius = context().create_temporary_result(ResultType::Float);
  const Domain domain = input_depth.domain();
  output_radius.allocate_texture(domain);
  output_radius.bind_as_image(shader, "radius_img");

  compute_dispatch_threads_at_least(shader, domain.size);

  GPU_shader_unbind();
  input_depth.unbind_as_texture();
  output_radius.unbind_as_image();

  /* We apply a dilate morphological operator on the radius computed from depth, the operator
   * radius is the maximum possible defocus radius. This is done such that objects in
   * focus---that is, objects whose defocus radius is small---are not affected by nearby out of
   * focus objects, hence the use of dilation. The operation is passed explicitly so the intent
   * does not silently depend on the algorithm's default argument. */
  const float morphological_radius = compute_maximum_defocus_radius();
  Result dilated_radius = context().create_temporary_result(ResultType::Float);
  morphological_blur(context(),
                     output_radius,
                     dilated_radius,
                     float2(morphological_radius),
                     MorphologicalBlurOperation::Dilate);
  output_radius.release();

  return dilated_radius;
}
/* Computes the maximum possible defocus radius in pixels. */
float compute_maximum_defocus_radius()
{
const float maximum_diameter = compute_maximum_diameter_of_circle_of_confusion();
const float pixels_per_meter = compute_pixels_per_meter();
const float radius = (maximum_diameter / 2.0f) * pixels_per_meter;
return math::min(radius, node_storage(bnode()).maxblur);
}
/* Computes the diameter of the circle of confusion at infinity. This computes the limit in
* figure (5) of the paper:
*
* Potmesil, Michael, and Indranil Chakravarty. "A lens and aperture camera model for synthetic
* image generation." ACM SIGGRAPH Computer Graphics 15.3 (1981): 297-305.
*
* Notice that the diameter is asymmetric around the focus point, and we are computing the
* limiting diameter at infinity, while another limiting diameter exist at zero distance from the
* lens. This is a limitation of the implementation, as it assumes far defocusing only. */
float compute_maximum_diameter_of_circle_of_confusion()
{
const float f_stop = get_f_stop();
const float focal_length = get_focal_length();
const float distance_to_image_of_focus = compute_distance_to_image_of_focus();
return math::abs((distance_to_image_of_focus / (f_stop * focal_length)) -
(focal_length / f_stop));
}
/* Computes the distance in meters to the image of the focus point across a lens of the specified
* focal length. This computes Vp in equation (7) of the paper:
*
* Potmesil, Michael, and Indranil Chakravarty. "A lens and aperture camera model for synthetic
* image generation." ACM SIGGRAPH Computer Graphics 15.3 (1981): 297-305. */
float compute_distance_to_image_of_focus()
{
const float focal_length = get_focal_length();
const float focus_distance = compute_focus_distance();
return (focal_length * focus_distance) / (focus_distance - focal_length);
}
/* Returns the focal length in meters. Falls back to 50 mm in case of an invalid camera. A
 * minimum of 1e-6 is enforced to avoid zero division in the thin lens equations. */
float get_focal_length()
{
  const Camera *camera = get_camera();
  if (!camera) {
    return 50.0f / 1000.0f;
  }
  /* The stored focal length is in millimeters, so convert to meters. */
  return math::max(1e-6f, camera->lens / 1000.0f);
}
/* Computes the distance to the point that is completely in focus, taken from the camera's depth
 * of field settings. NOTE(review): assumes BKE_camera_object_dof_distance handles a null camera
 * object gracefully -- confirm against its implementation. */
float compute_focus_distance()
{
  return BKE_camera_object_dof_distance(get_camera_object());
}
/* Computes the number of pixels per meter of the sensor size. This is essentially the resolution
 * over the sensor size, using the sensor fit axis. Fallback to DEFAULT_SENSOR_WIDTH in case of
 * an invalid camera. Note that the stored sensor size is in millimeter, so convert to meters. */
float compute_pixels_per_meter()
{
  const int2 size = compute_domain().size;
  const Camera *camera = get_camera();

  /* Fallback used for an invalid camera or an unknown sensor fit mode. */
  const float fallback = size.x / (DEFAULT_SENSOR_WIDTH / 1000.0f);
  if (!camera) {
    return fallback;
  }

  if (camera->sensor_fit == CAMERA_SENSOR_FIT_HOR) {
    return size.x / (camera->sensor_x / 1000.0f);
  }
  if (camera->sensor_fit == CAMERA_SENSOR_FIT_VERT) {
    return size.y / (camera->sensor_y / 1000.0f);
  }
  if (camera->sensor_fit == CAMERA_SENSOR_FIT_AUTO) {
    /* Auto fit uses the larger of the two axes. */
    return size.x > size.y ? size.x / (camera->sensor_x / 1000.0f) :
                             size.y / (camera->sensor_y / 1000.0f);
  }

  return fallback;
}
/* Returns the f-stop number. Fallback to 1e-3 for zero f-stop to avoid division by zero in the
 * circle of confusion computations. Note that the return type carries no const qualifier: a
 * top-level const on a by-value return is ignored by the language and was inconsistent with the
 * other getters. */
float get_f_stop()
{
  return math::max(1e-3f, node_storage(bnode()).fstop);
}
/* Returns the camera data of the scene's active camera object, or null if no valid camera object
 * exists. */
const Camera *get_camera()
{
  const Object *camera_object = get_camera_object();
  if (!camera_object) {
    return nullptr;
  }
  if (camera_object->type != OB_CAMERA) {
    return nullptr;
  }
  return reinterpret_cast<Camera *>(camera_object->data);
}
/* Returns the active camera object of the scene the node operates on, possibly null. */
const Object *get_camera_object()
{
  return get_scene()->camera;
}
/* Returns the scene assigned to the node if any, otherwise the scene of the evaluation
 * context. */
const Scene *get_scene()
{
  if (bnode().id) {
    return reinterpret_cast<Scene *>(bnode().id);
  }
  return &context().get_scene();
}
};
@@ -116,8 +355,6 @@ void register_node_type_cmp_defocus()
ntype.initfunc = file_ns::node_composit_init_defocus;
node_type_storage(&ntype, "NodeDefocus", node_free_standard_storage, node_copy_standard_storage);
ntype.get_compositor_operation = file_ns::get_compositor_operation;
ntype.realtime_compositor_unsupported_message = N_(
"Node not supported in the Viewport compositor");
nodeRegisterType(&ntype);
}