VSE: Execute modifiers in strip-local space (#145688)

Currently when a strip has a transform that does not fill the whole
render area, first the image of the strip is transformed, and then
any modifiers are applied on that. This is mostly visible in the new
Compositor modifier, where procedural textures, gradients and image
coordinates "stick to the screen" instead of following the transformed
strip.

This changes the behavior so that first the modifiers are applied
to the strip image, and then the strip is transformed. This is
potentially a visually breaking change:
- This can alter the visual look of existing strips, especially if they
  are scaled. Previous behavior was first scale filtering, then modifier;
  now it is first modifier, then scale filtering.
- Most obvious change is Compositor modifier (which is new in 5.0).
- Compositor modifier can actually expand the input image (e.g. Blur
  node with "expand bounds" option set), and that works.
- Note that Masks continue to be applied in global/screen space. There
  can be small look differences with rotated/scaled strips that use
  masks, due to Mask application now needing to do filtered mask image
  lookups.
- If anyone needs previous behavior (modifier is applied on the
  "whole screen"), they can put transformed strip into a meta strip,
  and apply the modifier on the meta strip itself.

Compositor modifier examples with images in the PR.

Pull Request: https://projects.blender.org/blender/blender/pulls/146181
This commit is contained in:
Aras Pranckevicius
2025-10-07 13:51:41 +02:00
committed by Aras Pranckevicius
parent f145e1f7e2
commit 8a74f7c0b0
33 changed files with 798 additions and 532 deletions

View File

@@ -66,7 +66,7 @@ class Context {
virtual Bounds<int2> get_compositing_region() const = 0;
/* Get the result where the result of the compositor should be written. */
virtual Result get_output() = 0;
virtual Result get_output(Domain domain) = 0;
/* Get the result where the result of the compositor viewer should be written, given the domain
* of the result to be viewed, its precision, and whether the output is a non-color data image
@@ -102,6 +102,12 @@ class Context {
* or support for viewers. */
virtual bool treat_viewer_as_compositor_output() const;
/* True if the compositor input/output should use output region/bounds setup in the context. */
virtual bool use_context_bounds_for_input_output() const
{
return true;
}
/* Populates the given meta data from the render stamp information of the given render pass. */
virtual void populate_meta_data_for_pass(const Scene *scene,
int view_layer_id,

View File

@@ -110,7 +110,7 @@ class Context : public compositor::Context {
.value_or(Bounds<int2>(int2(0)));
}
compositor::Result get_output() override
compositor::Result get_output(compositor::Domain /*domain*/) override
{
compositor::Result result = this->create_result(compositor::ResultType::Color,
compositor::ResultPrecision::Half);

View File

@@ -67,6 +67,7 @@ class GroupInputOperation : public NodeOperation {
else {
this->execute_pass_cpu(pass, result);
}
result.set_transformation(pass.domain().transformation);
}
void execute_pass_gpu(const Result &pass, Result &result)
@@ -125,9 +126,12 @@ class GroupInputOperation : public NodeOperation {
* compositing region into an appropriately sized result. */
const int2 lower_bound = this->context().get_compositing_region().min;
result.allocate_texture(Domain(this->context().get_compositing_region_size()));
const int2 size = this->context().use_context_bounds_for_input_output() ?
this->context().get_compositing_region_size() :
pass.domain().size;
result.allocate_texture(size);
parallel_for(result.domain().size, [&](const int2 texel) {
parallel_for(size, [&](const int2 texel) {
result.store_pixel_generic_type(texel, pass.load_pixel_generic_type(texel + lower_bound));
});
}

View File

@@ -68,7 +68,7 @@ class GroupOutputOperation : public NodeOperation {
float4 color = image.get_single_value<float4>();
const Domain domain = this->compute_domain();
Result output = this->context().get_output();
Result output = this->context().get_output(domain);
if (this->context().use_gpu()) {
GPU_texture_clear(output, GPU_DATA_FLOAT, color);
}
@@ -90,7 +90,7 @@ class GroupOutputOperation : public NodeOperation {
void execute_copy_gpu(const Result &image)
{
const Domain domain = this->compute_domain();
Result output = this->context().get_output();
Result output = this->context().get_output(domain);
gpu::Shader *shader = this->context().get_shader("compositor_write_output",
output.precision());
@@ -114,9 +114,11 @@ class GroupOutputOperation : public NodeOperation {
void execute_copy_cpu(const Result &image)
{
const Domain domain = this->compute_domain();
Result output = this->context().get_output();
Result output = this->context().get_output(domain);
const Bounds<int2> bounds = this->context().get_compositing_region();
const Bounds<int2> bounds = this->context().use_context_bounds_for_input_output() ?
this->context().get_compositing_region() :
Bounds<int2>(int2(0, 0), domain.size);
parallel_for(domain.size, [&](const int2 texel) {
const int2 output_texel = texel + bounds.min;
if (output_texel.x > bounds.max.x || output_texel.y > bounds.max.y) {
@@ -130,7 +132,10 @@ class GroupOutputOperation : public NodeOperation {
* applied. */
Domain compute_domain() override
{
return Domain(this->context().get_compositing_region_size());
if (this->context().use_context_bounds_for_input_output()) {
return Domain(this->context().get_compositing_region_size());
}
return NodeOperation::compute_domain();
}
};

View File

@@ -144,7 +144,9 @@ class ViewerOperation : public NodeOperation {
{
/* Viewers are treated as composite outputs that should be in the bounds of the compositing
* region. */
if (this->context().treat_viewer_as_compositor_output()) {
if (this->context().treat_viewer_as_compositor_output() &&
this->context().use_context_bounds_for_input_output())
{
return this->context().get_compositing_region();
}
@@ -156,7 +158,9 @@ class ViewerOperation : public NodeOperation {
{
/* Viewers are treated as composite outputs that should be in the domain of the compositing
* region. */
if (context().treat_viewer_as_compositor_output()) {
if (this->context().treat_viewer_as_compositor_output() &&
this->context().use_context_bounds_for_input_output())
{
return Domain(context().get_compositing_region_size());
}

View File

@@ -170,7 +170,7 @@ class Context : public compositor::Context {
return Bounds<int2>(int2(0), this->get_render_size());
}
compositor::Result get_output() override
compositor::Result get_output(compositor::Domain /*domain*/) override
{
const int2 render_size = get_render_size();
if (output_result_.is_allocated()) {

View File

@@ -55,10 +55,10 @@ struct StripModifierTypeInfo {
/* copy data from one modifier to another */
void (*copy_data)(StripModifierData *smd, StripModifierData *target);
/* Apply modifier on an image buffer.
* quad contains four corners of the (pre-transform) strip rectangle in pixel space. */
/* Apply modifier on an image buffer. */
void (*apply)(const RenderData *render_data,
const StripScreenQuad &quad,
const Strip *strip,
const float transform[3][3],
StripModifierData *smd,
ImBuf *ibuf,
ImBuf *mask);
@@ -82,11 +82,6 @@ void modifier_clear(Strip *strip);
void modifier_free(StripModifierData *smd);
void modifier_unique_name(Strip *strip, StripModifierData *smd);
StripModifierData *modifier_find_by_name(Strip *strip, const char *name);
void modifier_apply_stack(const RenderData *context,
SeqRenderState *state,
const Strip *strip,
ImBuf *ibuf,
int timeline_frame);
StripModifierData *modifier_copy(Strip &strip_dst, StripModifierData *mod_src);
void modifier_list_copy(Strip *strip_new, Strip *strip);
int sequence_supports_modifiers(Strip *strip);

View File

@@ -29,27 +29,32 @@ struct BrightContrastApplyOp {
float mul;
float add;
template<typename ImageT, typename MaskT>
void apply(ImageT *image, const MaskT *mask, IndexRange size)
template<typename ImageT, typename MaskSampler>
void apply(ImageT *image, MaskSampler &mask, int image_x, IndexRange y_range)
{
for ([[maybe_unused]] int64_t i : size) {
/* NOTE: arguably incorrect usage of "raw" values, should be un-premultiplied.
* Not changing behavior for now, but would be good to fix someday. */
float4 input = load_pixel_raw(image);
image += y_range.first() * image_x * 4;
for (int64_t y : y_range) {
mask.begin_row(y);
for ([[maybe_unused]] int64_t x : IndexRange(image_x)) {
/* NOTE: arguably incorrect usage of "raw" values, should be un-premultiplied.
* Not changing behavior for now, but would be good to fix someday. */
float4 input = load_pixel_raw(image);
float4 result;
result = input * this->mul + this->add;
result.w = input.w;
float4 result;
result = input * this->mul + this->add;
result.w = input.w;
apply_and_advance_mask(input, result, mask);
store_pixel_raw(result, image);
image += 4;
mask.apply_mask(input, result);
store_pixel_raw(result, image);
image += 4;
}
}
}
};
static void brightcontrast_apply(const RenderData * /*render_data*/,
const StripScreenQuad & /*quad*/,
const Strip * /*strip*/,
const float transform[3][3],
StripModifierData *smd,
ImBuf *ibuf,
ImBuf *mask)
@@ -76,7 +81,7 @@ static void brightcontrast_apply(const RenderData * /*render_data*/,
op.add = op.mul * brightness + delta;
}
apply_modifier_op(op, ibuf, mask);
apply_modifier_op(op, ibuf, mask, float3x3(transform));
}
static void brightcontrast_panel_draw(const bContext *C, Panel *panel)

View File

@@ -85,69 +85,79 @@ struct ColorBalanceApplyOp {
float lut[3][CB_TABLE_SIZE];
/* Apply on a byte image via a table lookup. */
template<typename MaskT> void apply(uchar *image, const MaskT *mask, IndexRange size)
template<typename MaskSampler>
void apply(uchar *image, MaskSampler &mask, int image_x, IndexRange y_range)
{
for ([[maybe_unused]] int64_t i : size) {
float4 input = load_pixel_premul(image);
image += y_range.first() * image_x * 4;
for (int64_t y : y_range) {
mask.begin_row(y);
for ([[maybe_unused]] int64_t x : IndexRange(image_x)) {
float4 input = load_pixel_premul(image);
float4 result;
int p0 = int(input.x * (CB_TABLE_SIZE - 1.0f) + 0.5f);
int p1 = int(input.y * (CB_TABLE_SIZE - 1.0f) + 0.5f);
int p2 = int(input.z * (CB_TABLE_SIZE - 1.0f) + 0.5f);
result.x = this->lut[0][p0];
result.y = this->lut[1][p1];
result.z = this->lut[2][p2];
result.w = input.w;
float4 result;
int p0 = int(input.x * (CB_TABLE_SIZE - 1.0f) + 0.5f);
int p1 = int(input.y * (CB_TABLE_SIZE - 1.0f) + 0.5f);
int p2 = int(input.z * (CB_TABLE_SIZE - 1.0f) + 0.5f);
result.x = this->lut[0][p0];
result.y = this->lut[1][p1];
result.z = this->lut[2][p2];
result.w = input.w;
apply_and_advance_mask(input, result, mask);
store_pixel_premul(result, image);
image += 4;
mask.apply_mask(input, result);
store_pixel_premul(result, image);
image += 4;
}
}
}
/* Apply on a float image by doing full math. */
template<typename MaskT> void apply(float *image, const MaskT *mask, IndexRange size)
template<typename MaskSampler>
void apply(float *image, MaskSampler &mask, int image_x, IndexRange y_range)
{
if (this->method == SEQ_COLOR_BALANCE_METHOD_LIFTGAMMAGAIN) {
/* Lift/Gamma/Gain */
for ([[maybe_unused]] int64_t i : size) {
float4 input = load_pixel_premul(image);
image += y_range.first() * image_x * 4;
for (int64_t y : y_range) {
mask.begin_row(y);
if (this->method == SEQ_COLOR_BALANCE_METHOD_LIFTGAMMAGAIN) {
/* Lift/Gamma/Gain */
for ([[maybe_unused]] int64_t x : IndexRange(image_x)) {
float4 input = load_pixel_premul(image);
float4 result;
result.x = color_balance_lgg(
input.x, this->lift.x, this->gain.x, this->gamma.x, this->multiplier);
result.y = color_balance_lgg(
input.y, this->lift.y, this->gain.y, this->gamma.y, this->multiplier);
result.z = color_balance_lgg(
input.z, this->lift.z, this->gain.z, this->gamma.z, this->multiplier);
result.w = input.w;
float4 result;
result.x = color_balance_lgg(
input.x, this->lift.x, this->gain.x, this->gamma.x, this->multiplier);
result.y = color_balance_lgg(
input.y, this->lift.y, this->gain.y, this->gamma.y, this->multiplier);
result.z = color_balance_lgg(
input.z, this->lift.z, this->gain.z, this->gamma.z, this->multiplier);
result.w = input.w;
apply_and_advance_mask(input, result, mask);
store_pixel_premul(result, image);
image += 4;
mask.apply_mask(input, result);
store_pixel_premul(result, image);
image += 4;
}
}
}
else if (this->method == SEQ_COLOR_BALANCE_METHOD_SLOPEOFFSETPOWER) {
/* Slope/Offset/Power */
for ([[maybe_unused]] int64_t i : size) {
float4 input = load_pixel_premul(image);
else if (this->method == SEQ_COLOR_BALANCE_METHOD_SLOPEOFFSETPOWER) {
/* Slope/Offset/Power */
for ([[maybe_unused]] int64_t x : IndexRange(image_x)) {
float4 input = load_pixel_premul(image);
float4 result;
result.x = color_balance_sop(
input.x, this->slope.x, this->offset.x, this->power.x, this->multiplier);
result.y = color_balance_sop(
input.y, this->slope.y, this->offset.y, this->power.y, this->multiplier);
result.z = color_balance_sop(
input.z, this->slope.z, this->offset.z, this->power.z, this->multiplier);
result.w = input.w;
float4 result;
result.x = color_balance_sop(
input.x, this->slope.x, this->offset.x, this->power.x, this->multiplier);
result.y = color_balance_sop(
input.y, this->slope.y, this->offset.y, this->power.y, this->multiplier);
result.z = color_balance_sop(
input.z, this->slope.z, this->offset.z, this->power.z, this->multiplier);
result.w = input.w;
apply_and_advance_mask(input, result, mask);
store_pixel_premul(result, image);
image += 4;
mask.apply_mask(input, result);
store_pixel_premul(result, image);
image += 4;
}
}
else {
BLI_assert_unreachable();
}
}
else {
BLI_assert_unreachable();
}
}
@@ -245,7 +255,8 @@ static void colorBalance_init_data(StripModifierData *smd)
}
static void colorBalance_apply(const RenderData * /*render_data*/,
const StripScreenQuad & /*quad*/,
const Strip * /*strip*/,
const float transform[3][3],
StripModifierData *smd,
ImBuf *ibuf,
ImBuf *mask)
@@ -254,7 +265,7 @@ static void colorBalance_apply(const RenderData * /*render_data*/,
ColorBalanceApplyOp op;
op.init(*cbmd, ibuf->byte_buffer.data != nullptr);
apply_modifier_op(op, ibuf, mask);
apply_modifier_op(op, ibuf, mask, float3x3(transform));
}
static void colorBalance_panel_draw(const bContext *C, Panel *panel)

View File

@@ -24,6 +24,7 @@
#include "SEQ_modifier.hh"
#include "SEQ_modifiertypes.hh"
#include "SEQ_render.hh"
#include "SEQ_transform.hh"
#include "UI_interface.hh"
#include "UI_interface_layout.hh"
@@ -42,18 +43,26 @@ class CompositorContext : public compositor::Context {
ImBuf *image_buffer_;
ImBuf *mask_buffer_;
float3x3 xform_;
public:
CompositorContext(const RenderData &render_data,
const SequencerCompositorModifierData *modifier_data,
ImBuf *image_buffer,
ImBuf *mask_buffer)
ImBuf *mask_buffer,
const Strip *strip)
: compositor::Context(),
render_data_(render_data),
modifier_data_(modifier_data),
image_buffer_(image_buffer),
mask_buffer_(mask_buffer)
mask_buffer_(mask_buffer),
xform_(float3x3::identity())
{
if (mask_buffer) {
/* Note: do not use passed transform matrix since compositor coordinate
* space is not from the image corner, but rather centered on the image. */
xform_ = math::invert(image_transform_matrix_get(render_data.scene, strip));
}
}
const Scene &get_scene() const override
@@ -80,24 +89,45 @@ class CompositorContext : public compositor::Context {
return true;
}
bool use_context_bounds_for_input_output() const override
{
return false;
}
Bounds<int2> get_compositing_region() const override
{
return Bounds<int2>(int2(0), int2(image_buffer_->x, image_buffer_->y));
}
compositor::Result get_output() override
compositor::Result get_output(compositor::Domain domain) override
{
compositor::Result result = this->create_result(compositor::ResultType::Color);
if (domain.size.x != image_buffer_->x || domain.size.y != image_buffer_->y) {
/* Output size is different (e.g. image is blurred with expanded bounds);
* need to allocate appropriately sized buffer. */
IMB_free_all_data(image_buffer_);
image_buffer_->x = domain.size.x;
image_buffer_->y = domain.size.y;
IMB_alloc_float_pixels(image_buffer_, 4, false);
}
result.wrap_external(image_buffer_->float_buffer.data,
int2(image_buffer_->x, image_buffer_->y));
return result;
}
compositor::Result get_viewer_output(compositor::Domain /*domain*/,
compositor::Result get_viewer_output(compositor::Domain domain,
bool /*is_data*/,
compositor::ResultPrecision /*precision*/) override
{
compositor::Result result = this->create_result(compositor::ResultType::Color);
if (domain.size.x != image_buffer_->x || domain.size.y != image_buffer_->y) {
/* Output size is different (e.g. image is blurred with expanded bounds);
* need to allocate appropriately sized buffer. */
IMB_free_all_data(image_buffer_);
image_buffer_->x = domain.size.x;
image_buffer_->y = domain.size.y;
IMB_alloc_float_pixels(image_buffer_, 4, false);
}
result.wrap_external(image_buffer_->float_buffer.data,
int2(image_buffer_->x, image_buffer_->y));
return result;
@@ -114,6 +144,7 @@ class CompositorContext : public compositor::Context {
else if (name == "Mask" && mask_buffer_) {
result.wrap_external(mask_buffer_->float_buffer.data,
int2(mask_buffer_->x, mask_buffer_->y));
result.set_transformation(xform_);
}
return result;
@@ -169,7 +200,8 @@ static bool ensure_linear_float_buffer(ImBuf *ibuf)
}
static void compositor_modifier_apply(const RenderData *render_data,
const StripScreenQuad & /*quad*/,
const Strip *strip,
const float /*transform*/[3][3],
StripModifierData *strip_modifier_data,
ImBuf *image_buffer,
ImBuf *mask)
@@ -189,7 +221,7 @@ static void compositor_modifier_apply(const RenderData *render_data,
const bool was_float_linear = ensure_linear_float_buffer(image_buffer);
const bool was_byte = image_buffer->float_buffer.data == nullptr;
CompositorContext context(*render_data, modifier_data, image_buffer, linear_mask);
CompositorContext context(*render_data, modifier_data, image_buffer, linear_mask, strip);
compositor::Evaluator evaluator(context);
evaluator.evaluate();

View File

@@ -48,25 +48,30 @@ static void curves_copy_data(StripModifierData *target, StripModifierData *smd)
struct CurvesApplyOp {
const CurveMapping *curve_mapping;
template<typename ImageT, typename MaskT>
void apply(ImageT *image, const MaskT *mask, IndexRange size)
template<typename ImageT, typename MaskSampler>
void apply(ImageT *image, MaskSampler &mask, int image_x, IndexRange y_range)
{
for ([[maybe_unused]] int64_t i : size) {
float4 input = load_pixel_premul(image);
image += y_range.first() * image_x * 4;
for (int64_t y : y_range) {
mask.begin_row(y);
for ([[maybe_unused]] int64_t x : IndexRange(image_x)) {
float4 input = load_pixel_premul(image);
float4 result;
BKE_curvemapping_evaluate_premulRGBF(this->curve_mapping, result, input);
result.w = input.w;
float4 result;
BKE_curvemapping_evaluate_premulRGBF(this->curve_mapping, result, input);
result.w = input.w;
apply_and_advance_mask(input, result, mask);
store_pixel_premul(result, image);
image += 4;
mask.apply_mask(input, result);
store_pixel_premul(result, image);
image += 4;
}
}
}
};
static void curves_apply(const RenderData * /*render_data*/,
const StripScreenQuad & /*quad*/,
const Strip * /*strip*/,
const float transform[3][3],
StripModifierData *smd,
ImBuf *ibuf,
ImBuf *mask)
@@ -83,7 +88,7 @@ static void curves_apply(const RenderData * /*render_data*/,
CurvesApplyOp op;
op.curve_mapping = &cmd->curve_mapping;
apply_modifier_op(op, ibuf, mask);
apply_modifier_op(op, ibuf, mask, float3x3(transform));
BKE_curvemapping_premultiply(&cmd->curve_mapping, true);
}

View File

@@ -61,47 +61,52 @@ static void hue_correct_copy_data(StripModifierData *target, StripModifierData *
struct HueCorrectApplyOp {
const CurveMapping *curve_mapping;
template<typename ImageT, typename MaskT>
void apply(ImageT *image, const MaskT *mask, IndexRange size)
template<typename ImageT, typename MaskSampler>
void apply(ImageT *image, MaskSampler &mask, int image_x, IndexRange y_range)
{
for ([[maybe_unused]] int64_t i : size) {
/* NOTE: arguably incorrect usage of "raw" values, should be un-premultiplied.
* Not changing behavior for now, but would be good to fix someday. */
float4 input = load_pixel_raw(image);
float4 result;
result.w = input.w;
image += y_range.first() * image_x * 4;
for (int64_t y : y_range) {
mask.begin_row(y);
for ([[maybe_unused]] int64_t x : IndexRange(image_x)) {
/* NOTE: arguably incorrect usage of "raw" values, should be un-premultiplied.
* Not changing behavior for now, but would be good to fix someday. */
float4 input = load_pixel_raw(image);
float4 result;
result.w = input.w;
float3 hsv;
rgb_to_hsv(input.x, input.y, input.z, &hsv.x, &hsv.y, &hsv.z);
float3 hsv;
rgb_to_hsv(input.x, input.y, input.z, &hsv.x, &hsv.y, &hsv.z);
/* adjust hue, scaling returned default 0.5 up to 1 */
float f;
f = BKE_curvemapping_evaluateF(this->curve_mapping, 0, hsv.x);
hsv.x += f - 0.5f;
/* adjust hue, scaling returned default 0.5 up to 1 */
float f;
f = BKE_curvemapping_evaluateF(this->curve_mapping, 0, hsv.x);
hsv.x += f - 0.5f;
/* adjust saturation, scaling returned default 0.5 up to 1 */
f = BKE_curvemapping_evaluateF(this->curve_mapping, 1, hsv.x);
hsv.y *= (f * 2.0f);
/* adjust saturation, scaling returned default 0.5 up to 1 */
f = BKE_curvemapping_evaluateF(this->curve_mapping, 1, hsv.x);
hsv.y *= (f * 2.0f);
/* adjust value, scaling returned default 0.5 up to 1 */
f = BKE_curvemapping_evaluateF(this->curve_mapping, 2, hsv.x);
hsv.z *= (f * 2.0f);
/* adjust value, scaling returned default 0.5 up to 1 */
f = BKE_curvemapping_evaluateF(this->curve_mapping, 2, hsv.x);
hsv.z *= (f * 2.0f);
hsv.x = hsv.x - floorf(hsv.x); /* mod 1.0 */
hsv.y = math::clamp(hsv.y, 0.0f, 1.0f);
hsv.x = hsv.x - floorf(hsv.x); /* mod 1.0 */
hsv.y = math::clamp(hsv.y, 0.0f, 1.0f);
/* convert back to rgb */
hsv_to_rgb(hsv.x, hsv.y, hsv.z, &result.x, &result.y, &result.z);
/* convert back to rgb */
hsv_to_rgb(hsv.x, hsv.y, hsv.z, &result.x, &result.y, &result.z);
apply_and_advance_mask(input, result, mask);
store_pixel_raw(result, image);
image += 4;
mask.apply_mask(input, result);
store_pixel_raw(result, image);
image += 4;
}
}
}
};
static void hue_correct_apply(const RenderData * /*render_data*/,
const StripScreenQuad & /*quad*/,
const Strip * /*strip*/,
const float transform[3][3],
StripModifierData *smd,
ImBuf *ibuf,
ImBuf *mask)
@@ -112,7 +117,7 @@ static void hue_correct_apply(const RenderData * /*render_data*/,
HueCorrectApplyOp op;
op.curve_mapping = &hcmd->curve_mapping;
apply_modifier_op(op, ibuf, mask);
apply_modifier_op(op, ibuf, mask, float3x3(transform));
}
static void hue_correct_panel_draw(const bContext *C, Panel *panel)

View File

@@ -7,6 +7,7 @@
*/
#include "BLI_math_base.h"
#include "BLI_math_matrix.hh"
#include "BLT_translation.hh"
@@ -14,6 +15,8 @@
#include "DNA_sequence_types.h"
#include "SEQ_modifier.hh"
#include "SEQ_render.hh"
#include "SEQ_transform.hh"
#include "UI_interface.hh"
#include "UI_interface_layout.hh"
@@ -22,49 +25,37 @@
namespace blender::seq {
static float load_mask_min(const uchar *&mask)
{
float m = float(min_iii(mask[0], mask[1], mask[2])) * (1.0f / 255.0f);
mask += 4;
return m;
}
static float load_mask_min(const float *&mask)
{
float m = min_fff(mask[0], mask[1], mask[2]);
mask += 4;
return m;
}
static float load_mask_min(const void *& /*mask*/)
{
return 1.0f;
}
struct MaskApplyOp {
template<typename ImageT, typename MaskT>
void apply(ImageT *image, const MaskT *mask, IndexRange size)
template<typename ImageT, typename MaskSampler>
void apply(ImageT *image, MaskSampler &mask, int image_x, IndexRange y_range)
{
for ([[maybe_unused]] int64_t i : size) {
float m = load_mask_min(mask);
image += y_range.first() * image_x * 4;
for (int64_t y : y_range) {
mask.begin_row(y);
for ([[maybe_unused]] int64_t x : IndexRange(image_x)) {
float m = mask.load_mask_min();
if constexpr (std::is_same_v<ImageT, uchar>) {
/* Byte buffer is straight, so only affect on alpha itself, this is
* the only way to alpha-over byte strip after applying mask modifier. */
image[3] = uchar(image[3] * m);
if constexpr (std::is_same_v<ImageT, uchar>) {
/* Byte buffer is straight, so only affect on alpha itself, this is
* the only way to alpha-over byte strip after applying mask modifier. */
image[3] = uchar(image[3] * m);
}
else if constexpr (std::is_same_v<ImageT, float>) {
/* Float buffers are premultiplied, so need to premul color as well to make it
* easy to alpha-over masked strip. */
float4 pix(image);
pix *= m;
*reinterpret_cast<float4 *>(image) = pix;
}
image += 4;
}
else if constexpr (std::is_same_v<ImageT, float>) {
/* Float buffers are premultiplied, so need to premul color as well to make it
* easy to alpha-over masked strip. */
float4 pix(image);
pix *= m;
*reinterpret_cast<float4 *>(image) = pix;
}
image += 4;
}
}
};
static void maskmodifier_apply(const RenderData * /*render_data*/,
const StripScreenQuad & /*quad*/,
static void maskmodifier_apply(const RenderData * /* render_data */,
const Strip * /*strip*/,
const float transform[3][3],
StripModifierData * /*smd*/,
ImBuf *ibuf,
ImBuf *mask)
@@ -75,7 +66,7 @@ static void maskmodifier_apply(const RenderData * /*render_data*/,
}
MaskApplyOp op;
apply_modifier_op(op, ibuf, mask);
apply_modifier_op(op, ibuf, mask, float3x3(transform));
/* Image has gained transparency. */
ibuf->planes = R_IMF_PLANES_RGBA;

View File

@@ -7,7 +7,6 @@
*/
#include "BLI_array.hh"
#include "BLI_math_geom.h"
#include "BLT_translation.hh"
@@ -76,14 +75,6 @@ static void pixels_to_scene_linear_byte(const ColorSpace *colorspace,
(float *)dst, int(count), 1, 4, colorspace, false);
}
static void scene_linear_to_image_chunk_float(ImBuf *ibuf, IndexRange range)
{
const ColorSpace *colorspace = ibuf->float_buffer.colorspace;
float4 *fptr = reinterpret_cast<float4 *>(ibuf->float_buffer.data);
IMB_colormanagement_scene_linear_to_colorspace(
(float *)(fptr + range.first()), int(range.size()), 1, 4, colorspace);
}
static void scene_linear_to_image_chunk_byte(float4 *src, ImBuf *ibuf, IndexRange range)
{
const ColorSpace *colorspace = ibuf->byte_buffer.colorspace;
@@ -97,104 +88,6 @@ static void scene_linear_to_image_chunk_byte(float4 *src, ImBuf *ibuf, IndexRang
}
}
static void tonemap_simple(float4 *scene_linear,
ImBuf *mask,
IndexRange range,
const AvgLogLum &avg)
{
const float4 *mask_float = mask != nullptr ? (const float4 *)mask->float_buffer.data : nullptr;
const uchar4 *mask_byte = mask != nullptr ? (const uchar4 *)mask->byte_buffer.data : nullptr;
int64_t index = 0;
for (const int64_t pixel_index : range) {
float4 input = scene_linear[index];
/* Apply correction. */
float3 pixel = input.xyz() * avg.al;
float3 d = pixel + avg.tmmd->offset;
pixel.x /= (d.x == 0.0f) ? 1.0f : d.x;
pixel.y /= (d.y == 0.0f) ? 1.0f : d.y;
pixel.z /= (d.z == 0.0f) ? 1.0f : d.z;
const float igm = avg.igm;
if (igm != 0.0f) {
pixel.x = powf(math::max(pixel.x, 0.0f), igm);
pixel.y = powf(math::max(pixel.y, 0.0f), igm);
pixel.z = powf(math::max(pixel.z, 0.0f), igm);
}
/* Apply mask. */
if (mask != nullptr) {
float3 msk(1.0f);
if (mask_byte != nullptr) {
rgb_uchar_to_float(msk, mask_byte[pixel_index]);
}
else if (mask_float != nullptr) {
msk = mask_float[pixel_index].xyz();
}
pixel = math::interpolate(input.xyz(), pixel, msk);
}
scene_linear[index] = float4(pixel.x, pixel.y, pixel.z, input.w);
index++;
}
}
static void tonemap_rd_photoreceptor(float4 *scene_linear,
ImBuf *mask,
IndexRange range,
const AvgLogLum &avg)
{
const float4 *mask_float = mask != nullptr ? (const float4 *)mask->float_buffer.data : nullptr;
const uchar4 *mask_byte = mask != nullptr ? (const uchar4 *)mask->byte_buffer.data : nullptr;
const float f = expf(-avg.tmmd->intensity);
const float m = (avg.tmmd->contrast > 0.0f) ? avg.tmmd->contrast :
(0.3f + 0.7f * powf(avg.auto_key, 1.4f));
const float ic = 1.0f - avg.tmmd->correction, ia = 1.0f - avg.tmmd->adaptation;
int64_t index = 0;
for (const int64_t pixel_index : range) {
float4 input = scene_linear[index];
/* Apply correction. */
float3 pixel = input.xyz();
const float L = IMB_colormanagement_get_luminance(pixel);
float I_l = pixel.x + ic * (L - pixel.x);
float I_g = avg.cav.x + ic * (avg.lav - avg.cav.x);
float I_a = I_l + ia * (I_g - I_l);
pixel.x /= std::max(pixel.x + powf(f * I_a, m), 1.0e-30f);
I_l = pixel.y + ic * (L - pixel.y);
I_g = avg.cav.y + ic * (avg.lav - avg.cav.y);
I_a = I_l + ia * (I_g - I_l);
pixel.y /= std::max(pixel.y + powf(f * I_a, m), 1.0e-30f);
I_l = pixel.z + ic * (L - pixel.z);
I_g = avg.cav.z + ic * (avg.lav - avg.cav.z);
I_a = I_l + ia * (I_g - I_l);
pixel.z /= std::max(pixel.z + powf(f * I_a, m), 1.0e-30f);
/* Apply mask. */
if (mask != nullptr) {
float3 msk(1.0f);
if (mask_byte != nullptr) {
rgb_uchar_to_float(msk, mask_byte[pixel_index]);
}
else if (mask_float != nullptr) {
msk = mask_float[pixel_index].xyz();
}
pixel = math::interpolate(input.xyz(), pixel, msk);
}
scene_linear[index] = float4(pixel.x, pixel.y, pixel.z, input.w);
index++;
}
}
static bool is_point_inside_quad(const StripScreenQuad &quad, int x, int y)
{
float2 pt(x + 0.5f, y + 0.5f);
return isect_point_quad_v2(pt, quad.v0, quad.v1, quad.v2, quad.v3);
}
struct AreaLuminance {
int64_t pixel_count = 0;
double sum = 0.0f;
@@ -204,41 +97,150 @@ struct AreaLuminance {
float max = -FLT_MAX;
};
static void tonemap_calc_chunk_luminance(const StripScreenQuad &quad,
const bool all_pixels_inside_quad,
const int width,
const IndexRange y_range,
const float4 *scene_linear,
AreaLuminance &r_lum)
static void scene_linear_to_image_chunk_float(ImBuf *ibuf, IndexRange range)
{
for (const int y : y_range) {
for (int x = 0; x < width; x++) {
if (all_pixels_inside_quad || is_point_inside_quad(quad, x, y)) {
float4 pixel = *scene_linear;
r_lum.pixel_count++;
float L = IMB_colormanagement_get_luminance(pixel);
r_lum.sum += L;
r_lum.color_sum.x += pixel.x;
r_lum.color_sum.y += pixel.y;
r_lum.color_sum.z += pixel.z;
r_lum.log_sum += logf(math::max(L, 0.0f) + 1e-5f);
r_lum.max = math::max(r_lum.max, L);
r_lum.min = math::min(r_lum.min, L);
const ColorSpace *colorspace = ibuf->float_buffer.colorspace;
float4 *fptr = reinterpret_cast<float4 *>(ibuf->float_buffer.data);
IMB_colormanagement_scene_linear_to_colorspace(
(float *)(fptr + range.first()), int(range.size()), 1, 4, colorspace);
}
template<typename MaskSampler>
static void tonemap_simple(
float4 *scene_linear, MaskSampler &mask, int image_x, IndexRange y_range, const AvgLogLum &avg)
{
for (int64_t y : y_range) {
mask.begin_row(y);
for ([[maybe_unused]] int64_t x : IndexRange(image_x)) {
float4 input = *scene_linear;
/* Apply correction. */
float3 pixel = input.xyz() * avg.al;
float3 d = pixel + avg.tmmd->offset;
pixel.x /= (d.x == 0.0f) ? 1.0f : d.x;
pixel.y /= (d.y == 0.0f) ? 1.0f : d.y;
pixel.z /= (d.z == 0.0f) ? 1.0f : d.z;
const float igm = avg.igm;
if (igm != 0.0f) {
pixel.x = powf(math::max(pixel.x, 0.0f), igm);
pixel.y = powf(math::max(pixel.y, 0.0f), igm);
pixel.z = powf(math::max(pixel.z, 0.0f), igm);
}
/* Apply mask. */
float4 result(pixel.x, pixel.y, pixel.z, input.w);
mask.apply_mask(input, result);
*scene_linear = result;
scene_linear++;
}
}
}
static AreaLuminance tonemap_calc_input_luminance(const StripScreenQuad &quad, const ImBuf *ibuf)
template<typename MaskSampler>
static void tonemap_rd_photoreceptor(
float4 *scene_linear, MaskSampler &mask, int image_x, IndexRange y_range, const AvgLogLum &avg)
{
/* Pixels outside the pre-transform strip area are ignored for luminance calculations.
* If strip area covers whole image, we can trivially accept all pixels. */
const bool all_pixels_inside_quad = is_point_inside_quad(quad, 0, 0) &&
is_point_inside_quad(quad, ibuf->x - 1, 0) &&
is_point_inside_quad(quad, 0, ibuf->y - 1) &&
is_point_inside_quad(quad, ibuf->x - 1, ibuf->y - 1);
const float f = expf(-avg.tmmd->intensity);
const float m = (avg.tmmd->contrast > 0.0f) ? avg.tmmd->contrast :
(0.3f + 0.7f * powf(avg.auto_key, 1.4f));
const float ic = 1.0f - avg.tmmd->correction, ia = 1.0f - avg.tmmd->adaptation;
for (int64_t y : y_range) {
mask.begin_row(y);
for ([[maybe_unused]] int64_t x : IndexRange(image_x)) {
float4 input = *scene_linear;
/* Apply correction. */
float3 pixel = input.xyz();
const float L = IMB_colormanagement_get_luminance(pixel);
float I_l = pixel.x + ic * (L - pixel.x);
float I_g = avg.cav.x + ic * (avg.lav - avg.cav.x);
float I_a = I_l + ia * (I_g - I_l);
pixel.x /= std::max(pixel.x + powf(f * I_a, m), 1.0e-30f);
I_l = pixel.y + ic * (L - pixel.y);
I_g = avg.cav.y + ic * (avg.lav - avg.cav.y);
I_a = I_l + ia * (I_g - I_l);
pixel.y /= std::max(pixel.y + powf(f * I_a, m), 1.0e-30f);
I_l = pixel.z + ic * (L - pixel.z);
I_g = avg.cav.z + ic * (avg.lav - avg.cav.z);
I_a = I_l + ia * (I_g - I_l);
pixel.z /= std::max(pixel.z + powf(f * I_a, m), 1.0e-30f);
/* Apply mask. */
float4 result(pixel.x, pixel.y, pixel.z, input.w);
mask.apply_mask(input, result);
*scene_linear = result;
scene_linear++;
}
}
}
/* Parallel-for payload that applies the selected tonemap operator to a
 * chunk of image rows, dispatching on pixel data type (byte vs float). */
struct TonemapApplyOp {
  /* Luminance statistics of the input image. */
  AreaLuminance lum;
  /* Derived tonemap parameters (key scale, inverse gamma, averages). */
  AvgLogLum data;
  /* Which tonemap algorithm to run. */
  eModTonemapType type;
  /* Image that is modified in place. */
  ImBuf *ibuf;
  /* Tonemap rows `y_range` of `image` (`image_x` pixels per row).
   * Float images are assumed to already be scene-linear (the luminance
   * pass converted them); byte images go through a temporary scene-linear
   * buffer and are converted back afterwards. */
  template<typename ImageT, typename MaskSampler>
  void apply(ImageT *image, MaskSampler &mask, int image_x, IndexRange y_range)
  {
    /* Flat pixel index range covered by these rows. */
    const IndexRange pixel_range(y_range.first() * image_x, y_range.size() * image_x);
    if constexpr (std::is_same_v<ImageT, float>) {
      /* Float pixels: no need for temporary storage. Luminance calculation already converted
       * data to scene linear. */
      float4 *pixels = (float4 *)(image + y_range.first() * image_x * 4);
      if (this->type == SEQ_TONEMAP_RD_PHOTORECEPTOR) {
        tonemap_rd_photoreceptor(pixels, mask, image_x, y_range, data);
      }
      else {
        BLI_assert(this->type == SEQ_TONEMAP_RH_SIMPLE);
        tonemap_simple(pixels, mask, image_x, y_range, data);
      }
      /* Convert the processed chunk back to the image color space. */
      scene_linear_to_image_chunk_float(this->ibuf, pixel_range);
    }
    else {
      /* Byte pixels: temporary storage for scene linear pixel values. */
      Array<float4> scene_linear(pixel_range.size());
      pixels_to_scene_linear_byte(ibuf->byte_buffer.colorspace,
                                  ibuf->byte_buffer.data + pixel_range.first() * 4,
                                  scene_linear.data(),
                                  pixel_range.size());
      if (this->type == SEQ_TONEMAP_RD_PHOTORECEPTOR) {
        tonemap_rd_photoreceptor(scene_linear.data(), mask, image_x, y_range, data);
      }
      else {
        BLI_assert(this->type == SEQ_TONEMAP_RH_SIMPLE);
        tonemap_simple(scene_linear.data(), mask, image_x, y_range, data);
      }
      /* Write the tonemapped scene-linear values back as bytes. */
      scene_linear_to_image_chunk_byte(scene_linear.data(), this->ibuf, pixel_range);
    }
  }
};
static void tonemap_calc_chunk_luminance(const int width,
const IndexRange y_range,
const float4 *scene_linear,
AreaLuminance &r_lum)
{
for ([[maybe_unused]] const int y : y_range) {
for (int x = 0; x < width; x++) {
float4 pixel = *scene_linear;
r_lum.pixel_count++;
float L = IMB_colormanagement_get_luminance(pixel);
r_lum.sum += L;
r_lum.color_sum.x += pixel.x;
r_lum.color_sum.y += pixel.y;
r_lum.color_sum.z += pixel.z;
r_lum.log_sum += logf(math::max(L, 0.0f) + 1e-5f);
r_lum.max = math::max(r_lum.max, L);
r_lum.min = math::min(r_lum.min, L);
scene_linear++;
}
}
}
static AreaLuminance tonemap_calc_input_luminance(const ImBuf *ibuf)
{
AreaLuminance lum;
lum = threading::parallel_reduce(
IndexRange(ibuf->y),
@@ -254,15 +256,14 @@ static AreaLuminance tonemap_calc_input_luminance(const StripScreenQuad &quad, c
float4 *fptr = reinterpret_cast<float4 *>(ibuf->float_buffer.data);
fptr += y_range.first() * ibuf->x;
pixels_to_scene_linear_float(ibuf->float_buffer.colorspace, fptr, chunk_size);
tonemap_calc_chunk_luminance(quad, all_pixels_inside_quad, ibuf->x, y_range, fptr, lum);
tonemap_calc_chunk_luminance(ibuf->x, y_range, fptr, lum);
}
else {
const uchar *bptr = ibuf->byte_buffer.data + y_range.first() * ibuf->x * 4;
Array<float4> scene_linear(chunk_size);
pixels_to_scene_linear_byte(
ibuf->byte_buffer.colorspace, bptr, scene_linear.data(), chunk_size);
tonemap_calc_chunk_luminance(
quad, all_pixels_inside_quad, ibuf->x, y_range, scene_linear.data(), lum);
tonemap_calc_chunk_luminance(ibuf->x, y_range, scene_linear.data(), lum);
}
return lum;
},
@@ -281,64 +282,36 @@ static AreaLuminance tonemap_calc_input_luminance(const StripScreenQuad &quad, c
}
static void tonemapmodifier_apply(const RenderData * /*render_data*/,
const StripScreenQuad &quad,
const Strip * /*strip*/,
const float transform[3][3],
StripModifierData *smd,
ImBuf *ibuf,
ImBuf *mask)
{
const SequencerTonemapModifierData *tmmd = (const SequencerTonemapModifierData *)smd;
AreaLuminance lum = tonemap_calc_input_luminance(quad, ibuf);
if (lum.pixel_count == 0) {
TonemapApplyOp op;
op.type = eModTonemapType(tmmd->type);
op.ibuf = ibuf;
op.lum = tonemap_calc_input_luminance(ibuf);
if (op.lum.pixel_count == 0) {
return; /* Strip is zero size or off-screen. */
}
AvgLogLum data;
data.tmmd = tmmd;
data.lav = lum.sum / lum.pixel_count;
data.cav.x = lum.color_sum.x / lum.pixel_count;
data.cav.y = lum.color_sum.y / lum.pixel_count;
data.cav.z = lum.color_sum.z / lum.pixel_count;
float maxl = log(double(lum.max) + 1e-5f);
float minl = log(double(lum.min) + 1e-5f);
float avl = lum.log_sum / lum.pixel_count;
data.auto_key = (maxl > minl) ? ((maxl - avl) / (maxl - minl)) : 1.0f;
op.data.tmmd = tmmd;
op.data.lav = op.lum.sum / op.lum.pixel_count;
op.data.cav.x = op.lum.color_sum.x / op.lum.pixel_count;
op.data.cav.y = op.lum.color_sum.y / op.lum.pixel_count;
op.data.cav.z = op.lum.color_sum.z / op.lum.pixel_count;
float maxl = log(double(op.lum.max) + 1e-5f);
float minl = log(double(op.lum.min) + 1e-5f);
float avl = op.lum.log_sum / op.lum.pixel_count;
op.data.auto_key = (maxl > minl) ? ((maxl - avl) / (maxl - minl)) : 1.0f;
float al = exp(double(avl));
data.al = (al == 0.0f) ? 0.0f : (tmmd->key / al);
data.igm = (tmmd->gamma == 0.0f) ? 1.0f : (1.0f / tmmd->gamma);
op.data.al = (al == 0.0f) ? 0.0f : (tmmd->key / al);
op.data.igm = (tmmd->gamma == 0.0f) ? 1.0f : (1.0f / tmmd->gamma);
threading::parallel_for(
IndexRange(int64_t(ibuf->x) * ibuf->y), 64 * 1024, [&](IndexRange range) {
if (ibuf->float_buffer.data != nullptr) {
/* Float pixels: no need for temporary storage. Luminance calculation already converted
* data to scene linear. */
float4 *pixels = (float4 *)(ibuf->float_buffer.data) + range.first();
if (tmmd->type == SEQ_TONEMAP_RD_PHOTORECEPTOR) {
tonemap_rd_photoreceptor(pixels, mask, range, data);
}
else {
BLI_assert(tmmd->type == SEQ_TONEMAP_RH_SIMPLE);
tonemap_simple(pixels, mask, range, data);
}
scene_linear_to_image_chunk_float(ibuf, range);
}
else {
/* Byte pixels: temporary storage for scene linear pixel values. */
Array<float4> scene_linear(range.size());
pixels_to_scene_linear_byte(ibuf->byte_buffer.colorspace,
ibuf->byte_buffer.data + range.first() * 4,
scene_linear.data(),
range.size());
if (tmmd->type == SEQ_TONEMAP_RD_PHOTORECEPTOR) {
tonemap_rd_photoreceptor(scene_linear.data(), mask, range, data);
}
else {
BLI_assert(tmmd->type == SEQ_TONEMAP_RH_SIMPLE);
tonemap_simple(scene_linear.data(), mask, range, data);
}
scene_linear_to_image_chunk_byte(scene_linear.data(), ibuf, range);
}
});
apply_modifier_op(op, ibuf, mask, float3x3(transform));
}
static void tonemapmodifier_panel_draw(const bContext *C, Panel *panel)

View File

@@ -30,36 +30,41 @@ static void whiteBalance_init_data(StripModifierData *smd)
struct WhiteBalanceApplyOp {
float multiplier[3];
template<typename ImageT, typename MaskT>
void apply(ImageT *image, const MaskT *mask, IndexRange size)
template<typename ImageT, typename MaskSampler>
void apply(ImageT *image, MaskSampler &mask, int image_x, IndexRange y_range)
{
for ([[maybe_unused]] int64_t i : size) {
float4 input = load_pixel_premul(image);
image += y_range.first() * image_x * 4;
for (int64_t y : y_range) {
mask.begin_row(y);
for ([[maybe_unused]] int64_t x : IndexRange(image_x)) {
float4 input = load_pixel_premul(image);
float4 result;
result.w = input.w;
float4 result;
result.w = input.w;
#if 0
mul_v3_v3(result, multiplier);
mul_v3_v3(result, multiplier);
#else
/* similar to division without the clipping */
for (int i = 0; i < 3; i++) {
/* Prevent pow argument from being negative. This whole math
* breaks down overall with any HDR colors; would be good to
* revisit and do something more proper. */
float f = max_ff(1.0f - input[i], 0.0f);
result[i] = 1.0f - powf(f, this->multiplier[i]);
}
/* similar to division without the clipping */
for (int i = 0; i < 3; i++) {
/* Prevent pow argument from being negative. This whole math
* breaks down overall with any HDR colors; would be good to
* revisit and do something more proper. */
float f = max_ff(1.0f - input[i], 0.0f);
result[i] = 1.0f - powf(f, this->multiplier[i]);
}
#endif
apply_and_advance_mask(input, result, mask);
store_pixel_premul(result, image);
image += 4;
mask.apply_mask(input, result);
store_pixel_premul(result, image);
image += 4;
}
}
}
};
static void whiteBalance_apply(const RenderData * /*render_data*/,
const StripScreenQuad & /*quad*/,
const Strip * /*strip*/,
const float transform[3][3],
StripModifierData *smd,
ImBuf *ibuf,
ImBuf *mask)
@@ -70,7 +75,7 @@ static void whiteBalance_apply(const RenderData * /*render_data*/,
op.multiplier[0] = (data->white_value[0] != 0.0f) ? 1.0f / data->white_value[0] : FLT_MAX;
op.multiplier[1] = (data->white_value[1] != 0.0f) ? 1.0f / data->white_value[1] : FLT_MAX;
op.multiplier[2] = (data->white_value[2] != 0.0f) ? 1.0f / data->white_value[2] : FLT_MAX;
apply_modifier_op(op, ibuf, mask);
apply_modifier_op(op, ibuf, mask, float3x3(transform));
}
static void whiteBalance_panel_draw(const bContext *C, Panel *panel)

View File

@@ -35,6 +35,7 @@
#include "SEQ_select.hh"
#include "SEQ_sequencer.hh"
#include "SEQ_time.hh"
#include "SEQ_transform.hh"
#include "SEQ_utils.hh"
#include "UI_interface.hh"
@@ -292,30 +293,6 @@ void store_pixel_raw(float4 pix, float *ptr)
*reinterpret_cast<float4 *>(ptr) = pix;
}
/* Byte mask */
void apply_and_advance_mask(float4 input, float4 &result, const uchar *&mask)
{
float3 m;
rgb_uchar_to_float(m, mask);
result.x = math::interpolate(input.x, result.x, m.x);
result.y = math::interpolate(input.y, result.y, m.y);
result.z = math::interpolate(input.z, result.z, m.z);
mask += 4;
}
/* Float mask */
void apply_and_advance_mask(float4 input, float4 &result, const float *&mask)
{
float3 m(mask);
result.x = math::interpolate(input.x, result.x, m.x);
result.y = math::interpolate(input.y, result.y, m.y);
result.z = math::interpolate(input.z, result.z, m.z);
mask += 4;
}
/* No mask */
void apply_and_advance_mask(float4 /*input*/, float4 & /*result*/, const void *& /*mask*/) {}
/**
* \a timeline_frame is offset by \a fra_offset only in case we are using a real mask.
*/
@@ -339,27 +316,17 @@ static ImBuf *modifier_render_mask_input(const RenderData *context,
* fine, but if it is a byte image then we also just take that without
* extra memory allocations or conversions. All modifiers are expected
* to handle mask being either type. */
mask_input = seq_render_mask(context, mask_id, timeline_frame - fra_offset, false);
mask_input = seq_render_mask(context->depsgraph,
context->rectx,
context->recty,
mask_id,
timeline_frame - fra_offset,
false);
}
return mask_input;
}
static ImBuf *modifier_mask_get(StripModifierData *smd,
const RenderData *context,
SeqRenderState *state,
int timeline_frame,
int fra_offset)
{
return modifier_render_mask_input(context,
state,
smd->mask_input_type,
smd->mask_strip,
smd->mask_id,
timeline_frame,
fra_offset);
}
/* -------------------------------------------------------------------- */
/** \name Public Modifier Functions
* \{ */
@@ -499,11 +466,10 @@ static bool skip_modifier(Scene *scene, const StripModifierData *smd, int timeli
void modifier_apply_stack(const RenderData *context,
SeqRenderState *state,
const Strip *strip,
const float3x3 &transform,
ImBuf *ibuf,
int timeline_frame)
{
const StripScreenQuad quad = get_strip_screen_quad(context, strip);
if (strip->modifiers.first && (strip->flag & SEQ_USE_LINEAR_MODIFIERS)) {
render_imbuf_from_sequencer_space(context->scene, ibuf);
}
@@ -530,8 +496,14 @@ void modifier_apply_stack(const RenderData *context,
frame_offset = smd->mask_id ? ((Mask *)smd->mask_id)->sfra : 0;
}
ImBuf *mask = modifier_mask_get(smd, context, state, timeline_frame, frame_offset);
smti->apply(context, quad, smd, ibuf, mask);
ImBuf *mask = modifier_render_mask_input(context,
state,
smd->mask_input_type,
smd->mask_strip,
smd->mask_id,
timeline_frame,
frame_offset);
smti->apply(context, strip, transform.ptr(), smd, ibuf, mask);
if (mask) {
IMB_freeImBuf(mask);
}

View File

@@ -8,6 +8,9 @@
* \ingroup sequencer
*/
#include "BLI_math_color.h"
#include "BLI_math_interp.hh"
#include "BLI_math_matrix.hh"
#include "BLI_math_vector.hh"
#include "BLI_task.hh"
@@ -15,6 +18,7 @@
struct bContext;
struct ARegionType;
struct ImBuf;
struct Strip;
struct uiLayout;
struct Panel;
@@ -23,6 +27,19 @@ struct PointerRNA;
namespace blender::seq {
struct RenderData;
struct SeqRenderState;
/* `transform` is transformation from strip image local pixel coordinates
* to the full render area pixel coordinates. This is used to sample
* modifier masks (since masks are in full render area space). */
void modifier_apply_stack(const RenderData *context,
SeqRenderState *state,
const Strip *strip,
const float3x3 &transform,
ImBuf *ibuf,
int timeline_frame);
bool modifier_persistent_uids_are_valid(const Strip &strip);
void draw_mask_input_type_settings(const bContext *C, uiLayout *layout, PointerRNA *ptr);
@@ -43,21 +60,186 @@ float4 load_pixel_raw(const uchar *ptr);
float4 load_pixel_raw(const float *ptr);
void store_pixel_raw(const float4 pix, uchar *ptr);
void store_pixel_raw(const float4 pix, float *ptr);
void apply_and_advance_mask(const float4 input, float4 &result, const uchar *&mask);
void apply_and_advance_mask(const float4 input, float4 &result, const float *&mask);
void apply_and_advance_mask(const float4 input, float4 &result, const void *&mask);
/* Mask sampler for #apply_modifier_op: no mask is present. */
struct MaskSamplerNone {
  /* Nothing to set up per row when there is no mask. */
  void begin_row(int64_t /*y*/) {}
  /* Leaves `result` untouched, i.e. the modifier applies at full strength. */
  void apply_mask(const float4 /*input*/, float4 & /*result*/) {}
  /* NOTE(review): reports 0.0 as the minimum mask value; presumably callers
   * interpret this consistently with the masked samplers -- confirm at call
   * sites (not visible here). */
  float load_mask_min()
  {
    return 0.0f;
  }
};
/* Mask sampler for #apply_modifier_op: floating point mask,
* same size as input, no transform. */
struct MaskSamplerDirectFloat {
MaskSamplerDirectFloat(const ImBuf *mask) : mask(mask)
{
BLI_assert(mask && mask->float_buffer.data);
}
void begin_row(int64_t y)
{
BLI_assert(y >= 0 && y < mask->y);
ptr = mask->float_buffer.data + y * mask->x * 4;
}
void apply_mask(const float4 input, float4 &result)
{
float3 m(this->ptr);
result.x = math::interpolate(input.x, result.x, m.x);
result.y = math::interpolate(input.y, result.y, m.y);
result.z = math::interpolate(input.z, result.z, m.z);
this->ptr += 4;
}
float load_mask_min()
{
float r = min_fff(this->ptr[0], this->ptr[1], this->ptr[2]);
this->ptr += 4;
return r;
}
const float *ptr = nullptr;
const ImBuf *mask;
};
/* Mask sampler for #apply_modifier_op: byte mask,
* same size as input, no transform. */
struct MaskSamplerDirectByte {
MaskSamplerDirectByte(const ImBuf *mask) : mask(mask)
{
BLI_assert(mask && mask->byte_buffer.data);
}
void begin_row(int64_t y)
{
BLI_assert(y >= 0 && y < mask->y);
ptr = mask->byte_buffer.data + y * mask->x * 4;
}
void apply_mask(const float4 input, float4 &result)
{
float3 m;
rgb_uchar_to_float(m, this->ptr);
result.x = math::interpolate(input.x, result.x, m.x);
result.y = math::interpolate(input.y, result.y, m.y);
result.z = math::interpolate(input.z, result.z, m.z);
this->ptr += 4;
}
float load_mask_min()
{
float r = float(min_iii(this->ptr[0], this->ptr[1], this->ptr[2])) * (1.0f / 255.0f);
this->ptr += 4;
return r;
}
const uchar *ptr = nullptr;
const ImBuf *mask;
};
/* Mask sampler for #apply_modifier_op: floating point mask,
 * sample mask with a transform. */
struct MaskSamplerTransformedFloat {
  /* `transform` maps image pixel coordinates into mask pixel coordinates;
   * only its 2D part (x/y axes and translation) is used below. */
  MaskSamplerTransformedFloat(const ImBuf *mask, const float3x3 &transform)
      : mask(mask), transform(transform)
  {
    BLI_assert(mask && mask->float_buffer.data);
    start_uv = transform.location().xy();
    add_x = transform.x_axis().xy();
    add_y = transform.y_axis().xy();
  }
  /* Start a new row: cache the transformed UV of this row's first pixel;
   * subsequent pixels are reached by adding `add_x` per column. */
  void begin_row(int64_t y)
  {
    this->cur_y = y;
    this->cur_x = 0;
    /* Sample at pixel centers. */
    this->cur_uv_row = this->start_uv + (y + 0.5f) * this->add_y + 0.5f * this->add_x;
  }
  /* Bilinearly sample the mask (border variant) at the transformed position
   * of the current pixel, blend `result` towards `input` by the sampled RGB,
   * then advance one column. The `-0.5` presumably shifts to the bilinear
   * filter's texel-center convention -- confirm against BLI_math_interp. */
  void apply_mask(const float4 input, float4 &result)
  {
    float2 uv = this->cur_uv_row + this->cur_x * this->add_x - 0.5f;
    float4 m;
    math::interpolate_bilinear_border_fl(
        this->mask->float_buffer.data, m, this->mask->x, this->mask->y, 4, uv.x, uv.y);
    result.x = math::interpolate(input.x, result.x, m.x);
    result.y = math::interpolate(input.y, result.y, m.y);
    result.z = math::interpolate(input.z, result.z, m.z);
    this->cur_x++;
  }
  /* Minimum of the sampled RGB mask values at the current pixel; advances. */
  float load_mask_min()
  {
    float2 uv = this->cur_uv_row + this->cur_x * this->add_x - 0.5f;
    float4 m;
    math::interpolate_bilinear_border_fl(
        this->mask->float_buffer.data, m, this->mask->x, this->mask->y, 4, uv.x, uv.y);
    float r = min_fff(m.x, m.y, m.z);
    this->cur_x++;
    return r;
  }
  /* NOTE(review): `cur_y` is written in begin_row() but never read here. */
  int64_t cur_x = 0, cur_y = 0;
  const ImBuf *mask;
  const float3x3 transform;
  float2 start_uv, add_x, add_y;
  float2 cur_uv_row;
};
/* Mask sampler for #apply_modifier_op: byte mask,
 * sample mask with a transform. */
struct MaskSamplerTransformedByte {
  /* `transform` maps image pixel coordinates into mask pixel coordinates;
   * only its 2D part (x/y axes and translation) is used below. */
  MaskSamplerTransformedByte(const ImBuf *mask, const float3x3 &transform)
      : mask(mask), transform(transform)
  {
    BLI_assert(mask && mask->byte_buffer.data);
    start_uv = transform.location().xy();
    add_x = transform.x_axis().xy();
    add_y = transform.y_axis().xy();
  }
  /* Start a new row: cache the transformed UV of this row's first pixel;
   * subsequent pixels are reached by adding `add_x` per column. */
  void begin_row(int64_t y)
  {
    this->cur_y = y;
    this->cur_x = 0;
    /* Sample at pixel centers. */
    this->cur_uv_row = this->start_uv + (y + 0.5f) * this->add_y + 0.5f * this->add_x;
  }
  /* Bilinearly sample the byte mask (border variant) at the transformed
   * position of the current pixel, convert to 0..1 floats, blend `result`
   * towards `input` by the sampled RGB, then advance one column. */
  void apply_mask(const float4 input, float4 &result)
  {
    float2 uv = this->cur_uv_row + this->cur_x * this->add_x - 0.5f;
    uchar4 mb = math::interpolate_bilinear_border_byte(
        this->mask->byte_buffer.data, this->mask->x, this->mask->y, uv.x, uv.y);
    float3 m;
    rgb_uchar_to_float(m, mb);
    result.x = math::interpolate(input.x, result.x, m.x);
    result.y = math::interpolate(input.y, result.y, m.y);
    result.z = math::interpolate(input.z, result.z, m.z);
    this->cur_x++;
  }
  /* Minimum of the sampled RGB mask values (0..1) at the current pixel;
   * advances one column. */
  float load_mask_min()
  {
    float2 uv = this->cur_uv_row + this->cur_x * this->add_x - 0.5f;
    uchar4 m = math::interpolate_bilinear_border_byte(
        this->mask->byte_buffer.data, this->mask->x, this->mask->y, uv.x, uv.y);
    float r = float(min_iii(m.x, m.y, m.z)) * (1.0f / 255.0f);
    this->cur_x++;
    return r;
  }
  /* NOTE(review): `cur_y` is written in begin_row() but never read here. */
  int64_t cur_x = 0, cur_y = 0;
  const ImBuf *mask;
  const float3x3 transform;
  float2 start_uv, add_x, add_y;
  float2 cur_uv_row;
};
/* Given `T` that implements an `apply` function:
 *
 * template <typename ImageT, typename MaskSampler>
 * void apply(ImageT* image, MaskSampler &mask, int image_x, IndexRange y_range);
 *
 * this function calls the apply() function in parallel
 * chunks of the image to process, with uchar or float ImageT types,
 * and with the appropriate MaskSampler instantiated, depending on
 * whether the mask exists, the data type of the mask, and whether it
 * needs a transformation or can be sampled directly.
 *
 * Both input and mask images are expected to have
 * 4 (RGBA) color channels. Input is modified. */
template<typename T> void apply_modifier_op(T &op, ImBuf *ibuf, const ImBuf *mask)
template<typename T>
void apply_modifier_op(T &op, ImBuf *ibuf, const ImBuf *mask, const float3x3 &mask_transform)
{
if (ibuf == nullptr) {
return;
@@ -66,37 +248,67 @@ template<typename T> void apply_modifier_op(T &op, ImBuf *ibuf, const ImBuf *mas
"Sequencer only supports 4 channel images");
BLI_assert_msg(mask == nullptr || mask->channels == 0 || mask->channels == 4,
"Sequencer only supports 4 channel images");
threading::parallel_for(IndexRange(size_t(ibuf->x) * ibuf->y), 32 * 1024, [&](IndexRange range) {
const bool direct_mask_sampling = mask == nullptr || (mask->x == ibuf->x && mask->y == ibuf->y &&
math::is_identity(mask_transform));
const int image_x = ibuf->x;
threading::parallel_for(IndexRange(ibuf->y), 16, [&](IndexRange y_range) {
uchar *image_byte = ibuf->byte_buffer.data;
float *image_float = ibuf->float_buffer.data;
const uchar *mask_byte = mask ? mask->byte_buffer.data : nullptr;
const float *mask_float = mask ? mask->float_buffer.data : nullptr;
const void *mask_none = nullptr;
int64_t offset = range.first() * 4;
/* Instantiate the needed processing function based on image/mask
* data types. */
if (image_byte) {
if (mask_byte) {
op.apply(image_byte + offset, mask_byte + offset, range);
if (direct_mask_sampling) {
MaskSamplerDirectByte sampler(mask);
op.apply(image_byte, sampler, image_x, y_range);
}
else {
MaskSamplerTransformedByte sampler(mask, mask_transform);
op.apply(image_byte, sampler, image_x, y_range);
}
}
else if (mask_float) {
op.apply(image_byte + offset, mask_float + offset, range);
if (direct_mask_sampling) {
MaskSamplerDirectFloat sampler(mask);
op.apply(image_byte, sampler, image_x, y_range);
}
else {
MaskSamplerTransformedFloat sampler(mask, mask_transform);
op.apply(image_byte, sampler, image_x, y_range);
}
}
else {
op.apply(image_byte + offset, mask_none, range);
MaskSamplerNone sampler;
op.apply(image_byte, sampler, image_x, y_range);
}
}
else if (image_float) {
if (mask_byte) {
op.apply(image_float + offset, mask_byte + offset, range);
if (direct_mask_sampling) {
MaskSamplerDirectByte sampler(mask);
op.apply(image_float, sampler, image_x, y_range);
}
else {
MaskSamplerTransformedByte sampler(mask, mask_transform);
op.apply(image_float, sampler, image_x, y_range);
}
}
else if (mask_float) {
op.apply(image_float + offset, mask_float + offset, range);
if (direct_mask_sampling) {
MaskSamplerDirectFloat sampler(mask);
op.apply(image_float, sampler, image_x, y_range);
}
else {
MaskSamplerTransformedFloat sampler(mask, mask_transform);
op.apply(image_float, sampler, image_x, y_range);
}
}
else {
op.apply(image_float + offset, mask_none, range);
MaskSamplerNone sampler;
op.apply(image_float, sampler, image_x, y_range);
}
}
});

View File

@@ -71,6 +71,7 @@
#include "cache/intra_frame_cache.hh"
#include "cache/source_image_cache.hh"
#include "effects/effects.hh"
#include "modifiers/modifier.hh"
#include "multiview.hh"
#include "prefetch.hh"
#include "proxy.hh"
@@ -440,19 +441,21 @@ static bool seq_need_scale_to_render_size(const Strip *strip, bool is_proxy_imag
return true;
}
static float3x3 sequencer_image_crop_transform_matrix(const Scene *scene,
const Strip *strip,
const ImBuf *in,
const ImBuf *out,
const float image_scale_factor,
const float preview_scale_factor)
static float3x3 calc_strip_transform_matrix(const Scene *scene,
const Strip *strip,
const int in_x,
const int in_y,
const int out_x,
const int out_y,
const float image_scale_factor,
const float preview_scale_factor)
{
const StripTransform *transform = strip->data->transform;
/* This value is intentionally kept as integer. Otherwise images with odd dimensions would
* be translated to center of canvas by non-integer value, which would cause it to be
* interpolated. Interpolation with 0 user defined translation is unwanted behavior. */
const int3 image_center_offs((out->x - in->x) / 2, (out->y - in->y) / 2, 0);
const int3 image_center_offs((out_x - in_x) / 2, (out_y - in_y) / 2, 0);
const float2 translation(transform->xofs * preview_scale_factor,
transform->yofs * preview_scale_factor);
@@ -461,12 +464,12 @@ static float3x3 sequencer_image_crop_transform_matrix(const Scene *scene,
transform->scale_y * image_scale_factor);
const float2 origin = image_transform_origin_get(scene, strip);
const float2 pivot(in->x * origin[0], in->y * origin[1]);
const float2 pivot(in_x * origin[0], in_y * origin[1]);
const float3x3 matrix = math::from_loc_rot_scale<float3x3>(
translation + float2(image_center_offs), rotation, scale);
const float3x3 mat_pivot = math::from_origin_transform(matrix, pivot);
return math::invert(mat_pivot);
return mat_pivot;
}
static void sequencer_image_crop_init(const Strip *strip,
@@ -531,17 +534,14 @@ static eIMBInterpolationFilterMode get_auto_filter(const StripTransform *transfo
return IMB_FILTER_BILINEAR;
}
static void sequencer_preprocess_transform_crop(
ImBuf *in, ImBuf *out, const RenderData *context, Strip *strip, const bool is_proxy_image)
static void sequencer_preprocess_transform_crop(ImBuf *in,
ImBuf *out,
const RenderData *context,
Strip *strip,
const float3x3 &matrix,
const bool do_scale_to_render_size,
const float preview_scale_factor)
{
const Scene *scene = context->scene;
const float preview_scale_factor = get_render_scale_factor(*context);
const bool do_scale_to_render_size = seq_need_scale_to_render_size(strip, is_proxy_image);
const float image_scale_factor = do_scale_to_render_size ? preview_scale_factor : 1.0f;
float3x3 matrix = sequencer_image_crop_transform_matrix(
scene, strip, in, out, image_scale_factor, preview_scale_factor);
/* Proxy image is smaller, so crop values must be corrected by proxy scale factor.
* Proxy scale factor always matches preview_scale_factor. */
rctf source_crop;
@@ -628,56 +628,26 @@ static ImBuf *input_preprocess(const RenderData *context,
const bool is_proxy_image)
{
Scene *scene = context->scene;
ImBuf *preprocessed_ibuf = nullptr;
/* Deinterlace. */
if ((strip->flag & SEQ_FILTERY) && !ELEM(strip->type, STRIP_TYPE_MOVIE, STRIP_TYPE_MOVIECLIP)) {
/* Change original image pointer to avoid another duplication in SEQ_USE_TRANSFORM. */
preprocessed_ibuf = IMB_makeSingleUser(ibuf);
ibuf = preprocessed_ibuf;
IMB_filtery(preprocessed_ibuf);
}
if (sequencer_use_crop(strip) || sequencer_use_transform(strip) || context->rectx != ibuf->x ||
context->recty != ibuf->y)
{
const int x = context->rectx;
const int y = context->recty;
preprocessed_ibuf = IMB_allocImBuf(
x, y, 32, ibuf->float_buffer.data ? IB_float_data : IB_byte_data);
sequencer_preprocess_transform_crop(ibuf, preprocessed_ibuf, context, strip, is_proxy_image);
seq_imbuf_assign_spaces(scene, preprocessed_ibuf);
IMB_metadata_copy(preprocessed_ibuf, ibuf);
IMB_freeImBuf(ibuf);
}
/* Duplicate ibuf if we still have original. */
if (preprocessed_ibuf == nullptr) {
preprocessed_ibuf = IMB_makeSingleUser(ibuf);
}
if (strip->flag & SEQ_FLIPX) {
IMB_flipx(preprocessed_ibuf);
}
if (strip->flag & SEQ_FLIPY) {
IMB_flipy(preprocessed_ibuf);
ibuf = IMB_makeSingleUser(ibuf);
IMB_filtery(ibuf);
}
if (strip->sat != 1.0f) {
IMB_saturation(preprocessed_ibuf, strip->sat);
ibuf = IMB_makeSingleUser(ibuf);
IMB_saturation(ibuf, strip->sat);
}
if (strip->flag & SEQ_MAKE_FLOAT) {
if (!preprocessed_ibuf->float_buffer.data) {
seq_imbuf_to_sequencer_space(scene, preprocessed_ibuf, true);
if (!ibuf->float_buffer.data) {
ibuf = IMB_makeSingleUser(ibuf);
seq_imbuf_to_sequencer_space(scene, ibuf, true);
}
if (preprocessed_ibuf->byte_buffer.data) {
IMB_free_byte_pixels(preprocessed_ibuf);
if (ibuf->byte_buffer.data) {
IMB_free_byte_pixels(ibuf);
}
}
@@ -687,15 +657,71 @@ static ImBuf *input_preprocess(const RenderData *context,
}
if (mul != 1.0f) {
ibuf = IMB_makeSingleUser(ibuf);
const bool multiply_alpha = (strip->flag & SEQ_MULTIPLY_ALPHA);
multiply_ibuf(preprocessed_ibuf, mul, multiply_alpha);
multiply_ibuf(ibuf, mul, multiply_alpha);
}
const float preview_scale_factor = get_render_scale_factor(*context);
const bool do_scale_to_render_size = seq_need_scale_to_render_size(strip, is_proxy_image);
const float image_scale_factor = do_scale_to_render_size ? preview_scale_factor : 1.0f;
if (strip->modifiers.first) {
modifier_apply_stack(context, state, strip, preprocessed_ibuf, timeline_frame);
ibuf = IMB_makeSingleUser(ibuf);
float3x3 matrix = calc_strip_transform_matrix(scene,
strip,
ibuf->x,
ibuf->y,
context->rectx,
context->recty,
image_scale_factor,
preview_scale_factor);
modifier_apply_stack(context, state, strip, matrix, ibuf, timeline_frame);
}
return preprocessed_ibuf;
if (sequencer_use_crop(strip) || sequencer_use_transform(strip) || context->rectx != ibuf->x ||
context->recty != ibuf->y)
{
const int x = context->rectx;
const int y = context->recty;
ImBuf *transformed_ibuf = IMB_allocImBuf(
x, y, 32, ibuf->float_buffer.data ? IB_float_data : IB_byte_data);
/* Note: calculate matrix again; modifiers can actually change the image size. */
float3x3 matrix = calc_strip_transform_matrix(scene,
strip,
ibuf->x,
ibuf->y,
context->rectx,
context->recty,
image_scale_factor,
preview_scale_factor);
matrix = math::invert(matrix);
sequencer_preprocess_transform_crop(ibuf,
transformed_ibuf,
context,
strip,
matrix,
do_scale_to_render_size,
preview_scale_factor);
seq_imbuf_assign_spaces(scene, transformed_ibuf);
IMB_metadata_copy(transformed_ibuf, ibuf);
IMB_freeImBuf(ibuf);
ibuf = transformed_ibuf;
}
if (strip->flag & SEQ_FLIPX) {
ibuf = IMB_makeSingleUser(ibuf);
IMB_flipx(ibuf);
}
if (strip->flag & SEQ_FLIPY) {
ibuf = IMB_makeSingleUser(ibuf);
IMB_flipy(ibuf);
}
return ibuf;
}
static ImBuf *seq_render_preprocess_ibuf(const RenderData *context,
@@ -1261,7 +1287,12 @@ static ImBuf *seq_render_movieclip_strip(const RenderData *context,
return ibuf;
}
ImBuf *seq_render_mask(const RenderData *context, Mask *mask, float frame_index, bool make_float)
ImBuf *seq_render_mask(Depsgraph *depsgraph,
int width,
int height,
const Mask *mask,
float frame_index,
bool make_float)
{
/* TODO: add option to rasterize to alpha imbuf? */
ImBuf *ibuf = nullptr;
@@ -1284,19 +1315,18 @@ ImBuf *seq_render_mask(const RenderData *context, Mask *mask, float frame_index,
/* anim-data */
adt = BKE_animdata_from_id(&mask->id);
const AnimationEvalContext anim_eval_context = BKE_animsys_eval_context_construct(
context->depsgraph, mask->sfra + frame_index);
depsgraph, mask->sfra + frame_index);
BKE_animsys_evaluate_animdata(&mask_temp->id, adt, &anim_eval_context, ADT_RECALC_ANIM, false);
maskbuf = MEM_malloc_arrayN<float>(size_t(context->rectx) * size_t(context->recty), __func__);
maskbuf = MEM_malloc_arrayN<float>(size_t(width) * size_t(height), __func__);
mr_handle = BKE_maskrasterize_handle_new();
BKE_maskrasterize_handle_init(
mr_handle, mask_temp, context->rectx, context->recty, true, true, true);
BKE_maskrasterize_handle_init(mr_handle, mask_temp, width, height, true, true, true);
BKE_id_free(nullptr, &mask_temp->id);
BKE_maskrasterize_buffer(mr_handle, context->rectx, context->recty, maskbuf);
BKE_maskrasterize_buffer(mr_handle, width, height, maskbuf);
BKE_maskrasterize_handle_free(mr_handle);
@@ -1305,12 +1335,11 @@ ImBuf *seq_render_mask(const RenderData *context, Mask *mask, float frame_index,
const float *fp_src;
float *fp_dst;
ibuf = IMB_allocImBuf(
context->rectx, context->recty, 32, IB_float_data | IB_uninitialized_pixels);
ibuf = IMB_allocImBuf(width, height, 32, IB_float_data | IB_uninitialized_pixels);
fp_src = maskbuf;
fp_dst = ibuf->float_buffer.data;
i = context->rectx * context->recty;
i = width * height;
while (--i) {
fp_dst[0] = fp_dst[1] = fp_dst[2] = *fp_src;
fp_dst[3] = 1.0f;
@@ -1324,12 +1353,11 @@ ImBuf *seq_render_mask(const RenderData *context, Mask *mask, float frame_index,
const float *fp_src;
uchar *ub_dst;
ibuf = IMB_allocImBuf(
context->rectx, context->recty, 32, IB_byte_data | IB_uninitialized_pixels);
ibuf = IMB_allocImBuf(width, height, 32, IB_byte_data | IB_uninitialized_pixels);
fp_src = maskbuf;
ub_dst = ibuf->byte_buffer.data;
i = context->rectx * context->recty;
i = width * height;
while (--i) {
ub_dst[0] = ub_dst[1] = ub_dst[2] = uchar(*fp_src * 255.0f); /* already clamped */
ub_dst[3] = 255;
@@ -1348,7 +1376,8 @@ static ImBuf *seq_render_mask_strip(const RenderData *context, Strip *strip, flo
{
bool make_float = (strip->flag & SEQ_MAKE_FLOAT) != 0;
return seq_render_mask(context, strip->mask, frame_index, make_float);
return seq_render_mask(
context->depsgraph, context->rectx, context->recty, strip->mask, frame_index, make_float);
}
static ImBuf *seq_render_scene_strip_ex(const RenderData *context,

View File

@@ -12,6 +12,7 @@
#include "BLI_set.hh"
#include "BLI_vector.hh"
struct Depsgraph;
struct ImBuf;
struct LinkNode;
struct ListBase;
@@ -55,7 +56,12 @@ ImBuf *seq_render_strip(const RenderData *context,
/* Renders Mask into an image suitable for sequencer:
* RGB channels contain mask intensity; alpha channel is opaque. */
ImBuf *seq_render_mask(const RenderData *context, Mask *mask, float frame_index, bool make_float);
ImBuf *seq_render_mask(Depsgraph *depsgraph,
int width,
int height,
const Mask *mask,
float frame_index,
bool make_float);
void seq_imbuf_assign_spaces(const Scene *scene, ImBuf *ibuf);
StripScreenQuad get_strip_screen_quad(const RenderData *context, const Strip *strip);

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.