Sculpt: Data oriented refactor for "Set Pivot Position" operator

Part of #118145.

Change the "Pivot to Unmasked" and "Pivot to Mask Border" operations
to avoid the vertex iteration macro. The former also uses a proper
weighted average now, so mostly-masked vertices have less influence.
The whole operation is multithreaded now so I expect it's a bit faster,
but I didn't explicitly test that.

Pull Request: https://projects.blender.org/blender/blender/pulls/124743
This commit is contained in:
Hans Goudey
2024-07-17 04:40:08 +02:00
committed by Hans Goudey
parent b8eb872583
commit 58518347f2

View File

@@ -622,13 +622,284 @@ static bool set_pivot_depends_on_cursor(bContext & /*C*/, wmOperatorType & /*ot*
return mode == PivotPositionMode::CursorSurface;
}
/* Running totals for a weighted average of positions. Accumulated in doubles,
 * presumably to limit floating point error when summing many vertices — TODO confirm;
 * callers convert the final result back to #float3. */
struct AveragePositionAccumulation {
  /* Sum of `position * weight` over all accumulated vertices. */
  double3 position;
  /* Sum of the weights; the divisor for the final average. */
  double weight_total;
};
/* Merge two partial accumulations; used as the reduction step of #threading::parallel_reduce. */
static AveragePositionAccumulation combine_average_position_accumulation(
    const AveragePositionAccumulation &a, const AveragePositionAccumulation &b)
{
  AveragePositionAccumulation combined = a;
  combined.position += b.position;
  combined.weight_total += b.weight_total;
  return combined;
}
/* Zero the factor of every position that fails the symmetry check against the pivot,
 * so those vertices contribute nothing to the accumulated average. */
BLI_NOINLINE static void filter_positions_pivot_symmetry(const Span<float3> positions,
                                                         const float3 &pivot,
                                                         const ePaintSymmetryFlags symm,
                                                         const MutableSpan<float> factors)
{
  BLI_assert(positions.size() == factors.size());
  for (const int i : factors.index_range()) {
    if (SCULPT_check_vertex_pivot_symmetry(positions[i], pivot, symm)) {
      continue;
    }
    factors[i] = 0.0f;
  }
}
/* Add the weighted positions and their weights to a running total. Uses double
 * precision accumulation via the #AveragePositionAccumulation fields. */
BLI_NOINLINE static void accumulate_weighted_average_position(const Span<float3> positions,
                                                              const Span<float> factors,
                                                              AveragePositionAccumulation &total)
{
  BLI_assert(positions.size() == factors.size());
  for (const int i : positions.index_range()) {
    const float weight = factors[i];
    total.position += double3(positions[i] * weight);
    total.weight_total += weight;
  }
}
/**
 * Calculate the average position of visible vertices, weighted by how unmasked each
 * vertex is (via #fill_factor_from_hide_and_mask), so mostly-masked vertices have
 * less influence. Vertices failing the symmetry check against \a pivot are excluded.
 *
 * \return The weighted average position, or zero when no vertex contributes any
 * weight (#math::safe_divide avoids dividing by a zero weight total).
 */
static float3 average_unmasked_position(const Object &object,
                                        const float3 &pivot,
                                        const ePaintSymmetryFlags symm)
{
  const SculptSession &ss = *object.sculpt;
  PBVH &pbvh = *ss.pbvh;

  /* Fully masked or hidden nodes cannot contribute anything to the average. */
  Vector<PBVHNode *> nodes = bke::pbvh::search_gather(
      pbvh, [&](PBVHNode &node) { return !node_fully_masked_or_hidden(node); });

  /* Reusable per-thread buffers to avoid reallocating for every node. */
  struct LocalData {
    Vector<float> factors;
    Vector<float3> positions;
  };
  threading::EnumerableThreadSpecific<LocalData> all_tls;
  switch (BKE_pbvh_type(pbvh)) {
    case PBVH_FACES: {
      const Mesh &mesh = *static_cast<const Mesh *>(object.data);
      const Span<float3> vert_positions = BKE_pbvh_get_vert_positions(pbvh);
      const AveragePositionAccumulation total = threading::parallel_reduce(
          nodes.index_range(),
          1,
          AveragePositionAccumulation{},
          [&](const IndexRange range, AveragePositionAccumulation sum) {
            LocalData &tls = all_tls.local();
            /* NOTE(review): isolated so nested parallelism cannot reuse this thread's
             * TLS buffers mid-loop; the grids/BMesh branches below don't isolate —
             * confirm whether their gathers parallelize internally. */
            threading::isolate_task([&]() {
              for (const PBVHNode *node : nodes.as_span().slice(range)) {
                const Span<int> verts = bke::pbvh::node_unique_verts(*node);
                tls.positions.reinitialize(verts.size());
                const MutableSpan<float3> positions = tls.positions;
                array_utils::gather(vert_positions, verts, positions);
                tls.factors.reinitialize(verts.size());
                const MutableSpan<float> factors = tls.factors;
                fill_factor_from_hide_and_mask(mesh, verts, factors);
                filter_positions_pivot_symmetry(positions, pivot, symm, factors);
                accumulate_weighted_average_position(positions, factors, sum);
              }
            });
            return sum;
          },
          combine_average_position_accumulation);
      return float3(math::safe_divide(total.position, total.weight_total));
    }
    case PBVH_GRIDS: {
      const SubdivCCG &subdiv_ccg = *ss.subdiv_ccg;
      const AveragePositionAccumulation total = threading::parallel_reduce(
          nodes.index_range(),
          1,
          AveragePositionAccumulation{},
          [&](const IndexRange range, AveragePositionAccumulation sum) {
            LocalData &tls = all_tls.local();
            for (const PBVHNode *node : nodes.as_span().slice(range)) {
              const Span<int> grids = bke::pbvh::node_grid_indices(*node);
              const MutableSpan positions = gather_grids_positions(
                  subdiv_ccg, grids, tls.positions);
              tls.factors.reinitialize(positions.size());
              const MutableSpan<float> factors = tls.factors;
              fill_factor_from_hide_and_mask(subdiv_ccg, grids, factors);
              filter_positions_pivot_symmetry(positions, pivot, symm, factors);
              accumulate_weighted_average_position(positions, factors, sum);
            }
            return sum;
          },
          combine_average_position_accumulation);
      return float3(math::safe_divide(total.position, total.weight_total));
    }
    case PBVH_BMESH: {
      const AveragePositionAccumulation total = threading::parallel_reduce(
          nodes.index_range(),
          1,
          AveragePositionAccumulation{},
          [&](const IndexRange range, AveragePositionAccumulation sum) {
            LocalData &tls = all_tls.local();
            for (PBVHNode *node : nodes.as_span().slice(range)) {
              const Set<BMVert *, 0> &verts = BKE_pbvh_bmesh_node_unique_verts(node);
              const MutableSpan positions = gather_bmesh_positions(verts, tls.positions);
              tls.factors.reinitialize(verts.size());
              const MutableSpan<float> factors = tls.factors;
              fill_factor_from_hide_and_mask(*ss.bm, verts, factors);
              filter_positions_pivot_symmetry(positions, pivot, symm, factors);
              accumulate_weighted_average_position(positions, factors, sum);
            }
            return sum;
          },
          combine_average_position_accumulation);
      return float3(math::safe_divide(total.position, total.weight_total));
    }
  }
  BLI_assert_unreachable();
  return float3(0);
}
/**
 * Zero the factor of every vertex whose mask value is not near 0.5, keeping only
 * vertices at the mask border (mask within [0.3, 0.7]).
 */
BLI_NOINLINE static void mask_border_weight_calc(const Span<float> masks,
                                                 const MutableSpan<float> factors)
{
  /* Match the sibling factor helpers, which assert equal span sizes. */
  BLI_assert(masks.size() == factors.size());
  /* A vertex counts as "border" when its mask is within this distance of 0.5. */
  constexpr float threshold = 0.2f;
  for (const int i : masks.index_range()) {
    if (std::abs(masks[i] - 0.5f) > threshold) {
      factors[i] = 0.0f;
    }
  }
}
/**
 * Calculate the average position of visible vertices lying on the mask border
 * (mask value near 0.5, see #mask_border_weight_calc). Vertices failing the
 * symmetry check against \a pivot are excluded.
 *
 * \return The weighted average position, or zero when no vertex contributes any
 * weight (#math::safe_divide avoids dividing by a zero weight total).
 */
static float3 average_mask_border_position(const Object &object,
                                           const float3 &pivot,
                                           const ePaintSymmetryFlags symm)
{
  const SculptSession &ss = *object.sculpt;
  PBVH &pbvh = *ss.pbvh;

  /* Fully masked or hidden nodes cannot contain mask-border vertices that matter. */
  Vector<PBVHNode *> nodes = bke::pbvh::search_gather(
      pbvh, [&](PBVHNode &node) { return !node_fully_masked_or_hidden(node); });

  /* Reusable per-thread buffers to avoid reallocating for every node. */
  struct LocalData {
    Vector<float> factors;
    Vector<float> masks;
    Vector<float3> positions;
  };
  threading::EnumerableThreadSpecific<LocalData> all_tls;
  switch (BKE_pbvh_type(pbvh)) {
    case PBVH_FACES: {
      const Mesh &mesh = *static_cast<const Mesh *>(object.data);
      const Span<float3> vert_positions = BKE_pbvh_get_vert_positions(pbvh);
      const bke::AttributeAccessor attributes = mesh.attributes();
      /* Missing mask attribute reads as fully unmasked (0.0). */
      const VArraySpan mask_attr = *attributes.lookup_or_default<float>(
          ".sculpt_mask", bke::AttrDomain::Point, 0.0f);
      const AveragePositionAccumulation total = threading::parallel_reduce(
          nodes.index_range(),
          1,
          AveragePositionAccumulation{},
          [&](const IndexRange range, AveragePositionAccumulation sum) {
            LocalData &tls = all_tls.local();
            /* NOTE(review): isolated so nested parallelism cannot reuse this thread's
             * TLS buffers mid-loop; the grids/BMesh branches below don't isolate —
             * confirm whether their gathers parallelize internally. */
            threading::isolate_task([&]() {
              for (const PBVHNode *node : nodes.as_span().slice(range)) {
                const Span<int> verts = bke::pbvh::node_unique_verts(*node);
                tls.positions.reinitialize(verts.size());
                const MutableSpan<float3> positions = tls.positions;
                array_utils::gather(vert_positions, verts, positions);
                tls.masks.reinitialize(verts.size());
                const MutableSpan<float> masks = tls.masks;
                array_utils::gather(mask_attr, verts, masks);
                tls.factors.reinitialize(verts.size());
                const MutableSpan<float> factors = tls.factors;
                fill_factor_from_hide(mesh, verts, factors);
                mask_border_weight_calc(masks, factors);
                filter_positions_pivot_symmetry(positions, pivot, symm, factors);
                accumulate_weighted_average_position(positions, factors, sum);
              }
            });
            return sum;
          },
          combine_average_position_accumulation);
      return float3(math::safe_divide(total.position, total.weight_total));
    }
    case PBVH_GRIDS: {
      const SubdivCCG &subdiv_ccg = *ss.subdiv_ccg;
      const AveragePositionAccumulation total = threading::parallel_reduce(
          nodes.index_range(),
          1,
          AveragePositionAccumulation{},
          [&](const IndexRange range, AveragePositionAccumulation sum) {
            LocalData &tls = all_tls.local();
            for (const PBVHNode *node : nodes.as_span().slice(range)) {
              const Span<int> grids = bke::pbvh::node_grid_indices(*node);
              const MutableSpan positions = gather_grids_positions(
                  subdiv_ccg, grids, tls.positions);
              tls.masks.reinitialize(positions.size());
              const MutableSpan<float> masks = tls.masks;
              mask::gather_mask_grids(subdiv_ccg, grids, masks);
              tls.factors.reinitialize(positions.size());
              const MutableSpan<float> factors = tls.factors;
              fill_factor_from_hide(subdiv_ccg, grids, factors);
              mask_border_weight_calc(masks, factors);
              filter_positions_pivot_symmetry(positions, pivot, symm, factors);
              accumulate_weighted_average_position(positions, factors, sum);
            }
            return sum;
          },
          combine_average_position_accumulation);
      return float3(math::safe_divide(total.position, total.weight_total));
    }
    case PBVH_BMESH: {
      const AveragePositionAccumulation total = threading::parallel_reduce(
          nodes.index_range(),
          1,
          AveragePositionAccumulation{},
          [&](const IndexRange range, AveragePositionAccumulation sum) {
            LocalData &tls = all_tls.local();
            for (PBVHNode *node : nodes.as_span().slice(range)) {
              const Set<BMVert *, 0> &verts = BKE_pbvh_bmesh_node_unique_verts(node);
              const MutableSpan positions = gather_bmesh_positions(verts, tls.positions);
              tls.masks.reinitialize(verts.size());
              const MutableSpan<float> masks = tls.masks;
              mask::gather_mask_bmesh(*ss.bm, verts, masks);
              tls.factors.reinitialize(verts.size());
              const MutableSpan<float> factors = tls.factors;
              fill_factor_from_hide(verts, factors);
              mask_border_weight_calc(masks, factors);
              filter_positions_pivot_symmetry(positions, pivot, symm, factors);
              accumulate_weighted_average_position(positions, factors, sum);
            }
            return sum;
          },
          combine_average_position_accumulation);
      return float3(math::safe_divide(total.position, total.weight_total));
    }
  }
  BLI_assert_unreachable();
  return float3(0);
}
static int set_pivot_position_exec(bContext *C, wmOperator *op)
{
Object &ob = *CTX_data_active_object(C);
SculptSession &ss = *ob.sculpt;
ARegion *region = CTX_wm_region(C);
Depsgraph *depsgraph = CTX_data_ensure_evaluated_depsgraph(C);
const char symm = SCULPT_mesh_symmetry_xyz_get(ob);
const ePaintSymmetryFlags symm = SCULPT_mesh_symmetry_xyz_get(ob);
const PivotPositionMode mode = PivotPositionMode(RNA_enum_get(op->ptr, "mode"));
@@ -659,52 +930,11 @@ static int set_pivot_position_exec(bContext *C, wmOperator *op)
copy_v3_v3(ss.pivot_pos, stroke_location);
}
}
else if (mode == PivotPositionMode::Unmasked) {
ss.pivot_pos = average_unmasked_position(ob, ss.pivot_pos, symm);
}
else {
Vector<PBVHNode *> nodes = bke::pbvh::search_gather(*ss.pbvh, {});
float avg[3];
int total = 0;
zero_v3(avg);
/* Pivot to unmasked. */
if (mode == PivotPositionMode::Unmasked) {
for (PBVHNode *node : nodes) {
PBVHVertexIter vd;
BKE_pbvh_vertex_iter_begin (*ss.pbvh, node, vd, PBVH_ITER_UNIQUE) {
const float mask = vd.mask;
if (mask < 1.0f) {
if (SCULPT_check_vertex_pivot_symmetry(vd.co, ss.pivot_pos, symm)) {
add_v3_v3(avg, vd.co);
total++;
}
}
}
BKE_pbvh_vertex_iter_end;
}
}
/* Pivot to mask border. */
else if (mode == PivotPositionMode::MaskBorder) {
const float threshold = 0.2f;
for (PBVHNode *node : nodes) {
PBVHVertexIter vd;
BKE_pbvh_vertex_iter_begin (*ss.pbvh, node, vd, PBVH_ITER_UNIQUE) {
const float mask = vd.mask;
if (mask < (0.5f + threshold) && mask > (0.5f - threshold)) {
if (SCULPT_check_vertex_pivot_symmetry(vd.co, ss.pivot_pos, symm)) {
add_v3_v3(avg, vd.co);
total++;
}
}
}
BKE_pbvh_vertex_iter_end;
}
}
if (total > 0) {
mul_v3_fl(avg, 1.0f / total);
copy_v3_v3(ss.pivot_pos, avg);
}
ss.pivot_pos = average_mask_border_position(ob, ss.pivot_pos, symm);
}
/* Update the viewport navigation rotation origin. */