Mesh: Propagate attributes in automatic edge calculation
Support propagating attributes from the original mesh edges when generating mesh edges. This implies both deduplication of the original edges and creation of the new edges needed for faces. Even when the original edges themselves are not propagated, their attributes are still propagated if an edge was part of any face. To make duplicate original edges distinct, the first occurrence is simply chosen as the value source.

An important part of this patch is the definition of the invariants of the input mesh:
1. All edges must be valid (their vertices must be correct). The algorithm only deals with duplicate and implicit edges.
2. There is no way to provide info about whether the corner edge attribute is valid or not. If all input edges are valid and there are no new edges, re-generation of the already valid corner edge indices could be skipped, but for now it is always done.

The new implementation depends on the input mesh invariants, so this drops an old patch that fixed a hard crash related to duplicate edge vertices; the new code will crash regardless of such a patch. Hopefully there will be no problems after #138633.

The changes don't affect performance much. If anything, in some cases the edge calculation becomes a few percent faster. See the PR for more performance testing details.

Pull Request: https://projects.blender.org/blender/blender/pulls/132492
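Below is a minimal caller-side sketch (not part of the commit) of how the new overload added here might be used. The wrapper function name is hypothetical; only the signature introduced by this patch and AttributeFilter::default_filter() from BKE_attribute_filter.hh are assumed.

/* Hypothetical usage sketch: rebuild edges while propagating edge attributes.
 * Assumes only the overload added by this commit and the default filter. */
#include "BKE_attribute_filter.hh"
#include "BKE_mesh.hh"

static void recalc_edges_propagating_attributes(Mesh &mesh)
{
  /* Judging by the filtering loop added in this patch, the default filter marks
   * no attribute as skippable, so all edge attributes are propagated. */
  blender::bke::mesh_calc_edges(mesh,
                                /*keep_existing_edges=*/true,
                                /*select_new_edges=*/true,
                                blender::bke::AttributeFilter::default_filter());
}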
Committed by Hans Goudey
Parent: 7e38ffe508
Commit: 157e7e0351
@@ -23,6 +23,8 @@ enum class AttrType : int16_t;
struct AttributeMetaData;
struct AttributeAccessorFunctions;

struct AttributeFilter;

namespace mesh {

/* -------------------------------------------------------------------- */
/** \name Polygon Data Evaluation
@@ -384,6 +386,11 @@ Mesh *mesh_new_no_attributes(int verts_num, int edges_num, int faces_num, int co
/** Calculate edges from faces. */
void mesh_calc_edges(Mesh &mesh, bool keep_existing_edges, bool select_new_edges);

void mesh_calc_edges(Mesh &mesh,
                     bool keep_existing_edges,
                     bool select_new_edges,
                     const AttributeFilter &attribute_filter);

void mesh_translate(Mesh &mesh, const float3 &translation, bool do_shape_keys);

void mesh_transform(Mesh &mesh, const float4x4 &transform, bool do_shape_keys);

@@ -14,6 +14,8 @@
#include "BLI_vector_set.hh"

#include "BKE_attribute.hh"
#include "BKE_attribute_filter.hh"
#include "BKE_attribute_math.hh"
#include "BKE_customdata.hh"
#include "BKE_mesh.hh"

@@ -47,6 +49,20 @@ static void reserve_hash_maps(const Mesh &mesh,
      edge_maps, [&](EdgeMap &edge_map) { edge_map.reserve(totedge_guess / edge_maps.size()); });
}

static OffsetIndices<int> edge_map_offsets(const Span<EdgeMap> maps, Array<int> &r_sizes)
{
  r_sizes.reinitialize(maps.size() + 1);
  for (const int map_i : maps.index_range()) {
    r_sizes[map_i] = maps[map_i].size();
  }
  return offset_indices::accumulate_counts_to_offsets(r_sizes);
}

static int edge_to_hash_map_i(const OrderedEdge edge, const uint32_t parallel_mask)
{
  return parallel_mask & edge_hash_2(edge);
}

static void add_existing_edges_to_hash_maps(const Mesh &mesh,
                                            const uint32_t parallel_mask,
                                            MutableSpan<EdgeMap> edge_maps)
@@ -58,7 +74,7 @@ static void add_existing_edges_to_hash_maps(const Mesh &mesh,
    for (const int2 edge : edges) {
      const OrderedEdge ordered_edge(edge);
      /* Only add the edge when it belongs into this map. */
      if (task_index == (parallel_mask & edge_hash_2(ordered_edge))) {
      if (task_index == edge_to_hash_map_i(ordered_edge, parallel_mask)) {
        edge_map.add(ordered_edge);
      }
    }
@@ -78,22 +94,21 @@ static void add_face_edges_to_hash_maps(const Mesh &mesh,
    for (const int corner : face) {
      const int vert = corner_verts[corner];
      const int vert_prev = corner_verts[bke::mesh::face_corner_prev(face, corner)];
      /* Can only be the same when the mesh data is invalid. */
      if (LIKELY(vert_prev != vert)) {
        const OrderedEdge ordered_edge(vert_prev, vert);
        /* Only add the edge when it belongs into this map. */
        if (task_index == (parallel_mask & edge_hash_2(ordered_edge))) {
          edge_map.add(ordered_edge);
        }
      const OrderedEdge ordered_edge(vert_prev, vert);
      /* Only add the edge when it belongs into this map. */
      if (task_index == edge_to_hash_map_i(ordered_edge, parallel_mask)) {
        edge_map.add(ordered_edge);
      }
      }
    }
  });
}

static void serialize_and_initialize_deduplicated_edges(MutableSpan<EdgeMap> edge_maps,
                                                         const OffsetIndices<int> edge_offsets,
                                                         MutableSpan<int2> new_edges)
static void serialize_and_initialize_deduplicated_edges(
    MutableSpan<EdgeMap> edge_maps,
    const OffsetIndices<int> edge_offsets,
    const OffsetIndices<int> prefix_skip_offsets,
    MutableSpan<int2> new_edges)
{
  threading::parallel_for_each(edge_maps, [&](EdgeMap &edge_map) {
    const int task_index = &edge_map - edge_maps.data();
@@ -101,8 +116,18 @@ static void serialize_and_initialize_deduplicated_edges(MutableSpan<EdgeMap> edg
      return;
    }

    MutableSpan<int2> result_edges = new_edges.slice(edge_offsets[task_index]);
    result_edges.copy_from(edge_map.as_span().cast<int2>());
    if (prefix_skip_offsets[task_index].size() == edge_offsets[task_index].size()) {
      return;
    }

    const IndexRange all_map_edges = edge_offsets[task_index];
    const IndexRange prefix_to_skip = prefix_skip_offsets[task_index];
    const IndexRange map_edges = IndexRange::from_begin_size(
        all_map_edges.start() - prefix_to_skip.start(),
        all_map_edges.size() - prefix_to_skip.size());

    MutableSpan<int2> result_edges = new_edges.slice(map_edges);
    result_edges.copy_from(edge_map.as_span().drop_front(prefix_to_skip.size()).cast<int2>());
  });
}

@@ -119,16 +144,8 @@ static void update_edge_indices_in_face_loops(const OffsetIndices<int> faces,
    for (const int corner : face) {
      const int vert = corner_verts[corner];
      const int vert_prev = corner_verts[bke::mesh::face_corner_next(face, corner)];
      if (UNLIKELY(vert == vert_prev)) {
        /* This is an invalid edge; normally this does not happen in Blender,
         * but it can be part of an imported mesh with invalid geometry. See
         * #76514. */
        corner_edges[corner] = 0;
        continue;
      }

      const OrderedEdge ordered_edge(vert_prev, vert);
      const int task_index = parallel_mask & edge_hash_2(ordered_edge);
      const int task_index = edge_to_hash_map_i(ordered_edge, parallel_mask);
      const EdgeMap &edge_map = edge_maps[task_index];
      const int edge_i = edge_map.index_of(ordered_edge);
      const int edge_index = edge_offsets[task_index][edge_i];
@@ -155,28 +172,68 @@ static void clear_hash_tables(MutableSpan<EdgeMap> edge_maps)
  threading::parallel_for_each(edge_maps, [](EdgeMap &edge_map) { edge_map.clear(); });
}

static void deselect_known_edges(const OffsetIndices<int> edge_offsets,
                                 const Span<EdgeMap> edge_maps,
                                 const uint32_t parallel_mask,
                                 const Span<int2> known_edges,
                                 MutableSpan<bool> selection)
static IndexMask mask_first_distinct_edges(const Span<int2> edges,
                                           const IndexMask &edges_to_check,
                                           const Span<EdgeMap> edge_maps,
                                           const uint32_t parallel_mask,
                                           const OffsetIndices<int> edge_offsets,
                                           IndexMaskMemory &memory)
{
  threading::parallel_for(known_edges.index_range(), 2048, [&](const IndexRange range) {
    for (const int2 original_edge : known_edges.slice(range)) {
      const OrderedEdge ordered_edge(original_edge);
      const int task_index = parallel_mask & edge_hash_2(ordered_edge);
      const EdgeMap &edge_map = edge_maps[task_index];
      const int edge_i = edge_map.index_of(ordered_edge);
      const int edge_index = edge_offsets[task_index][edge_i];
      selection[edge_index] = false;
    }
  if (edges_to_check.is_empty()) {
    return {};
  }

  constexpr int no_original_edge = std::numeric_limits<int>::max();
  Array<int> map_edge_to_first_original(edge_offsets.total_size());
  map_edge_to_first_original.as_mutable_span().fill(no_original_edge);

  /* TODO: Lock-free parallel version? BLI's "atomic::min<T>(T&, T);"? */
  edges_to_check.foreach_index_optimized<int>([&](const int edge_i) {
    const OrderedEdge edge = edges[edge_i];
    const int map_i = calc_edges::edge_to_hash_map_i(edge, parallel_mask);
    const int edge_index = edge_maps[map_i].index_of(edge);

    int &original_edge = map_edge_to_first_original[edge_offsets[map_i][edge_index]];
    original_edge = math::min(original_edge, edge_i);
  });

  /* Note: #map_edge_to_first_original might still contain #no_original_edge if the edges were
   * both non-distinct and not a full set. */

  return IndexMask::from_predicate(
      edges_to_check, GrainSize(2048), memory, [&](const int srd_edge_i) {
        const OrderedEdge edge = edges[srd_edge_i];
        const int map_i = calc_edges::edge_to_hash_map_i(edge, parallel_mask);
        const int edge_index = edge_maps[map_i].index_of(edge);
        return map_edge_to_first_original[edge_offsets[map_i][edge_index]] == srd_edge_i;
      });
}

} // namespace calc_edges

void mesh_calc_edges(Mesh &mesh, bool keep_existing_edges, const bool select_new_edges)
void mesh_calc_edges(Mesh &mesh,
                     bool keep_existing_edges,
                     const bool select_new_edges,
                     const AttributeFilter &attribute_filter)
{

  if (mesh.edges_num == 0 && mesh.corners_num == 0) {
    /* BLI_assert(BKE_mesh_is_valid(&mesh)); */
    return;
  }

  if (mesh.corners_num == 0 && !keep_existing_edges) {
    CustomData_free(&mesh.edge_data);
    mesh.edges_num = 0;
    mesh.tag_loose_edges_none();
    /* BLI_assert(BKE_mesh_is_valid(&mesh)); */
    return;
  }

  BLI_assert(std::all_of(mesh.edges().begin(), mesh.edges().end(), [&](const int2 edge) {
    return edge.x != edge.y;
  }));

  /* Parallelization is achieved by having multiple hash tables for different subsets of edges.
   * Each edge is assigned to one of the hash maps based on the lower bits of a hash value. */
  const int parallel_maps = calc_edges::get_parallel_maps_count(mesh);
@@ -185,53 +242,288 @@ void mesh_calc_edges(Mesh &mesh, bool keep_existing_edges, const bool select_new
  Array<calc_edges::EdgeMap> edge_maps(parallel_maps);
  calc_edges::reserve_hash_maps(mesh, keep_existing_edges, edge_maps);

  /* Add all edges. */
  Array<int> original_edge_maps_prefix_size(edge_maps.size() + 1, 0);
  if (keep_existing_edges) {
    calc_edges::add_existing_edges_to_hash_maps(mesh, parallel_mask, edge_maps);
    calc_edges::edge_map_offsets(edge_maps, original_edge_maps_prefix_size);
  }
  const OffsetIndices<int> original_edge_maps_prefix(original_edge_maps_prefix_size.as_span());
  const int original_unique_edge_num = original_edge_maps_prefix.total_size();
  const bool original_edges_are_distinct = original_unique_edge_num == mesh.edges_num;

  if (mesh.corners_num == 0 && keep_existing_edges && original_edges_are_distinct) {
    /* BLI_assert(BKE_mesh_is_valid(&mesh)); */
    return;
  }

  calc_edges::add_face_edges_to_hash_maps(mesh, parallel_mask, edge_maps);
  Array<int> edge_sizes;
  const OffsetIndices<int> edge_offsets = calc_edges::edge_map_offsets(edge_maps, edge_sizes);
  const bool no_new_edges = edge_offsets.total_size() == original_unique_edge_num;

  Array<int> edge_sizes(edge_maps.size() + 1);
  for (const int i : edge_maps.index_range()) {
    edge_sizes[i] = edge_maps[i].size();
  }
  const OffsetIndices<int> edge_offsets = offset_indices::accumulate_counts_to_offsets(edge_sizes);
  MutableAttributeAccessor dst_attributes = mesh.attributes_for_write();
  dst_attributes.add<int>(".corner_edge", AttrDomain::Corner, AttributeInitConstruct());
  MutableSpan<int> corner_edges = mesh.corner_edges_for_write();
#ifndef NDEBUG
  corner_edges.fill(-1);
#endif

  /* Create new edges. */
  MutableAttributeAccessor attributes = mesh.attributes_for_write();
  attributes.add<int>(".corner_edge", AttrDomain::Corner, AttributeInitConstruct());
  MutableSpan<int2> new_edges(MEM_calloc_arrayN<int2>(edge_offsets.total_size(), __func__),
                              edge_offsets.total_size());
  calc_edges::serialize_and_initialize_deduplicated_edges(edge_maps, edge_offsets, new_edges);
  calc_edges::update_edge_indices_in_face_loops(mesh.faces(),
                                                mesh.corner_verts(),
                                                edge_maps,
                                                parallel_mask,
                                                edge_offsets,
                                                mesh.corner_edges_for_write());

  Array<int2> original_edges;
  if (keep_existing_edges && select_new_edges) {
    original_edges.reinitialize(mesh.edges_num);
    array_utils::copy(mesh.edges(), original_edges.as_mutable_span());
  const OffsetIndices<int> faces = mesh.faces();
  const Span<int2> original_edges = mesh.edges();
  const Span<int> corner_verts = mesh.corner_verts();
  if (keep_existing_edges && original_edges_are_distinct && no_new_edges) {
    /* We need a way for the caller to say whether the corner edge attribute should be generated
     * even in this case. TODO: make this optional. */
    calc_edges::update_edge_indices_in_face_loops(
        faces, corner_verts, edge_maps, parallel_mask, edge_offsets, corner_edges);
    BLI_assert(!corner_edges.contains(-1));
    BLI_assert(BKE_mesh_is_valid(&mesh));
    return;
  }

  /* Free old CustomData and assign new one. */
  CustomData_free(&mesh.edge_data);
  CustomData_reset(&mesh.edge_data);
  mesh.edges_num = edge_offsets.total_size();
  attributes.add<int2>(".edge_verts", AttrDomain::Edge, AttributeInitMoveArray(new_edges.data()));
  const int result_edges_num = edge_offsets.total_size();

  IndexMaskMemory memory;
  IndexRange back_range_of_new_edges;
  IndexMask src_to_dst_mask;

  MutableSpan<int2> edge_verts(MEM_malloc_arrayN<int2>(result_edges_num, AT), result_edges_num);
#ifndef NDEBUG
  edge_verts.fill(int2(-1));
#endif

  if (keep_existing_edges) {
    back_range_of_new_edges = IndexRange(result_edges_num).drop_front(original_unique_edge_num);

    if (original_edges_are_distinct) {
      src_to_dst_mask = IndexRange(original_unique_edge_num);
    }
    else {
      src_to_dst_mask = calc_edges::mask_first_distinct_edges(original_edges,
                                                              original_edges.index_range(),
                                                              edge_maps,
                                                              parallel_mask,
                                                              edge_offsets,
                                                              memory);
    }
    BLI_assert(src_to_dst_mask.size() == original_unique_edge_num);

    array_utils::gather(
        original_edges, src_to_dst_mask, edge_verts.take_front(original_unique_edge_num));

    /* In order to reduce permutations of edge attributes, the result edge indices must stay
     * close to the original ones. */
    Array<int> edge_map_to_result_index(result_edges_num);
#ifndef NDEBUG
    edge_map_to_result_index.as_mutable_span().fill(-1);
#endif

    if (original_edges_are_distinct) {
      /* TODO: Can we group edges by the .low vertex? Or by hash, but with a Span<int> of edges
       * per group?... */
      threading::parallel_for_each(edge_maps.index_range(), [&](const int map_i) {
        int edge_map_iter = 0;
        for (const int edge_i : IndexRange(mesh.edges_num)) {
          const int edge_map = calc_edges::edge_to_hash_map_i(original_edges[edge_i],
                                                              parallel_mask);
          if (map_i != edge_map) {
            continue;
          }
          edge_map_to_result_index[edge_offsets[edge_map][edge_map_iter]] = edge_i;
          edge_map_iter++;
        }
      });
    }
    else {
      src_to_dst_mask.foreach_index(
          GrainSize(1024), [&](const int src_index, const int dst_index) {
            const OrderedEdge edge = original_edges[src_index];
            const int map_i = calc_edges::edge_to_hash_map_i(edge, parallel_mask);
            const int edge_index = edge_maps[map_i].index_of(edge);
            edge_map_to_result_index[edge_offsets[map_i][edge_index]] = dst_index;
          });
    }

    if (!no_new_edges) {
      BLI_assert(edge_offsets.data().size() == original_edge_maps_prefix.data().size());

      /* TODO: Check if all new edges are range. */
      const int new_edges_start = original_unique_edge_num;
      for (const int map_i : edge_maps.index_range()) {
        const IndexRange map_edges = edge_offsets[map_i];
        const IndexRange prefix_edges = original_edge_maps_prefix[map_i];
        const IndexRange new_edges_in_map = map_edges.drop_front(prefix_edges.size());

        const int new_edges_start_pos = map_edges.start() - prefix_edges.start();
        const int map_new_edges_start = new_edges_start + new_edges_start_pos;
        array_utils::fill_index_range(
            edge_map_to_result_index.as_mutable_span().slice(new_edges_in_map),
            map_new_edges_start);
      }
    }

    BLI_assert(!edge_map_to_result_index.as_span().contains(-1));
    calc_edges::update_edge_indices_in_face_loops(
        faces, corner_verts, edge_maps, parallel_mask, edge_offsets, corner_edges);
    array_utils::gather(edge_map_to_result_index.as_span(), corner_edges.as_span(), corner_edges);

    calc_edges::serialize_and_initialize_deduplicated_edges(
        edge_maps,
        edge_offsets,
        original_edge_maps_prefix,
        edge_verts.drop_front(original_unique_edge_num));
  }
  else {
    if (mesh.edges_num != 0) {
      const IndexMask original_corner_edges = IndexMask::from_predicate(
          IndexRange(mesh.edges_num), GrainSize(2048), memory, [&](const int edge_i) {
            const OrderedEdge edge = original_edges[edge_i];
            const int map_i = calc_edges::edge_to_hash_map_i(edge, parallel_mask);
            return edge_maps[map_i].contains(edge);
          });
      src_to_dst_mask = calc_edges::mask_first_distinct_edges(
          original_edges, original_corner_edges, edge_maps, parallel_mask, edge_offsets, memory);

      const int old_corner_edges_num = src_to_dst_mask.size();
      back_range_of_new_edges = IndexRange(result_edges_num).drop_front(old_corner_edges_num);

      Array<int> edge_map_to_result_index;
      if (!src_to_dst_mask.is_empty()) {
        /* TODO: Check if mask is range. */
        edge_map_to_result_index.reinitialize(result_edges_num);
        edge_map_to_result_index.as_mutable_span().fill(1);
        src_to_dst_mask.foreach_index([&](const int original_edge_i) {
          const OrderedEdge edge = original_edges[original_edge_i];
          const int edge_map = calc_edges::edge_to_hash_map_i(edge, parallel_mask);
          const int edge_index = edge_maps[edge_map].index_of(edge);
          edge_map_to_result_index[edge_offsets[edge_map][edge_index]] = 0;
        });

        offset_indices::accumulate_counts_to_offsets(edge_map_to_result_index.as_mutable_span(),
                                                     old_corner_edges_num);

        src_to_dst_mask.foreach_index([&](const int original_edge_i, const int dst_edge_i) {
          const OrderedEdge edge = original_edges[original_edge_i];
          const int edge_map = calc_edges::edge_to_hash_map_i(edge, parallel_mask);
          const int edge_index = edge_maps[edge_map].index_of(edge);
          edge_map_to_result_index[edge_offsets[edge_map][edge_index]] = dst_edge_i;
        });

        array_utils::gather(
            original_edges, src_to_dst_mask, edge_verts.take_front(old_corner_edges_num));

        threading::parallel_for_each(edge_maps, [&](calc_edges::EdgeMap &edge_map) {
          const int task_index = &edge_map - edge_maps.data();
          if (edge_offsets[task_index].is_empty()) {
            return;
          }

          array_utils::scatter<int2>(
              edge_map.as_span().cast<int2>(),
              edge_map_to_result_index.as_span().slice(edge_offsets[task_index]),
              edge_verts.slice(edge_offsets[task_index]));
        });

        calc_edges::update_edge_indices_in_face_loops(
            faces, corner_verts, edge_maps, parallel_mask, edge_offsets, corner_edges);

        array_utils::gather(
            edge_map_to_result_index.as_span(), corner_edges.as_span(), corner_edges);
      }
      else {
        calc_edges::update_edge_indices_in_face_loops(
            faces, corner_verts, edge_maps, parallel_mask, edge_offsets, corner_edges);
        calc_edges::serialize_and_initialize_deduplicated_edges(
            edge_maps, edge_offsets, original_edge_maps_prefix, edge_verts);
      }
    }
    else {
      back_range_of_new_edges = IndexRange(result_edges_num);
      BLI_assert(original_edge_maps_prefix.total_size() == 0);
      calc_edges::update_edge_indices_in_face_loops(
          faces, corner_verts, edge_maps, parallel_mask, edge_offsets, corner_edges);
      calc_edges::serialize_and_initialize_deduplicated_edges(
          edge_maps, edge_offsets, original_edge_maps_prefix, edge_verts);
    }
  }

  BLI_assert(std::all_of(
      edge_verts.begin(), edge_verts.end(), [&](const int2 edge) { return edge.x != edge.y; }));

  BLI_assert(!corner_edges.contains(-1));
  BLI_assert(!edge_verts.contains(int2(-1)));

  BLI_assert(src_to_dst_mask.size() + back_range_of_new_edges.size() == result_edges_num);
  BLI_assert(back_range_of_new_edges.one_after_last() == result_edges_num);

  Vector<std::string> attributes_to_drop;
  /* TODO: Need ::all_pass() on #attribute_filter to know if this loop can be skipped. */
  mesh.attributes().foreach_attribute([&](const AttributeIter &attribute) {
    if (attribute.data_type == AttrType::String) {
      return;
    }
    if (attribute.domain != AttrDomain::Edge) {
      return;
    }
    if (!attribute_filter.allow_skip(attribute.name)) {
      return;
    }
    attributes_to_drop.append(attribute.name);
  });

  for (const StringRef attribute : attributes_to_drop) {
    dst_attributes.remove(attribute);
  }

  CustomData_free_layer_named(&mesh.edge_data, ".edge_verts");
  for (CustomDataLayer &layer : MutableSpan(mesh.edge_data.layers, mesh.edge_data.totlayer)) {
    const void *src_data = layer.data;
    const size_t elem_size = CustomData_sizeof(eCustomDataType(layer.type));

    void *dst_data = MEM_malloc_arrayN(result_edges_num, elem_size, AT);
    if (src_data != nullptr) {
      if (layer.type == CD_ORIGINDEX) {
        const Span src(static_cast<const int *>(src_data), mesh.edges_num);
        MutableSpan dst(static_cast<int *>(dst_data), result_edges_num);
        array_utils::gather(src, src_to_dst_mask, dst.take_front(src_to_dst_mask.size()));
        dst.slice(back_range_of_new_edges).fill(-1);
      }
      else {
        const CPPType *type = custom_data_type_to_cpp_type(eCustomDataType(layer.type));
        BLI_assert(type != nullptr);
        const GSpan src(type, src_data, mesh.edges_num);
        GMutableSpan dst(type, dst_data, result_edges_num);
        array_utils::gather(src, src_to_dst_mask, dst.take_front(src_to_dst_mask.size()));
        type->fill_assign_n(type->default_value(),
                            dst.slice(back_range_of_new_edges).data(),
                            dst.slice(back_range_of_new_edges).size());
      }
      layer.sharing_info->remove_user_and_delete_if_last();
    }

    layer.data = dst_data;
    layer.sharing_info = implicit_sharing::info_for_mem_free(dst_data);
  }

  mesh.edges_num = result_edges_num;

  dst_attributes.add<int2>(
      ".edge_verts", AttrDomain::Edge, AttributeInitMoveArray(edge_verts.data()));

  if (select_new_edges) {
    MutableAttributeAccessor attributes = mesh.attributes_for_write();
    SpanAttributeWriter<bool> select_edge = attributes.lookup_or_add_for_write_span<bool>(
        ".select_edge", AttrDomain::Edge);
    if (select_edge) {
      select_edge.span.fill(true);
      if (!original_edges.is_empty()) {
        calc_edges::deselect_known_edges(
            edge_offsets, edge_maps, parallel_mask, original_edges, select_edge.span);
      }
dst_attributes.remove(".select_edge");
|
||||
if (ELEM(back_range_of_new_edges.size(), 0, mesh.edges_num)) {
|
||||
const bool fill_value = back_range_of_new_edges.size() == mesh.edges_num;
|
||||
dst_attributes.add<int2>(
|
||||
".select_edge",
|
||||
AttrDomain::Edge,
|
||||
AttributeInitVArray(VArray<bool>::from_single(fill_value, mesh.edges_num)));
|
||||
}
|
||||
else {
|
||||
SpanAttributeWriter<bool> select_edge = dst_attributes.lookup_or_add_for_write_span<bool>(
|
||||
".select_edge", AttrDomain::Edge);
|
||||
select_edge.span.drop_back(back_range_of_new_edges.size()).fill(false);
|
||||
select_edge.span.take_back(back_range_of_new_edges.size()).fill(true);
|
||||
select_edge.finish();
|
||||
}
|
||||
}
|
||||
@@ -243,6 +535,13 @@ void mesh_calc_edges(Mesh &mesh, bool keep_existing_edges, const bool select_new

  /* Explicitly clear edge maps, because that way it can be parallelized. */
  calc_edges::clear_hash_tables(edge_maps);

  /* BLI_assert(BKE_mesh_is_valid(&mesh)); */
}

void mesh_calc_edges(Mesh &mesh, bool keep_existing_edges, const bool select_new_edges)
{
  mesh_calc_edges(mesh, keep_existing_edges, select_new_edges, AttributeFilter::default_filter());
}

} // namespace blender::bke