/* SPDX-FileCopyrightText: 2023 Blender Authors
 *
 * SPDX-License-Identifier: GPL-2.0-or-later */

/** \file
 * \ingroup bke
 *
 * Functions for accessing mesh connectivity data.
 * eg: faces connected to verts, UVs connected to verts.
 */

#include "MEM_guardedalloc.h"

#include "atomic_ops.h"

#include "BLI_array.hh"
#include "BLI_bitmap.h"
#include "BLI_buffer.h"
#include "BLI_function_ref.hh"
#include "BLI_math_geom.h"
#include "BLI_math_vector.h"
#include "BLI_task.hh"
#include "BLI_utildefines.h"

#include "BKE_customdata.hh"
#include "BKE_mesh.hh"
#include "BKE_mesh_mapping.hh"
#include "BLI_memarena.h"

#include "BLI_strict_flags.h" /* Keep last. */

/* -------------------------------------------------------------------- */
/** \name Mesh Connectivity Mapping
 * \{ */

UvVertMap *BKE_mesh_uv_vert_map_create(const blender::OffsetIndices<int> faces,
                                       const bool *hide_poly,
                                       const bool *select_poly,
                                       const int *corner_verts,
                                       const float (*mloopuv)[2],
                                       uint totvert,
                                       const float limit[2],
                                       const bool selected,
                                       const bool use_winding)
{
  /* NOTE: N-gon version WIP, based on #BM_uv_vert_map_create. */
  UvVertMap *vmap;
  UvMapVert *buf;
  int i, totuv, nverts;

  BLI_buffer_declare_static(blender::float2, tf_uv_buf, BLI_BUFFER_NOP, 32);

  totuv = 0;

  /* Generate UvMapVert array: count the UVs that will be stored. When `selected` is true,
   * only visible & selected faces contribute. */
  for (const int64_t a : faces.index_range()) {
    if (!selected || (!(hide_poly && hide_poly[a]) && (select_poly && select_poly[a]))) {
      totuv += int(faces[a].size());
    }
  }

  if (totuv == 0) {
    return nullptr;
  }

  vmap = (UvVertMap *)MEM_callocN(sizeof(*vmap), "UvVertMap");
  buf = vmap->buf = (UvMapVert *)MEM_callocN(sizeof(*vmap->buf) * size_t(totuv), "UvMapVert");
  vmap->vert = (UvMapVert **)MEM_callocN(sizeof(*vmap->vert) * totvert, "UvMapVert*");
  if (!vmap->vert || !vmap->buf) {
    BKE_mesh_uv_vert_map_free(vmap);
    return nullptr;
  }

  /* Per-face winding sign, used to keep faces of opposite winding in separate UV groups. */
  bool *winding = nullptr;
  if (use_winding) {
    winding = static_cast<bool *>(
        MEM_calloc_arrayN(sizeof(*winding), size_t(faces.size()), "winding"));
  }

  /* Fill per-vertex linked lists of UV elements (newest element is pushed at list head). */
  for (const int64_t a : faces.index_range()) {
    const blender::IndexRange face = faces[a];
    if (!selected || (!(hide_poly && hide_poly[a]) && (select_poly && select_poly[a]))) {
      float(*tf_uv)[2] = nullptr;

      if (use_winding) {
        tf_uv = (float(*)[2])BLI_buffer_reinit_data(
            &tf_uv_buf, blender::float2, size_t(face.size()));
      }

      nverts = int(face.size());

      for (i = 0; i < nverts; i++) {
        buf->loop_of_face_index = ushort(i);
        buf->face_index = uint(a);
        buf->separate = false;
        buf->next = vmap->vert[corner_verts[face[i]]];
        vmap->vert[corner_verts[face[i]]] = buf;

        if (use_winding) {
          copy_v2_v2(tf_uv[i], mloopuv[face[i]]);
        }

        buf++;
      }

      if (use_winding) {
        winding[a] = cross_poly_v2(tf_uv, uint(nverts)) < 0;
      }
    }
  }

  /* Sort individual UVs for each vert: group elements whose UVs match within `limit`
   * (and, optionally, whose faces have the same winding). The first element of each
   * group gets `separate` set. */
  for (uint a = 0; a < totvert; a++) {
    UvMapVert *newvlist = nullptr, *vlist = vmap->vert[a];
    UvMapVert *iterv, *v, *lastv, *next;
    const float *uv, *uv2;

    while (vlist) {
      v = vlist;
      vlist = vlist->next;
      v->next = newvlist;
      newvlist = v;

      uv = mloopuv[faces[v->face_index].start() + v->loop_of_face_index];
      lastv = nullptr;
      iterv = vlist;

      while (iterv) {
        next = iterv->next;

        uv2 = mloopuv[faces[iterv->face_index].start() + iterv->loop_of_face_index];

        /* Component-wise comparison against the per-axis limit. */
        if (fabsf(uv[0] - uv2[0]) < limit[0] && fabsf(uv[1] - uv2[1]) < limit[1] &&
            (!use_winding || winding[iterv->face_index] == winding[v->face_index]))
        {
          if (lastv) {
            lastv->next = next;
          }
          else {
            vlist = next;
          }
          iterv->next = newvlist;
          newvlist = iterv;
        }
        else {
          lastv = iterv;
        }

        iterv = next;
      }

      newvlist->separate = true;
    }

    vmap->vert[a] = newvlist;
  }

  if (use_winding) {
    MEM_freeN(winding);
  }

  BLI_buffer_free(&tf_uv_buf);

  return vmap;
}

UvMapVert *BKE_mesh_uv_vert_map_get_vert(UvVertMap *vmap, uint v)
{
  return vmap->vert[v];
}

void BKE_mesh_uv_vert_map_free(UvVertMap *vmap)
{
  if (vmap) {
    if (vmap->vert) {
      MEM_freeN(vmap->vert);
    }
    if (vmap->buf) {
      MEM_freeN(vmap->buf);
    }
    MEM_freeN(vmap);
  }
}

/**
 * Build a vertex -> triangle map: for every vertex, the indices of the corner-triangles
 * that use it. `r_map` gets one #MeshElemMap per vertex, all index storage lives in `r_mem`.
 */
void BKE_mesh_vert_corner_tri_map_create(MeshElemMap **r_map,
                                         int **r_mem,
                                         const int totvert,
                                         const blender::int3 *corner_tris,
                                         const int tris_num,
                                         const int *corner_verts,
                                         const int /*corners_num*/)
{
  MeshElemMap *map = MEM_cnew_array<MeshElemMap>(size_t(totvert), __func__);
  int *indices = static_cast<int *>(MEM_mallocN(sizeof(int) * size_t(tris_num) * 3, __func__));
  int *index_step;
  int i;

  /* count face users */
  for (i = 0; i < tris_num; i++) {
    for (int j = 3; j--;) {
      map[corner_verts[corner_tris[i][j]]].count++;
    }
  }

  /* create offsets */
  index_step = indices;
  for (i = 0; i < totvert; i++) {
    map[i].indices = index_step;
    index_step += map[i].count;

    /* re-count, using this as an index below */
    map[i].count = 0;
  }

  /* assign corner_tri-edge users */
  for (i = 0; i < tris_num; i++) {
    for (int j = 3; j--;) {
      MeshElemMap *map_ele = &map[corner_verts[corner_tris[i][j]]];
      map_ele->indices[map_ele->count++] = i;
    }
  }

  *r_map = map;
  *r_mem = indices;
}

/**
 * Build a source-element -> final-elements map from an `origindex` array:
 * for every source element, which final elements were generated from it.
 * Final elements mapped to #ORIGINDEX_NONE are skipped.
 */
void BKE_mesh_origindex_map_create(MeshElemMap **r_map,
                                   int **r_mem,
                                   const int totsource,
                                   const int *final_origindex,
                                   const int totfinal)
{
  MeshElemMap *map = MEM_cnew_array<MeshElemMap>(size_t(totsource), __func__);
  int *indices = static_cast<int *>(MEM_mallocN(sizeof(int) * size_t(totfinal), __func__));
  int *index_step;
  int i;

  /* count face users */
  for (i = 0; i < totfinal; i++) {
    if (final_origindex[i] != ORIGINDEX_NONE) {
      BLI_assert(final_origindex[i] < totsource);
      map[final_origindex[i]].count++;
    }
  }

  /* create offsets */
  index_step = indices;
  for (i = 0; i < totsource; i++) {
    map[i].indices = index_step;
    index_step += map[i].count;

    /* re-count, using this as an index below */
    map[i].count = 0;
  }

  /* Assign face-tessellation users. */
  for (i = 0; i < totfinal; i++) {
    if (final_origindex[i] != ORIGINDEX_NONE) {
      MeshElemMap *map_ele = &map[final_origindex[i]];
      map_ele->indices[map_ele->count++] = i;
    }
  }

  *r_map = map;
  *r_mem = indices;
}

/**
 * Build a face -> corner-triangle map. Offsets are computed analytically from the
 * face sizes (each n-gon yields `n - 2` triangles), then each triangle is assigned
 * to its face via `corner_tri_faces`.
 */
void BKE_mesh_origindex_map_create_corner_tri(MeshElemMap **r_map,
                                              int **r_mem,
                                              const blender::OffsetIndices<int> faces,
                                              const int *corner_tri_faces,
                                              const int corner_tris_num)
{
  MeshElemMap *map = MEM_cnew_array<MeshElemMap>(size_t(faces.size()), __func__);
  int *indices = static_cast<int *>(
      MEM_mallocN(sizeof(int) * size_t(corner_tris_num), __func__));
  int *index_step;

  /* create offsets */
  index_step = indices;
  for (const int64_t i : faces.index_range()) {
    map[i].indices = index_step;
    index_step += blender::bke::mesh::face_triangles_num(int(faces[i].size()));
  }

  /* Assign face-tessellation users. */
  for (int i = 0; i < corner_tris_num; i++) {
    MeshElemMap *map_ele = &map[corner_tri_faces[i]];
    map_ele->indices[map_ele->count++] = i;
  }

  *r_map = map;
  *r_mem = indices;
}

namespace blender::bke::mesh {

/** Build cumulative offsets so group `i` owns `offsets[i]..offsets[i+1]` in an index array. */
static Array<int> create_reverse_offsets(const Span<int> indices, const int items_num)
{
  Array<int> offsets(items_num + 1, 0);
  offset_indices::build_reverse_offsets(indices, offsets);
  return offsets;
}

/** Sort the indices inside every group; groups are processed in parallel. */
static void sort_small_groups(const OffsetIndices<int> groups,
                              const int grain_size,
                              MutableSpan<int> indices)
{
  threading::parallel_for(groups.index_range(), grain_size, [&](const IndexRange range) {
    for (const int64_t index : range) {
      MutableSpan<int> group = indices.slice(groups[index]);
      std::sort(group.begin(), group.end());
    }
  });
}

static Array<int> reverse_indices_in_groups(const Span<int> group_indices,
                                            const OffsetIndices<int> offsets)
{
  if (group_indices.is_empty()) {
    return {};
  }
  BLI_assert(*std::max_element(group_indices.begin(), group_indices.end()) < offsets.size());
  BLI_assert(*std::min_element(group_indices.begin(), group_indices.end()) >= 0);

  /* `counts` keeps track of how many elements have been added to each group, and is incremented
   * atomically by many threads in parallel. `calloc` can be measurably faster than a parallel fill
   * of zero. Alternatively the offsets could be copied and incremented directly, but the cost of
   * the copy is slightly higher than the cost of `calloc`. */
  int *counts = MEM_cnew_array<int>(size_t(offsets.size()), __func__);
  BLI_SCOPED_DEFER([&]() { MEM_freeN(counts); })
  Array<int> results(group_indices.size());
  threading::parallel_for(group_indices.index_range(), 1024, [&](const IndexRange range) {
    for (const int64_t i : range) {
      const int group_index = group_indices[i];
      const int index_in_group = atomic_fetch_and_add_int32(&counts[group_index], 1);
      results[offsets[group_index][index_in_group]] = int(i);
    }
  });
  sort_small_groups(offsets, 1024, results);
  return results;
}

/* A version of #reverse_indices_in_groups that stores face indices instead of corner indices. */
static void reverse_group_indices_in_groups(const OffsetIndices<int> groups,
                                            const Span<int> group_to_elem,
                                            const OffsetIndices<int> offsets,
                                            MutableSpan<int> results)
{
  int *counts = MEM_cnew_array<int>(size_t(offsets.size()), __func__);
  BLI_SCOPED_DEFER([&]() { MEM_freeN(counts); })
  threading::parallel_for(groups.index_range(), 1024, [&](const IndexRange range) {
    for (const int64_t face : range) {
      for (const int elem : group_to_elem.slice(groups[face])) {
        const int index_in_group = atomic_fetch_and_add_int32(&counts[elem], 1);
        results[offsets[elem][index_in_group]] = int(face);
      }
    }
  });
  sort_small_groups(offsets, 1024, results);
}

static GroupedSpan<int> gather_groups(const Span<int> group_indices,
                                      const int groups_num,
                                      Array<int> &r_offsets,
                                      Array<int> &r_indices)
{
  r_offsets = create_reverse_offsets(group_indices, groups_num);
  r_indices = reverse_indices_in_groups(group_indices, r_offsets.as_span());
  return {OffsetIndices<int>(r_offsets), r_indices};
}

Array<int> build_corner_to_face_map(const OffsetIndices<int> faces)
{
  Array<int> map(faces.total_size());
  offset_indices::build_reverse_map(faces, map);
  return map;
}

GroupedSpan<int> build_vert_to_edge_map(const Span<int2> edges,
                                        const int verts_num,
                                        Array<int> &r_offsets,
                                        Array<int> &r_indices)
{
  r_offsets = create_reverse_offsets(edges.cast<int>(), verts_num);
  const OffsetIndices<int> offsets(r_offsets);
  r_indices.reinitialize(offsets.total_size());

  /* Version of #reverse_indices_in_groups that accounts for storing two indices for each edge. */
  int *counts = MEM_cnew_array<int>(size_t(offsets.size()), __func__);
  BLI_SCOPED_DEFER([&]() { MEM_freeN(counts); })
  threading::parallel_for(edges.index_range(), 1024, [&](const IndexRange range) {
    for (const int64_t edge : range) {
      for (const int vert : {edges[edge][0], edges[edge][1]}) {
        const int index_in_group = atomic_fetch_and_add_int32(&counts[vert], 1);
        r_indices[offsets[vert][index_in_group]] = int(edge);
      }
    }
  });
  sort_small_groups(offsets, 1024, r_indices);
  return {offsets, r_indices};
}

void build_vert_to_face_indices(const OffsetIndices<int> faces,
                                const Span<int> corner_verts,
                                const OffsetIndices<int> offsets,
                                MutableSpan<int> face_indices)
{
  reverse_group_indices_in_groups(faces, corner_verts, offsets, face_indices);
}

GroupedSpan<int> build_vert_to_face_map(const OffsetIndices<int> faces,
                                        const Span<int> corner_verts,
                                        const int verts_num,
                                        Array<int> &r_offsets,
                                        Array<int> &r_indices)
{
  r_offsets = create_reverse_offsets(corner_verts, verts_num);
  r_indices.reinitialize(r_offsets.last());
  build_vert_to_face_indices(faces, corner_verts, OffsetIndices<int>(r_offsets), r_indices);
  return {OffsetIndices<int>(r_offsets), r_indices};
}

Array<int> build_vert_to_corner_indices(const Span<int> corner_verts,
                                        const OffsetIndices<int> offsets)
{
  return reverse_indices_in_groups(corner_verts, offsets);
}

GroupedSpan<int> build_vert_to_corner_map(const Span<int> corner_verts,
                                          const int verts_num,
                                          Array<int> &r_offsets,
                                          Array<int> &r_indices)
{
  return gather_groups(corner_verts, verts_num, r_offsets, r_indices);
}

GroupedSpan<int> build_edge_to_corner_map(const Span<int> corner_edges,
                                          const int edges_num,
                                          Array<int> &r_offsets,
                                          Array<int> &r_indices)
{
  return gather_groups(corner_edges, edges_num, r_offsets, r_indices);
}

GroupedSpan<int> build_edge_to_face_map(const OffsetIndices<int> faces,
                                        const Span<int> corner_edges,
                                        const int edges_num,
                                        Array<int> &r_offsets,
                                        Array<int> &r_indices)
{
  r_offsets = create_reverse_offsets(corner_edges, edges_num);
  r_indices.reinitialize(r_offsets.last());
  reverse_group_indices_in_groups(faces, corner_edges, OffsetIndices<int>(r_offsets), r_indices);
  return {OffsetIndices<int>(r_offsets), r_indices};
}

}  // namespace blender::bke::mesh

/** \} */

/* -------------------------------------------------------------------- */
/** \name Mesh loops/face islands.
 * Used currently for UVs and 'smooth groups'.
 * \{ */

/**
 * Callback deciding whether the given face/loop/edge define an island boundary or not.
 */
using MeshRemap_CheckIslandBoundary =
    blender::FunctionRef<bool(int face_index,
                              int loop_index,
                              int edge_index,
                              int edge_user_count,
                              blender::Span<int> edge_face_map_elem)>;

/**
 * Flood-fill faces into groups, stopping at edges for which `edge_boundary_check` returns
 * true. With `use_bitflags`, group ids are single bits so that contiguous groups sharing a
 * boundary get different bits (smooth-group style); otherwise ids are sequential integers
 * starting at 1. Optionally also reports which edges are borders.
 */
static void face_edge_loop_islands_calc(const int totedge,
                                        const blender::OffsetIndices<int> faces,
                                        const blender::Span<int> corner_edges,
                                        blender::GroupedSpan<int> edge_face_map,
                                        const bool use_bitflags,
                                        MeshRemap_CheckIslandBoundary edge_boundary_check,
                                        int **r_face_groups,
                                        int *r_totgroup,
                                        BLI_bitmap **r_edge_borders,
                                        int *r_totedgeborder)
{
  int *face_groups;
  int *face_stack;

  BLI_bitmap *edge_borders = nullptr;
  int num_edgeborders = 0;

  int face_prev = 0;
  const int temp_face_group_id = 3; /* Placeholder value. */
  /* Group we could not find any available bit, will be reset to 0 at end. */
  const int face_group_id_overflowed = 5;

  int tot_group = 0;
  bool group_id_overflow = false;

  if (faces.is_empty()) {
    *r_totgroup = 0;
    *r_face_groups = nullptr;
    if (r_edge_borders) {
      *r_edge_borders = nullptr;
      *r_totedgeborder = 0;
    }
    return;
  }

  if (r_edge_borders) {
    edge_borders = BLI_BITMAP_NEW(totedge, __func__);
    *r_totedgeborder = 0;
  }

  /* Build the edge -> face map locally if the caller did not supply one. */
  blender::Array<int> edge_to_face_src_offsets;
  blender::Array<int> edge_to_face_src_indices;
  if (edge_face_map.is_empty()) {
    edge_face_map = blender::bke::mesh::build_edge_to_face_map(
        faces, corner_edges, totedge, edge_to_face_src_offsets, edge_to_face_src_indices);
  }

  face_groups = static_cast<int *>(MEM_callocN(sizeof(int) * size_t(faces.size()), __func__));
  face_stack = static_cast<int *>(MEM_mallocN(sizeof(int) * size_t(faces.size()), __func__));

  while (true) {
    int face;
    int bit_face_group_mask = 0;
    int face_group_id;
    int ps_curr_idx = 0, ps_end_idx = 0; /* stack indices */

    for (face = face_prev; face < int(faces.size()); face++) {
      if (face_groups[face] == 0) {
        break;
      }
    }

    if (face == int(faces.size())) {
      /* all done */
      break;
    }

    face_group_id = use_bitflags ? temp_face_group_id : ++tot_group;

    /* start searching from here next time */
    face_prev = face + 1;

    face_groups[face] = face_group_id;
    face_stack[ps_end_idx++] = face;

    while (ps_curr_idx != ps_end_idx) {
      face = face_stack[ps_curr_idx++];
      BLI_assert(face_groups[face] == face_group_id);

      for (const int64_t loop : faces[face]) {
        const int edge = corner_edges[loop];
        /* loop over face users */
        const blender::Span<int> map_ele = edge_face_map[edge];
        const int *p = map_ele.data();
        int i = int(map_ele.size());
        if (!edge_boundary_check(face, int(loop), edge, i, map_ele)) {
          for (; i--; p++) {
            /* if we meet other non initialized its a bug */
            BLI_assert(ELEM(face_groups[*p], 0, face_group_id));

            if (face_groups[*p] == 0) {
              face_groups[*p] = face_group_id;
              face_stack[ps_end_idx++] = *p;
            }
          }
        }
        else {
          if (edge_borders && !BLI_BITMAP_TEST(edge_borders, edge)) {
            BLI_BITMAP_ENABLE(edge_borders, edge);
            num_edgeborders++;
          }
          if (use_bitflags) {
            /* Find contiguous smooth groups already assigned,
             * these are the values we can't reuse! */
            for (; i--; p++) {
              int bit = face_groups[*p];
              if (!ELEM(bit, 0, face_group_id, face_group_id_overflowed) &&
                  !(bit_face_group_mask & bit))
              {
                bit_face_group_mask |= bit;
              }
            }
          }
        }
      }
    }

    /* And now, we have all our face from current group in face_stack
     * (from 0 to (ps_end_idx - 1)),
     * as well as all smoothgroups bits we can't use in bit_face_group_mask.
     */
    if (use_bitflags) {
      int i, *p, gid_bit = 0;
      face_group_id = 1;

      /* Find first bit available! */
      for (; (face_group_id & bit_face_group_mask) && (gid_bit < 32); gid_bit++) {
        face_group_id <<= 1; /* will 'overflow' on last possible iteration. */
      }
      if (UNLIKELY(gid_bit > 31)) {
        /* All bits used in contiguous smooth groups, we can't do much!
         * NOTE: this is *very* unlikely - theoretically, four groups are enough,
         * I don't think we can reach this goal with such a simple algorithm,
         * but I don't think either we'll never need all 32 groups!
         */
        printf(
            "Warning, could not find an available id for current smooth group, faces will be "
            "marked as out of any smooth group...\n");

        /* Can't use 0, will have to set them to this value later. */
        face_group_id = face_group_id_overflowed;

        group_id_overflow = true;
      }
      if (gid_bit > tot_group) {
        tot_group = gid_bit;
      }
      /* And assign the final smooth group id to that face group! */
      for (i = ps_end_idx, p = face_stack; i--; p++) {
        face_groups[*p] = face_group_id;
      }
    }
  }

  if (use_bitflags) {
    /* used bits are zero-based. */
    tot_group++;
  }

  if (UNLIKELY(group_id_overflow)) {
    int i = int(faces.size()), *gid = face_groups;
    for (; i--; gid++) {
      if (*gid == face_group_id_overflowed) {
        *gid = 0;
      }
    }
    /* Using 0 as group id adds one more group! */
    tot_group++;
  }

  MEM_freeN(face_stack);

  *r_totgroup = tot_group;
  *r_face_groups = face_groups;
  if (r_edge_borders) {
    *r_edge_borders = edge_borders;
    *r_totedgeborder = num_edgeborders;
  }
}

int *BKE_mesh_calc_smoothgroups(int edges_num,
                                const blender::OffsetIndices<int> faces,
                                const blender::Span<int> corner_edges,
                                const blender::Span<bool> sharp_edges,
                                const blender::Span<bool> sharp_faces,
                                int *r_totgroup,
                                bool use_bitflags)
{
  int *face_groups = nullptr;

  auto face_is_smooth = [&](const int i) { return sharp_faces.is_empty() || !sharp_faces[i]; };

  auto face_is_island_boundary_smooth = [&](const int face_index,
                                            const int /*loop_index*/,
                                            const int edge_index,
                                            const int edge_user_count,
                                            const blender::Span<int> edge_face_map_elem) {
    /* Edge is sharp if one of its faces is flat, or edge itself is sharp,
     * or edge is not used by exactly two faces. */
    if (face_is_smooth(face_index) && !(!sharp_edges.is_empty() && sharp_edges[edge_index]) &&
        (edge_user_count == 2))
    {
      /* In that case, edge appears to be smooth, but we need to check its other face too. */
      const int other_face_index = (face_index == edge_face_map_elem[0]) ? edge_face_map_elem[1] :
                                                                           edge_face_map_elem[0];
      return !face_is_smooth(other_face_index);
    }
    return true;
  };

  face_edge_loop_islands_calc(edges_num,
                              faces,
                              corner_edges,
                              {},
                              use_bitflags,
                              face_is_island_boundary_smooth,
                              &face_groups,
                              r_totgroup,
                              nullptr,
                              nullptr);

  return face_groups;
}

#define MISLAND_DEFAULT_BUFSIZE 64

void BKE_mesh_loop_islands_init(MeshIslandStore *island_store,
                                const short item_type,
                                const int items_num,
                                const short island_type,
                                const short innercut_type)
{
  MemArena *mem = island_store->mem;

  if (mem == nullptr) {
    mem = BLI_memarena_new(BLI_MEMARENA_STD_BUFSIZE, __func__);
    island_store->mem = mem;
  }
  /* else memarena should be cleared */

  BLI_assert(
      ELEM(item_type, MISLAND_TYPE_VERT, MISLAND_TYPE_EDGE, MISLAND_TYPE_POLY, MISLAND_TYPE_LOOP));
  BLI_assert(ELEM(
      island_type, MISLAND_TYPE_VERT, MISLAND_TYPE_EDGE, MISLAND_TYPE_POLY, MISLAND_TYPE_LOOP));

  island_store->item_type = item_type;
  island_store->items_to_islands_num = items_num;
  island_store->items_to_islands = static_cast<int *>(
      BLI_memarena_alloc(mem, sizeof(*island_store->items_to_islands) * size_t(items_num)));

  island_store->island_type = island_type;
  island_store->islands_num_alloc = MISLAND_DEFAULT_BUFSIZE;
  island_store->islands = static_cast<MeshElemMap **>(
      BLI_memarena_alloc(mem, sizeof(*island_store->islands) * island_store->islands_num_alloc));

  island_store->innercut_type = innercut_type;
  island_store->innercuts = static_cast<MeshElemMap **>(
      BLI_memarena_alloc(mem, sizeof(*island_store->innercuts) * island_store->islands_num_alloc));
}

void BKE_mesh_loop_islands_clear(MeshIslandStore *island_store)
{
  island_store->item_type = MISLAND_TYPE_NONE;
  island_store->items_to_islands_num = 0;
  island_store->items_to_islands = nullptr;

  island_store->island_type = MISLAND_TYPE_NONE;
  island_store->islands_num = 0;
  island_store->islands = nullptr;

  island_store->innercut_type = MISLAND_TYPE_NONE;
  island_store->innercuts = nullptr;

  if (island_store->mem) {
    BLI_memarena_clear(island_store->mem);
  }

  island_store->islands_num_alloc = 0;
}
void BKE_mesh_loop_islands_free(MeshIslandStore *island_store) { if (island_store->mem) { BLI_memarena_free(island_store->mem); island_store->mem = nullptr; } } void BKE_mesh_loop_islands_add(MeshIslandStore *island_store, const int item_num, const int *items_indices, const int num_island_items, int *island_item_indices, const int num_innercut_items, int *innercut_item_indices) { MemArena *mem = island_store->mem; MeshElemMap *isld, *innrcut; const int curr_island_idx = island_store->islands_num++; const size_t curr_num_islands = size_t(island_store->islands_num); int i = item_num; while (i--) { island_store->items_to_islands[items_indices[i]] = curr_island_idx; } if (UNLIKELY(curr_num_islands > island_store->islands_num_alloc)) { MeshElemMap **islds, **innrcuts; island_store->islands_num_alloc *= 2; islds = static_cast( BLI_memarena_alloc(mem, sizeof(*islds) * island_store->islands_num_alloc)); memcpy(islds, island_store->islands, sizeof(*islds) * (curr_num_islands - 1)); island_store->islands = islds; innrcuts = static_cast( BLI_memarena_alloc(mem, sizeof(*innrcuts) * island_store->islands_num_alloc)); memcpy(innrcuts, island_store->innercuts, sizeof(*innrcuts) * (curr_num_islands - 1)); island_store->innercuts = innrcuts; } island_store->islands[curr_island_idx] = isld = static_cast( BLI_memarena_alloc(mem, sizeof(*isld))); isld->count = num_island_items; isld->indices = static_cast( BLI_memarena_alloc(mem, sizeof(*isld->indices) * size_t(num_island_items))); memcpy(isld->indices, island_item_indices, sizeof(*isld->indices) * size_t(num_island_items)); island_store->innercuts[curr_island_idx] = innrcut = static_cast( BLI_memarena_alloc(mem, sizeof(*innrcut))); innrcut->count = num_innercut_items; innrcut->indices = static_cast( BLI_memarena_alloc(mem, sizeof(*innrcut->indices) * size_t(num_innercut_items))); memcpy(innrcut->indices, innercut_item_indices, sizeof(*innrcut->indices) * size_t(num_innercut_items)); } static bool mesh_calc_islands_loop_face_uv(const 
int totedge, const bool *uv_seams, const blender::OffsetIndices faces, const int *corner_verts, const int *corner_edges, const int corners_num, const float (*luvs)[2], MeshIslandStore *r_island_store) { using namespace blender; int *face_groups = nullptr; int num_face_groups; int *face_indices; int *loop_indices; int num_pidx, num_lidx; /* Those are used to detect 'inner cuts', i.e. edges that are borders, * and yet have two or more faces of a same group using them * (typical case: seam used to unwrap properly a cylinder). */ BLI_bitmap *edge_borders = nullptr; int num_edge_borders = 0; char *edge_border_count = nullptr; int *edge_innercut_indices = nullptr; int num_einnercuts = 0; int grp_idx; BKE_mesh_loop_islands_clear(r_island_store); BKE_mesh_loop_islands_init( r_island_store, MISLAND_TYPE_LOOP, corners_num, MISLAND_TYPE_POLY, MISLAND_TYPE_EDGE); Array edge_to_face_offsets; Array edge_to_face_indices; const GroupedSpan edge_to_face_map = bke::mesh::build_edge_to_face_map( faces, {corner_edges, corners_num}, totedge, edge_to_face_offsets, edge_to_face_indices); Array edge_to_loop_offsets; Array edge_to_loop_indices; GroupedSpan edge_to_loop_map; if (luvs) { edge_to_loop_map = bke::mesh::build_edge_to_corner_map( {corner_edges, corners_num}, totedge, edge_to_loop_offsets, edge_to_loop_indices); } /* TODO: I'm not sure edge seam flag is enough to define UV islands? * Maybe we should also consider UV-maps values * themselves (i.e. different UV-edges for a same mesh-edge => boundary edge too?). * Would make things much more complex though, * and each UVMap would then need its own mesh mapping, not sure we want that at all! 
*/ auto mesh_check_island_boundary_uv = [&](const int /*face_index*/, const int loop_index, const int edge_index, const int /*edge_user_count*/, const Span /*edge_face_map_elem*/) -> bool { if (luvs) { const Span edge_to_loops = edge_to_loop_map[corner_edges[loop_index]]; BLI_assert(edge_to_loops.size() >= 2 && (edge_to_loops.size() % 2) == 0); const int v1 = corner_verts[edge_to_loops[0]]; const int v2 = corner_verts[edge_to_loops[1]]; const float *uvco_v1 = luvs[edge_to_loops[0]]; const float *uvco_v2 = luvs[edge_to_loops[1]]; for (int i = 2; i < edge_to_loops.size(); i += 2) { if (corner_verts[edge_to_loops[i]] == v1) { if (!equals_v2v2(uvco_v1, luvs[edge_to_loops[i]]) || !equals_v2v2(uvco_v2, luvs[edge_to_loops[i + 1]])) { return true; } } else { BLI_assert(corner_verts[edge_to_loops[i]] == v2); UNUSED_VARS_NDEBUG(v2); if (!equals_v2v2(uvco_v2, luvs[edge_to_loops[i]]) || !equals_v2v2(uvco_v1, luvs[edge_to_loops[i + 1]])) { return true; } } } return false; } /* Edge is UV boundary if tagged as seam. */ return uv_seams && uv_seams[edge_index]; }; face_edge_loop_islands_calc(totedge, faces, {corner_edges, corners_num}, edge_to_face_map, false, mesh_check_island_boundary_uv, &face_groups, &num_face_groups, &edge_borders, &num_edge_borders); if (!num_face_groups) { if (edge_borders) { MEM_freeN(edge_borders); } return false; } if (num_edge_borders) { edge_border_count = static_cast( MEM_mallocN(sizeof(*edge_border_count) * size_t(totedge), __func__)); edge_innercut_indices = static_cast( MEM_mallocN(sizeof(*edge_innercut_indices) * size_t(num_edge_borders), __func__)); } face_indices = static_cast( MEM_mallocN(sizeof(*face_indices) * size_t(faces.size()), __func__)); loop_indices = static_cast( MEM_mallocN(sizeof(*loop_indices) * size_t(corners_num), __func__)); /* NOTE: here we ignore '0' invalid group - this should *never* happen in this case anyway? 
*/ for (grp_idx = 1; grp_idx <= num_face_groups; grp_idx++) { num_pidx = num_lidx = 0; if (num_edge_borders) { num_einnercuts = 0; memset(edge_border_count, 0, sizeof(*edge_border_count) * size_t(totedge)); } for (const int64_t p_idx : faces.index_range()) { if (face_groups[p_idx] != grp_idx) { continue; } face_indices[num_pidx++] = int(p_idx); for (const int64_t corner : faces[p_idx]) { const int edge_i = corner_edges[corner]; loop_indices[num_lidx++] = int(corner); if (num_edge_borders && BLI_BITMAP_TEST(edge_borders, edge_i) && (edge_border_count[edge_i] < 2)) { edge_border_count[edge_i]++; if (edge_border_count[edge_i] == 2) { edge_innercut_indices[num_einnercuts++] = edge_i; } } } } BKE_mesh_loop_islands_add(r_island_store, num_lidx, loop_indices, num_pidx, face_indices, num_einnercuts, edge_innercut_indices); } MEM_freeN(face_indices); MEM_freeN(loop_indices); MEM_freeN(face_groups); if (edge_borders) { MEM_freeN(edge_borders); } if (num_edge_borders) { MEM_freeN(edge_border_count); MEM_freeN(edge_innercut_indices); } return true; } bool BKE_mesh_calc_islands_loop_face_edgeseam(const float (*vert_positions)[3], const int totvert, const blender::int2 *edges, const int totedge, const bool *uv_seams, const blender::OffsetIndices faces, const int *corner_verts, const int *corner_edges, const int corners_num, MeshIslandStore *r_island_store) { UNUSED_VARS(vert_positions, totvert, edges); return mesh_calc_islands_loop_face_uv( totedge, uv_seams, faces, corner_verts, corner_edges, corners_num, nullptr, r_island_store); } bool BKE_mesh_calc_islands_loop_face_uvmap(float (*vert_positions)[3], const int totvert, blender::int2 *edges, const int totedge, const bool *uv_seams, const blender::OffsetIndices faces, const int *corner_verts, const int *corner_edges, const int corners_num, const float (*luvs)[2], MeshIslandStore *r_island_store) { UNUSED_VARS(vert_positions, totvert, edges); BLI_assert(luvs != nullptr); return mesh_calc_islands_loop_face_uv( totedge, uv_seams, 
faces, corner_verts, corner_edges, corners_num, luvs, r_island_store); } /** \} */