Mesh: Reduce memory usage when calculating custom normals

Reduces the size of the struct storing a normal space (MLoopNorSpace) from 72 to 64 bytes. The weighted normal modifier, the only user of the struct's user_data pointer, now keeps a per-corner index into its own item array instead.
Author: Hans Goudey
Date:   2023-04-27 10:31:24 -04:00
Parent: 18f4fd6b85
Commit: 9292e094e7

2 changed files with 6 additions and 13 deletions

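The eight bytes come from dropping the struct's trailing void *user_data pointer, as the first hunk below shows. Here is a minimal sketch of the layout arithmetic on a typical 64-bit platform; the field names mirror the real MLoopNorSpace, but these definitions are illustrative stand-ins, not Blender's headers:

#include <cstdio>

struct LinkNode; /* opaque; used through a pointer only */

/* Before: the data ends at 57 bytes, and padding for the 8-byte-aligned
 * user_data pointer pushes the size to 72. */
struct SpaceBefore {
  float vec_lnor[3], vec_ref[3], vec_ortho[3]; /* 36 bytes */
  float ref_alpha, ref_beta;                   /* -> 44, padded to 48 */
  LinkNode *loops;                             /* -> 56 */
  char flags;                                  /* -> 57, padded to 64 */
  void *user_data;                             /* -> 72 (removed by this commit) */
};

/* After: the same fields minus the pointer; 57 bytes padded to 64. */
struct SpaceAfter {
  float vec_lnor[3], vec_ref[3], vec_ortho[3];
  float ref_alpha, ref_beta;
  LinkNode *loops;
  char flags;
};

int main()
{
  std::printf("before: %zu bytes\n", sizeof(SpaceBefore)); /* 72 */
  std::printf("after:  %zu bytes\n", sizeof(SpaceAfter));  /* 64 */
  return 0;
}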

@@ -397,9 +397,6 @@ typedef struct MLoopNorSpace {
    * - BMLoop pointers. */
   struct LinkNode *loops;
   char flags;
-
-  /** To be used for extended processing related to loop normal spaces (aka smooth fans). */
-  void *user_data;
 } MLoopNorSpace;
 /**
  * MLoopNorSpace.flags

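The only consumer of the removed field is the Weighted Normal modifier, changed below: instead of tagging every normal space with a pointer back to its aggregate item via user_data, it now fills a side array that maps each corner to an item index. A minimal sketch of that pointer-to-index pattern, using a hypothetical AggregateItem stand-in for the real WeightedNormalDataAggregateItem:

#include <vector>

/* Hypothetical stand-in for WeightedNormalDataAggregateItem. */
struct AggregateItem {
  float normal[3] = {0.0f, 0.0f, 0.0f};
};

int main()
{
  const int corners_num = 8;
  std::vector<AggregateItem> items_data(2);

  /* One int per corner, owned by the modifier, replaces a void *
   * stored in every shared normal-space struct. */
  std::vector<int> item_index_per_corner(corners_num, 0);

  /* Build phase (mirrors the @@ -255,7 +256,7 @@ hunk): record the
   * owning item while walking the corners. */
  const int ml_index = 3;
  const int item_index = 1;
  item_index_per_corner[ml_index] = item_index;

  /* Lookup phase (mirrors the remaining hunks): a plain index lookup
   * replaces static_cast<AggregateItem *>(space->user_data). */
  AggregateItem *item_data = &items_data[item_index_per_corner[ml_index]];
  item_data->normal[2] = 1.0f;
  return 0;
}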

@@ -214,6 +214,7 @@ static void apply_weights_vertex_normal(WeightedNormalModifierData *wnmd,
   blender::Array<blender::float3> loop_normals;
 
   WeightedNormalDataAggregateItem *items_data = nullptr;
+  Array<int> item_index_per_corner(corner_verts.size(), 0);
   int items_num = 0;
   if (keep_sharp) {
     BLI_bitmap *done_loops = BLI_BITMAP_NEW(corner_verts.size(), __func__);
@@ -255,7 +256,7 @@ static void apply_weights_vertex_normal(WeightedNormalModifierData *wnmd,
       itdt->curr_strength = FACE_STRENGTH_WEAK;
 
       MLoopNorSpace *lnor_space = lnors_spacearr.lspacearr[ml_index];
-      lnor_space->user_data = itdt;
+      item_index_per_corner[ml_index] = item_index;
 
       if (!(lnor_space->flags & MLNOR_SPACE_IS_SINGLE)) {
         for (LinkNode *lnode = lnor_space->loops; lnode; lnode = lnode->next) {
@@ -294,9 +295,7 @@ static void apply_weights_vertex_normal(WeightedNormalModifierData *wnmd,
     for (const int ml_index : polys[poly_index]) {
       const int mv_index = corner_verts[ml_index];
       WeightedNormalDataAggregateItem *item_data =
-          keep_sharp ? static_cast<WeightedNormalDataAggregateItem *>(
-                           lnors_spacearr.lspacearr[ml_index]->user_data) :
-                       &items_data[mv_index];
+          keep_sharp ? &items_data[item_index_per_corner[ml_index]] : &items_data[mv_index];
 
       aggregate_item_normal(
           wnmd, wn_data, item_data, mv_index, poly_index, mp_val, use_face_influence);
@@ -312,9 +311,7 @@ static void apply_weights_vertex_normal(WeightedNormalModifierData *wnmd,
       const int poly_index = loop_to_poly[ml_index];
       const int mv_index = corner_verts[ml_index];
       WeightedNormalDataAggregateItem *item_data =
-          keep_sharp ? static_cast<WeightedNormalDataAggregateItem *>(
-                           lnors_spacearr.lspacearr[ml_index]->user_data) :
-                       &items_data[mv_index];
+          keep_sharp ? &items_data[item_index_per_corner[ml_index]] : &items_data[mv_index];
 
       aggregate_item_normal(
           wnmd, wn_data, item_data, mv_index, poly_index, ml_val, use_face_influence);
@@ -337,8 +334,7 @@ static void apply_weights_vertex_normal(WeightedNormalModifierData *wnmd,
      * (before this modifier is applied, at start of this function),
      * so no need to recompute them here. */
     for (int ml_index = 0; ml_index < corner_verts.size(); ml_index++) {
-      WeightedNormalDataAggregateItem *item_data = static_cast<WeightedNormalDataAggregateItem *>(
-          lnors_spacearr.lspacearr[ml_index]->user_data);
+      WeightedNormalDataAggregateItem *item_data = &items_data[item_index_per_corner[ml_index]];
       if (!is_zero_v3(item_data->normal)) {
         copy_v3_v3(loop_normals[ml_index], item_data->normal);
       }
@@ -366,7 +362,7 @@ static void apply_weights_vertex_normal(WeightedNormalModifierData *wnmd,
     /* NOTE: in theory, we could avoid this extra allocation & copying...
      * But think we can live with it for now,
      * and it makes code simpler & cleaner. */
-    blender::Array<blender::float3> vert_normals(verts_num, float3(0));
+    blender::Array<blender::float3> vert_normals(verts_num, float3(0.0f));
 
     for (int ml_index = 0; ml_index < corner_verts.size(); ml_index++) {
       const int mv_index = corner_verts[ml_index];
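
Worth noting about the trade-off: the new index array costs 4 bytes per corner, but only inside this one modifier and only for the duration of the call, while the removed user_data pointer cost 8 bytes in every MLoopNorSpace allocated by any code that builds normal spaces, whether it used the field or not. The change also drops the static_cast at every lookup.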