2023-08-16 00:20:26 +10:00
|
|
|
/* SPDX-FileCopyrightText: 2023 Blender Authors
|
2023-05-31 16:19:06 +02:00
|
|
|
*
|
|
|
|
|
* SPDX-License-Identifier: GPL-2.0-or-later */
|
2016-05-25 19:12:43 +10:00
|
|
|
|
2019-02-18 08:08:12 +11:00
|
|
|
/** \file
|
|
|
|
|
* \ingroup edmesh
|
2016-05-25 19:12:43 +10:00
|
|
|
*/
|
|
|
|
|
|
2025-01-07 12:39:13 +01:00
|
|
|
#include <algorithm>
|
2024-07-03 15:32:05 +02:00
|
|
|
#include <variant>
|
|
|
|
|
|
2016-05-25 19:12:43 +10:00
|
|
|
#include "MEM_guardedalloc.h"
|
|
|
|
|
|
2018-04-16 16:27:55 +02:00
|
|
|
#include "CLG_log.h"
|
|
|
|
|
|
2020-03-19 09:33:03 +01:00
|
|
|
#include "DNA_key_types.h"
|
|
|
|
|
#include "DNA_layer_types.h"
|
2016-05-25 19:12:43 +10:00
|
|
|
#include "DNA_mesh_types.h"
|
2018-06-29 14:56:38 +02:00
|
|
|
#include "DNA_meshdata_types.h"
|
2016-05-25 19:12:43 +10:00
|
|
|
#include "DNA_object_types.h"
|
2020-12-15 10:47:58 +11:00
|
|
|
#include "DNA_scene_types.h"
|
2016-05-25 19:12:43 +10:00
|
|
|
|
2018-03-19 14:17:59 +01:00
|
|
|
#include "BLI_array_utils.h"
|
2023-04-13 14:57:57 +02:00
|
|
|
#include "BLI_implicit_sharing.hh"
|
2020-03-19 09:33:03 +01:00
|
|
|
#include "BLI_listbase.h"
|
2025-01-07 12:39:13 +01:00
|
|
|
#include "BLI_math_base.h"
|
2023-09-01 21:37:11 +02:00
|
|
|
#include "BLI_string.h"
|
2023-03-11 11:57:15 +11:00
|
|
|
#include "BLI_task.hh"
|
2024-07-03 15:32:05 +02:00
|
|
|
#include "BLI_vector.hh"
|
2016-05-25 19:12:43 +10:00
|
|
|
|
2023-11-16 11:41:55 +01:00
|
|
|
#include "BKE_context.hh"
|
|
|
|
|
#include "BKE_customdata.hh"
|
|
|
|
|
#include "BKE_editmesh.hh"
|
2024-01-30 14:42:07 -05:00
|
|
|
#include "BKE_key.hh"
|
2024-01-23 15:18:09 -05:00
|
|
|
#include "BKE_layer.hh"
|
2024-01-15 12:44:04 -05:00
|
|
|
#include "BKE_lib_id.hh"
|
2023-12-01 19:43:16 +01:00
|
|
|
#include "BKE_main.hh"
|
2023-03-12 22:29:15 +01:00
|
|
|
#include "BKE_mesh.hh"
|
2023-10-09 23:41:53 +02:00
|
|
|
#include "BKE_object.hh"
|
2024-01-15 12:26:09 -05:00
|
|
|
#include "BKE_undo_system.hh"
|
2016-05-25 19:12:43 +10:00
|
|
|
|
2023-09-22 03:18:17 +02:00
|
|
|
#include "DEG_depsgraph.hh"
|
2018-04-01 11:03:25 +02:00
|
|
|
|
2023-08-05 02:57:52 +02:00
|
|
|
#include "ED_mesh.hh"
|
|
|
|
|
#include "ED_object.hh"
|
|
|
|
|
#include "ED_undo.hh"
|
2023-08-04 23:11:22 +02:00
|
|
|
#include "ED_util.hh"
|
2016-05-25 19:12:43 +10:00
|
|
|
|
2023-08-04 23:11:22 +02:00
|
|
|
#include "WM_api.hh"
|
|
|
|
|
#include "WM_types.hh"
|
2018-03-19 14:17:59 +01:00
|
|
|
|
2016-05-30 15:31:31 +10:00
|
|
|
#define USE_ARRAY_STORE
|
2016-05-25 19:12:43 +10:00
|
|
|
|
2016-05-30 15:31:31 +10:00
|
|
|
#ifdef USE_ARRAY_STORE
|
|
|
|
|
// # define DEBUG_PRINT
|
|
|
|
|
// # define DEBUG_TIME
|
|
|
|
|
# ifdef DEBUG_TIME
|
2024-01-19 14:32:28 +01:00
|
|
|
# include "BLI_time_utildefines.h"
|
2016-05-30 15:31:31 +10:00
|
|
|
# endif
|
2016-05-25 19:12:43 +10:00
|
|
|
|
2016-05-30 15:31:31 +10:00
|
|
|
# include "BLI_array_store.h"
|
2016-06-08 18:34:01 +10:00
|
|
|
# include "BLI_array_store_utils.h"
|
2023-03-23 00:52:51 +11:00
|
|
|
/**
|
|
|
|
|
* This used to be much smaller (256), but this caused too much overhead
|
|
|
|
|
* when selection moved to boolean arrays. Especially with high-poly meshes
|
|
|
|
|
* where managing a large number of small chunks could be slow, blocking user interactivity.
|
|
|
|
|
* Use a larger value (in bytes) which calculates the chunk size using #array_chunk_size_calc.
|
|
|
|
|
* See: #105046 & #105205.
|
|
|
|
|
*/
|
|
|
|
|
# define ARRAY_CHUNK_SIZE_IN_BYTES 65536
|
|
|
|
|
# define ARRAY_CHUNK_NUM_MIN 256
|
2016-05-30 15:31:31 +10:00
|
|
|
|
|
|
|
|
# define USE_ARRAY_STORE_THREAD
|
|
|
|
|
#endif
|
|
|
|
|
|
|
|
|
|
#ifdef USE_ARRAY_STORE_THREAD
|
|
|
|
|
# include "BLI_task.h"
|
|
|
|
|
#endif
|
|
|
|
|
|
2018-04-16 16:27:55 +02:00
|
|
|
/** We only need this locally. */
|
|
|
|
|
static CLG_LogRef LOG = {"ed.undo.mesh"};
|
|
|
|
|
|
2018-03-19 14:17:59 +01:00
|
|
|
/* -------------------------------------------------------------------- */
|
|
|
|
|
/** \name Undo Conversion
|
|
|
|
|
* \{ */
|
2016-05-30 15:31:31 +10:00
|
|
|
|
|
|
|
|
#ifdef USE_ARRAY_STORE
|
|
|
|
|
|
2023-03-23 00:52:51 +11:00
|
|
|
static size_t array_chunk_size_calc(const size_t stride)
|
|
|
|
|
{
|
|
|
|
|
/* Return a chunk size that targets a size in bytes,
|
|
|
|
|
* this is done so boolean arrays don't add so much overhead and
|
2023-03-23 10:52:40 +11:00
|
|
|
* larger arrays aren't so big as to waste memory, see: #105205. */
|
2023-03-23 00:52:51 +11:00
|
|
|
return std::max(ARRAY_CHUNK_NUM_MIN, ARRAY_CHUNK_SIZE_IN_BYTES / power_of_2_max_i(stride));
|
|
|
|
|
}
|
|
|
|
|
|
2016-05-30 15:31:31 +10:00
|
|
|
/* Single linked list of layers stored per type. */
struct BArrayCustomData {
  /** Next group of layers (of a different #eCustomDataType), or null at the end of the list. */
  BArrayCustomData *next;
  /** The custom-data type shared by all layers in this group. */
  eCustomDataType type;
  /**
   * One entry per layer of this type: either a #BArrayState in the de-duplicated
   * array store, or implicitly-shared data for layer types that can't be stored
   * in the array store (see `layer_type_is_dynamic` in #um_arraystore_cd_compact).
   */
  blender::Array<std::variant<BArrayState *, blender::ImplicitSharingInfoAndData>> states;
};
|
2016-05-30 15:31:31 +10:00
|
|
|
|
|
|
|
|
#endif
|
2016-05-25 19:12:43 +10:00
|
|
|
|
2022-10-07 10:40:44 -05:00
|
|
|
struct UndoMesh {
  /**
   * Links to other undo-meshes in `um_arraystore.local_links`.
   * Not to be confused with the next and previous undo steps.
   */
  UndoMesh *local_next, *local_prev;

  /** Full copy of the edit-mesh converted back to a #Mesh for storage. */
  Mesh mesh;
  /** Mesh select mode (vertex/edge/face), restored on undo. */
  int selectmode;
  /** UV select mode, restored on undo. */
  char uv_selectmode;

  /**
   * The active shape key associated with this mesh.
   *
   * NOTE(@ideasman42): This isn't a perfect solution, if you edit keys and change shapes this
   * works well (fixing #32442), but editing shape keys, going into object mode, removing or
   * changing their order, then go back into edit-mode and undo will give issues - where the old
   * index will be out of sync with the new object index.
   *
   * There are a few ways this could be made to work but for now its a known limitation with mixing
   * object and edit-mode operations.
   */
  int shapenr;

#ifdef USE_ARRAY_STORE
  /* Null arrays are considered empty. */
  struct { /* most data is stored as 'custom' data */
    /** Per-domain custom-data layer states (vert/edge/loop/face). */
    BArrayCustomData *vdata, *edata, *ldata, *pdata;
    /** State for the face offsets array (stored outside of #CustomData). */
    BArrayState *face_offset_indices;
    /** One state per shape-key block, or null when the mesh has no keys. */
    BArrayState **keyblocks;
    /** State for the original-selection array (`Mesh::mselect`). */
    BArrayState *mselect;
  } store;
#endif /* USE_ARRAY_STORE */

  /** Size of this undo step in bytes, reported to the undo system. */
  size_t undo_size;
};
|
2016-05-25 19:12:43 +10:00
|
|
|
|
2016-05-30 15:31:31 +10:00
|
|
|
#ifdef USE_ARRAY_STORE
|
|
|
|
|
|
2018-03-19 14:17:59 +01:00
|
|
|
/* -------------------------------------------------------------------- */
|
2016-05-30 15:31:31 +10:00
|
|
|
/** \name Array Store
|
|
|
|
|
* \{ */
|
|
|
|
|
|
2023-03-11 11:57:15 +11:00
|
|
|
/**
 * Store separate #BArrayStore_AtSize so multiple threads
 * can access array stores without locking.
 */
enum {
  ARRAY_STORE_INDEX_VERT = 0,
  ARRAY_STORE_INDEX_EDGE,
  ARRAY_STORE_INDEX_LOOP,
  ARRAY_STORE_INDEX_POLY,
  ARRAY_STORE_INDEX_POLY_OFFSETS,
  ARRAY_STORE_INDEX_SHAPE,
  ARRAY_STORE_INDEX_MSEL,
};
/* Number of entries in the enum above (last value + 1). */
# define ARRAY_STORE_INDEX_NUM (ARRAY_STORE_INDEX_MSEL + 1)
|
|
|
|
|
|
2016-05-30 15:31:31 +10:00
|
|
|
/** Global (file-local) state shared by all mesh undo steps. */
static struct {
  /** One array-store group per domain/index, see #ARRAY_STORE_INDEX_VERT etc. */
  BArrayStore_AtSize bs_stride[ARRAY_STORE_INDEX_NUM];
  /** Reference count of undo steps using this store; freed when it drops to zero. */
  int users;

  /**
   * A list of #UndoMesh items ordered from oldest to newest
   * used to access previous undo data for a mesh.
   */
  ListBase local_links;

# ifdef USE_ARRAY_STORE_THREAD
  /** Pool used to run compaction in the background. */
  TaskPool *task_pool;
# endif

} um_arraystore = {{{nullptr}}};
|
2016-05-30 15:31:31 +10:00
|
|
|
|
2022-10-07 10:40:44 -05:00
|
|
|
/**
 * Move the custom-data layer arrays out of \a cdata and into the undo array store.
 *
 * Layers are processed in runs that share the same type. For each layer, when
 * \a create is true, a state is recorded in `r_bcd_first`'s list (either an array-store
 * state or implicitly-shared data, see `layer_type_is_dynamic` below). In all cases the
 * layer's data pointer is released from \a cdata (ownership moves to the undo state,
 * or the memory is freed when not creating).
 *
 * \param data_len: Number of elements in each layer.
 * \param create: When false, only release/free the layer arrays (no states are created).
 * \param bs_index: Which #um_arraystore.bs_stride group to store states in.
 * \param bcd_reference: States from a previous undo step of the same mesh,
 * used for de-duplication; may be null.
 * \param r_bcd_first: Set to the head of the newly created state list (only when \a create).
 */
static void um_arraystore_cd_compact(CustomData *cdata,
                                     const size_t data_len,
                                     const bool create,
                                     const int bs_index,
                                     const BArrayCustomData *bcd_reference,
                                     BArrayCustomData **r_bcd_first)
{
  using namespace blender;
  if (data_len == 0) {
    if (create) {
      *r_bcd_first = nullptr;
    }
  }

  const BArrayCustomData *bcd_reference_current = bcd_reference;
  BArrayCustomData *bcd = nullptr, *bcd_first = nullptr, *bcd_prev = nullptr;
  /* Each outer iteration handles one run of layers sharing the same type:
   * `[layer_start, layer_end)`. */
  for (int layer_start = 0, layer_end; layer_start < cdata->totlayer; layer_start = layer_end) {
    const eCustomDataType type = eCustomDataType(cdata->layers[layer_start].type);

    /* Perform a full copy on dynamic layers.
     *
     * Unfortunately we can't compare dynamic layer types as they contain allocated pointers,
     * which burns CPU cycles looking for duplicate data that doesn't exist.
     * The array data isn't comparable once copied from the mesh,
     * this bottlenecks on high poly meshes, see #84114.
     *
     * Ideally the data would be expanded into a format that could be de-duplicated effectively,
     * this would require a flat representation of each dynamic custom-data layer.
     *
     * Instead, these non-trivial custom data layer are stored in the undo system using implicit
     * sharing, to avoid the copy from the undo mesh.
     */
    const bool layer_type_is_dynamic = CustomData_layertype_is_dynamic(type);

    /* Advance `layer_end` past every layer of the same type. */
    layer_end = layer_start + 1;
    while ((layer_end < cdata->totlayer) && (type == cdata->layers[layer_end].type)) {
      layer_end++;
    }

    const int stride = CustomData_sizeof(type);
    BArrayStore *bs = create ? BLI_array_store_at_size_ensure(&um_arraystore.bs_stride[bs_index],
                                                              stride,
                                                              array_chunk_size_calc(stride)) :
                               nullptr;
    const int layer_len = layer_end - layer_start;

    if (create) {
      if (bcd_reference_current && (bcd_reference_current->type == type)) {
        /* common case, the reference is aligned */
      }
      else {
        bcd_reference_current = nullptr;

        /* Do a full lookup when unaligned. */
        if (bcd_reference) {
          const BArrayCustomData *bcd_iter = bcd_reference;
          while (bcd_iter) {
            if (bcd_iter->type == type) {
              bcd_reference_current = bcd_iter;
              break;
            }
            bcd_iter = bcd_iter->next;
          }
        }
      }
    }

    if (create) {
      /* Append a new state group for this type-run to the linked list. */
      bcd = MEM_new<BArrayCustomData>(__func__);
      bcd->next = nullptr;
      bcd->type = type;
      bcd->states.reinitialize(layer_end - layer_start);

      if (bcd_prev) {
        bcd_prev->next = bcd;
        bcd_prev = bcd;
      }
      else {
        bcd_first = bcd;
        bcd_prev = bcd;
      }
    }

    CustomDataLayer *layer = &cdata->layers[layer_start];
    for (int i = 0; i < layer_len; i++, layer++) {
      if (create) {
        if (layer->data) {
          if (layer_type_is_dynamic) {
            /* See comment on `layer_type_is_dynamic` above. */
            const ImplicitSharingInfo *sharing_info;
            if (layer->sharing_info) {
              sharing_info = layer->sharing_info;
              sharing_info->add_user();
            }
            else {
              /* The layer owned its data directly; wrap it so it can be shared. */
              sharing_info = implicit_sharing::info_for_mem_free(layer->data);
            }
            bcd->states[i] = ImplicitSharingInfoAndData{sharing_info, layer->data};
          }
          else {
            /* De-duplicate against the matching layer of the reference step when possible. */
            BArrayState *state_reference = nullptr;
            if (bcd_reference_current && i < bcd_reference_current->states.size()) {
              state_reference = std::get<BArrayState *>(bcd_reference_current->states[i]);
            }

            bcd->states[i] = BLI_array_store_state_add(
                bs, layer->data, size_t(data_len) * stride, state_reference);
          }
        }
        else {
          bcd->states[i] = nullptr;
        }
      }

      /* Release the layer's data from `cdata` (shared or directly owned). */
      if (layer->data) {
        if (layer->sharing_info) {
          layer->sharing_info->remove_user_and_delete_if_last();
          layer->sharing_info = nullptr;
          layer->data = nullptr;
        }
        else {
          MEM_SAFE_FREE(layer->data);
        }
      }
    }

    if (create) {
      if (bcd_reference_current) {
        bcd_reference_current = bcd_reference_current->next;
      }
    }
  }

  if (create) {
    *r_bcd_first = bcd_first;
  }
}
|
|
|
|
|
|
|
|
|
|
/**
 * Restore custom-data layer arrays from the undo store back into \a cdata.
 *
 * Walks the #BArrayCustomData list created by #um_arraystore_cd_compact in parallel
 * with `cdata->layers`, re-allocating array-store states or re-sharing implicitly
 * shared data into each layer.
 *
 * \note There is no room for data going out of sync here.
 * The layers and the states are stored together so this can be kept working.
 */
static void um_arraystore_cd_expand(const BArrayCustomData *bcd,
                                    CustomData *cdata,
                                    const size_t data_len)
{
  using namespace blender;
  CustomDataLayer *layer = cdata->layers;
  while (bcd) {
    const int stride = CustomData_sizeof(bcd->type);
    for (int i = 0; i < bcd->states.size(); i++) {
      BLI_assert(bcd->type == layer->type);
      if (std::holds_alternative<BArrayState *>(bcd->states[i])) {
        BArrayState *state = std::get<BArrayState *>(bcd->states[i]);
        if (state) {
          /* Allocate a fresh copy of the de-duplicated data for this layer. */
          size_t state_len;
          layer->data = BLI_array_store_state_data_get_alloc(state, &state_len);
          BLI_assert(stride * data_len == state_len);
          UNUSED_VARS_NDEBUG(stride, data_len);
        }
        else {
          /* Null states correspond to layers that had no data when compacted. */
          layer->data = nullptr;
        }
      }
      else {
        /* Implicitly-shared layer: point at the stored data and add a user. */
        ImplicitSharingInfoAndData state = std::get<ImplicitSharingInfoAndData>(bcd->states[i]);
        layer->data = const_cast<void *>(state.data);
        layer->sharing_info = state.sharing_info;
        layer->sharing_info->add_user();
      }
      layer++;
    }
    bcd = bcd->next;
  }
}
|
|
|
|
|
|
2023-03-11 11:57:15 +11:00
|
|
|
/**
 * Free an entire #BArrayCustomData list: remove each array-store state from its
 * #BArrayStore (looked up by stride in `um_arraystore.bs_stride[bs_index]`), release
 * each implicitly-shared state, then delete the list nodes themselves.
 */
static void um_arraystore_cd_free(BArrayCustomData *bcd, const int bs_index)
{
  using namespace blender;
  for (BArrayCustomData *link = bcd; link != nullptr;) {
    BArrayCustomData *link_next = link->next;
    const int elem_stride = CustomData_sizeof(link->type);
    BArrayStore *store = BLI_array_store_at_size_get(&um_arraystore.bs_stride[bs_index],
                                                     elem_stride);
    for (auto &state_variant : link->states) {
      if (std::holds_alternative<BArrayState *>(state_variant)) {
        /* Null states (layers without data) have nothing to remove. */
        if (BArrayState *array_state = std::get<BArrayState *>(state_variant)) {
          BLI_array_store_state_remove(store, array_state);
        }
      }
      else {
        const ImplicitSharingInfoAndData shared = std::get<ImplicitSharingInfoAndData>(
            state_variant);
        shared.sharing_info->remove_user_and_delete_if_last();
      }
    }
    MEM_delete(link);
    link = link_next;
  }
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* \param create: When false, only free the arrays.
|
|
|
|
|
* This is done since when reading from an undo state, they must be temporarily expanded.
|
|
|
|
|
* then discarded afterwards, having this argument avoids having 2x code paths.
|
|
|
|
|
*/
|
|
|
|
|
static void um_arraystore_compact_ex(UndoMesh *um, const UndoMesh *um_ref, bool create)
|
|
|
|
|
{
|
2023-12-08 16:40:06 -05:00
|
|
|
Mesh *mesh = &um->mesh;
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2023-03-11 11:57:15 +11:00
|
|
|
/* Compacting can be time consuming, run in parallel.
|
|
|
|
|
*
|
|
|
|
|
* NOTE(@ideasman42): this could be further parallelized with every custom-data layer
|
2023-03-24 08:34:21 -04:00
|
|
|
* running in its own thread. If this is a bottleneck it's worth considering.
|
|
|
|
|
* At the moment it seems fast enough to split by domain.
|
|
|
|
|
* Since this is itself a background thread, using too many threads here could
|
2023-03-11 11:57:15 +11:00
|
|
|
* interfere with foreground tasks. */
|
|
|
|
|
blender::threading::parallel_invoke(
|
2023-12-20 02:21:48 +01:00
|
|
|
4096 < (mesh->verts_num + mesh->edges_num + mesh->corners_num + mesh->faces_num),
|
2023-03-11 11:57:15 +11:00
|
|
|
[&]() {
|
2023-12-08 16:40:06 -05:00
|
|
|
um_arraystore_cd_compact(&mesh->vert_data,
|
2023-12-20 02:21:48 +01:00
|
|
|
mesh->verts_num,
|
2023-03-11 11:57:15 +11:00
|
|
|
create,
|
|
|
|
|
ARRAY_STORE_INDEX_VERT,
|
|
|
|
|
um_ref ? um_ref->store.vdata : nullptr,
|
|
|
|
|
&um->store.vdata);
|
|
|
|
|
},
|
|
|
|
|
[&]() {
|
2023-12-08 16:40:06 -05:00
|
|
|
um_arraystore_cd_compact(&mesh->edge_data,
|
2023-12-20 02:21:48 +01:00
|
|
|
mesh->edges_num,
|
2023-03-11 11:57:15 +11:00
|
|
|
create,
|
|
|
|
|
ARRAY_STORE_INDEX_EDGE,
|
|
|
|
|
um_ref ? um_ref->store.edata : nullptr,
|
|
|
|
|
&um->store.edata);
|
|
|
|
|
},
|
|
|
|
|
[&]() {
|
2023-12-19 20:38:59 -05:00
|
|
|
um_arraystore_cd_compact(&mesh->corner_data,
|
2023-12-20 02:21:48 +01:00
|
|
|
mesh->corners_num,
|
2023-03-11 11:57:15 +11:00
|
|
|
create,
|
|
|
|
|
ARRAY_STORE_INDEX_LOOP,
|
|
|
|
|
um_ref ? um_ref->store.ldata : nullptr,
|
|
|
|
|
&um->store.ldata);
|
|
|
|
|
},
|
|
|
|
|
[&]() {
|
2023-12-08 16:40:06 -05:00
|
|
|
um_arraystore_cd_compact(&mesh->face_data,
|
|
|
|
|
mesh->faces_num,
|
2023-03-11 11:57:15 +11:00
|
|
|
create,
|
|
|
|
|
ARRAY_STORE_INDEX_POLY,
|
|
|
|
|
um_ref ? um_ref->store.pdata : nullptr,
|
|
|
|
|
&um->store.pdata);
|
|
|
|
|
},
|
Mesh: Replace MPoly struct with offset indices
Implements #95967.
Currently the `MPoly` struct is 12 bytes, and stores the index of a
face's first corner and the number of corners/verts/edges. Polygons
and corners are always created in order by Blender, meaning each
face's corners will be after the previous face's corners. We can take
advantage of this fact and eliminate the redundancy in mesh face
storage by only storing a single integer corner offset for each face.
The size of the face is then encoded by the offset of the next face.
The size of a single integer is 4 bytes, so this reduces memory
usage by 3 times.
The same method is used for `CurvesGeometry`, so Blender already has
an abstraction to simplify using these offsets called `OffsetIndices`.
This class is used to easily retrieve a range of corner indices for
each face. This also gives the opportunity for sharing some logic with
curves.
Another benefit of the change is that the offsets and sizes stored in
`MPoly` can no longer disagree with each other. Storing faces in the
order of their corners can simplify some code too.
Face/polygon variables now use the `IndexRange` type, which comes with
quite a few utilities that can simplify code.
Some:
- The offset integer array has to be one longer than the face count to
avoid a branch for every face, which means the data is no longer part
of the mesh's `CustomData`.
- We lose the ability to "reference" an original mesh's offset array
until more reusable CoW from #104478 is committed. That will be added
in a separate commit.
- Since they aren't part of `CustomData`, poly offsets often have to be
copied manually.
- To simplify using `OffsetIndices` in many places, some functions and
structs in headers were moved to only compile in C++.
- All meshes created by Blender use the same order for faces and face
corners, but just in case, meshes with mismatched order are fixed by
versioning code.
- `MeshPolygon.totloop` is no longer editable in RNA. This API break is
necessary here unfortunately. It should be worth it in 3.6, since
that's the best way to allow loading meshes from 4.0, which is
important for an LTS version.
Pull Request: https://projects.blender.org/blender/blender/pulls/105938
2023-04-04 20:39:28 +02:00
|
|
|
[&]() {
|
2023-12-08 16:40:06 -05:00
|
|
|
if (mesh->face_offset_indices) {
|
2023-07-24 22:06:55 +02:00
|
|
|
BLI_assert(create == (um->store.face_offset_indices == nullptr));
|
Mesh: Replace MPoly struct with offset indices
Implements #95967.
Currently the `MPoly` struct is 12 bytes, and stores the index of a
face's first corner and the number of corners/verts/edges. Polygons
and corners are always created in order by Blender, meaning each
face's corners will be after the previous face's corners. We can take
advantage of this fact and eliminate the redundancy in mesh face
storage by only storing a single integer corner offset for each face.
The size of the face is then encoded by the offset of the next face.
The size of a single integer is 4 bytes, so this reduces memory
usage by 3 times.
The same method is used for `CurvesGeometry`, so Blender already has
an abstraction to simplify using these offsets called `OffsetIndices`.
This class is used to easily retrieve a range of corner indices for
each face. This also gives the opportunity for sharing some logic with
curves.
Another benefit of the change is that the offsets and sizes stored in
`MPoly` can no longer disagree with each other. Storing faces in the
order of their corners can simplify some code too.
Face/polygon variables now use the `IndexRange` type, which comes with
quite a few utilities that can simplify code.
Some:
- The offset integer array has to be one longer than the face count to
avoid a branch for every face, which means the data is no longer part
of the mesh's `CustomData`.
- We lose the ability to "reference" an original mesh's offset array
until more reusable CoW from #104478 is committed. That will be added
in a separate commit.
- Since they aren't part of `CustomData`, poly offsets often have to be
copied manually.
- To simplify using `OffsetIndices` in many places, some functions and
structs in headers were moved to only compile in C++.
- All meshes created by Blender use the same order for faces and face
corners, but just in case, meshes with mismatched order are fixed by
versioning code.
- `MeshPolygon.totloop` is no longer editable in RNA. This API break is
necessary here unfortunately. It should be worth it in 3.6, since
that's the best way to allow loading meshes from 4.0, which is
important for an LTS version.
Pull Request: https://projects.blender.org/blender/blender/pulls/105938
2023-04-04 20:39:28 +02:00
|
|
|
if (create) {
|
2023-07-24 22:06:55 +02:00
|
|
|
BArrayState *state_reference = um_ref ? um_ref->store.face_offset_indices : nullptr;
|
2023-12-08 16:40:06 -05:00
|
|
|
const size_t stride = sizeof(*mesh->face_offset_indices);
|
Mesh: Replace MPoly struct with offset indices
Implements #95967.
Currently the `MPoly` struct is 12 bytes, and stores the index of a
face's first corner and the number of corners/verts/edges. Polygons
and corners are always created in order by Blender, meaning each
face's corners will be after the previous face's corners. We can take
advantage of this fact and eliminate the redundancy in mesh face
storage by only storing a single integer corner offset for each face.
The size of the face is then encoded by the offset of the next face.
The size of a single integer is 4 bytes, so this reduces memory
usage by 3 times.
The same method is used for `CurvesGeometry`, so Blender already has
an abstraction to simplify using these offsets called `OffsetIndices`.
This class is used to easily retrieve a range of corner indices for
each face. This also gives the opportunity for sharing some logic with
curves.
Another benefit of the change is that the offsets and sizes stored in
`MPoly` can no longer disagree with each other. Storing faces in the
order of their corners can simplify some code too.
Face/polygon variables now use the `IndexRange` type, which comes with
quite a few utilities that can simplify code.
Some:
- The offset integer array has to be one longer than the face count to
avoid a branch for every face, which means the data is no longer part
of the mesh's `CustomData`.
- We lose the ability to "reference" an original mesh's offset array
until more reusable CoW from #104478 is committed. That will be added
in a separate commit.
- Since they aren't part of `CustomData`, poly offsets often have to be
copied manually.
- To simplify using `OffsetIndices` in many places, some functions and
structs in headers were moved to only compile in C++.
- All meshes created by Blender use the same order for faces and face
corners, but just in case, meshes with mismatched order are fixed by
versioning code.
- `MeshPolygon.totloop` is no longer editable in RNA. This API break is
necessary here unfortunately. It should be worth it in 3.6, since
that's the best way to allow loading meshes from 4.0, which is
important for an LTS version.
Pull Request: https://projects.blender.org/blender/blender/pulls/105938
2023-04-04 20:39:28 +02:00
|
|
|
BArrayStore *bs = BLI_array_store_at_size_ensure(
|
|
|
|
|
&um_arraystore.bs_stride[ARRAY_STORE_INDEX_POLY_OFFSETS],
|
|
|
|
|
stride,
|
|
|
|
|
array_chunk_size_calc(stride));
|
2023-12-08 16:40:06 -05:00
|
|
|
um->store.face_offset_indices = BLI_array_store_state_add(bs,
|
|
|
|
|
mesh->face_offset_indices,
|
|
|
|
|
size_t(mesh->faces_num + 1) *
|
|
|
|
|
stride,
|
|
|
|
|
state_reference);
|
Mesh: Replace MPoly struct with offset indices
Implements #95967.
Currently the `MPoly` struct is 12 bytes, and stores the index of a
face's first corner and the number of corners/verts/edges. Polygons
and corners are always created in order by Blender, meaning each
face's corners will be after the previous face's corners. We can take
advantage of this fact and eliminate the redundancy in mesh face
storage by only storing a single integer corner offset for each face.
The size of the face is then encoded by the offset of the next face.
The size of a single integer is 4 bytes, so this reduces memory
usage by 3 times.
The same method is used for `CurvesGeometry`, so Blender already has
an abstraction to simplify using these offsets called `OffsetIndices`.
This class is used to easily retrieve a range of corner indices for
each face. This also gives the opportunity for sharing some logic with
curves.
Another benefit of the change is that the offsets and sizes stored in
`MPoly` can no longer disagree with each other. Storing faces in the
order of their corners can simplify some code too.
Face/polygon variables now use the `IndexRange` type, which comes with
quite a few utilities that can simplify code.
Some:
- The offset integer array has to be one longer than the face count to
avoid a branch for every face, which means the data is no longer part
of the mesh's `CustomData`.
- We lose the ability to "reference" an original mesh's offset array
until more reusable CoW from #104478 is committed. That will be added
in a separate commit.
- Since they aren't part of `CustomData`, poly offsets often have to be
copied manually.
- To simplify using `OffsetIndices` in many places, some functions and
structs in headers were moved to only compile in C++.
- All meshes created by Blender use the same order for faces and face
corners, but just in case, meshes with mismatched order are fixed by
versioning code.
- `MeshPolygon.totloop` is no longer editable in RNA. This API break is
necessary here unfortunately. It should be worth it in 3.6, since
that's the best way to allow loading meshes from 4.0, which is
important for an LTS version.
Pull Request: https://projects.blender.org/blender/blender/pulls/105938
2023-04-04 20:39:28 +02:00
|
|
|
}
|
2023-12-08 16:40:06 -05:00
|
|
|
blender::implicit_sharing::free_shared_data(&mesh->face_offset_indices,
|
|
|
|
|
&mesh->runtime->face_offsets_sharing_info);
|
Mesh: Replace MPoly struct with offset indices
Implements #95967.
Currently the `MPoly` struct is 12 bytes, and stores the index of a
face's first corner and the number of corners/verts/edges. Polygons
and corners are always created in order by Blender, meaning each
face's corners will be after the previous face's corners. We can take
advantage of this fact and eliminate the redundancy in mesh face
storage by only storing a single integer corner offset for each face.
The size of the face is then encoded by the offset of the next face.
The size of a single integer is 4 bytes, so this reduces memory
usage by 3 times.
The same method is used for `CurvesGeometry`, so Blender already has
an abstraction to simplify using these offsets called `OffsetIndices`.
This class is used to easily retrieve a range of corner indices for
each face. This also gives the opportunity for sharing some logic with
curves.
Another benefit of the change is that the offsets and sizes stored in
`MPoly` can no longer disagree with each other. Storing faces in the
order of their corners can simplify some code too.
Face/polygon variables now use the `IndexRange` type, which comes with
quite a few utilities that can simplify code.
Some:
- The offset integer array has to be one longer than the face count to
avoid a branch for every face, which means the data is no longer part
of the mesh's `CustomData`.
- We lose the ability to "reference" an original mesh's offset array
until more reusable CoW from #104478 is committed. That will be added
in a separate commit.
- Since they aren't part of `CustomData`, poly offsets often have to be
copied manually.
- To simplify using `OffsetIndices` in many places, some functions and
structs in headers were moved to only compile in C++.
- All meshes created by Blender use the same order for faces and face
corners, but just in case, meshes with mismatched order are fixed by
versioning code.
- `MeshPolygon.totloop` is no longer editable in RNA. This API break is
necessary here unfortunately. It should be worth it in 3.6, since
that's the best way to allow loading meshes from 4.0, which is
important for an LTS version.
Pull Request: https://projects.blender.org/blender/blender/pulls/105938
2023-04-04 20:39:28 +02:00
|
|
|
}
|
|
|
|
|
},
|
2023-03-11 11:57:15 +11:00
|
|
|
[&]() {
|
2023-12-08 16:40:06 -05:00
|
|
|
if (mesh->key && mesh->key->totkey) {
|
|
|
|
|
const size_t stride = mesh->key->elemsize;
|
2023-03-11 11:57:15 +11:00
|
|
|
BArrayStore *bs = create ? BLI_array_store_at_size_ensure(
|
|
|
|
|
&um_arraystore.bs_stride[ARRAY_STORE_INDEX_SHAPE],
|
|
|
|
|
stride,
|
2023-03-23 00:52:51 +11:00
|
|
|
array_chunk_size_calc(stride)) :
|
2023-03-11 11:57:15 +11:00
|
|
|
nullptr;
|
|
|
|
|
if (create) {
|
|
|
|
|
um->store.keyblocks = static_cast<BArrayState **>(
|
2023-12-08 16:40:06 -05:00
|
|
|
MEM_mallocN(mesh->key->totkey * sizeof(*um->store.keyblocks), __func__));
|
2023-03-11 11:57:15 +11:00
|
|
|
}
|
2023-12-08 16:40:06 -05:00
|
|
|
KeyBlock *keyblock = static_cast<KeyBlock *>(mesh->key->block.first);
|
|
|
|
|
for (int i = 0; i < mesh->key->totkey; i++, keyblock = keyblock->next) {
|
2023-03-11 11:57:15 +11:00
|
|
|
if (create) {
|
2023-12-08 16:40:06 -05:00
|
|
|
BArrayState *state_reference = (um_ref && um_ref->mesh.key &&
|
|
|
|
|
(i < um_ref->mesh.key->totkey)) ?
|
2023-03-11 11:57:15 +11:00
|
|
|
um_ref->store.keyblocks[i] :
|
|
|
|
|
nullptr;
|
|
|
|
|
um->store.keyblocks[i] = BLI_array_store_state_add(
|
|
|
|
|
bs, keyblock->data, size_t(keyblock->totelem) * stride, state_reference);
|
|
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2023-03-11 11:57:15 +11:00
|
|
|
if (keyblock->data) {
|
|
|
|
|
MEM_freeN(keyblock->data);
|
|
|
|
|
keyblock->data = nullptr;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
},
|
|
|
|
|
[&]() {
|
2023-12-08 16:40:06 -05:00
|
|
|
if (mesh->mselect && mesh->totselect) {
|
2023-03-11 11:57:15 +11:00
|
|
|
BLI_assert(create == (um->store.mselect == nullptr));
|
|
|
|
|
if (create) {
|
|
|
|
|
BArrayState *state_reference = um_ref ? um_ref->store.mselect : nullptr;
|
2023-12-08 16:40:06 -05:00
|
|
|
const size_t stride = sizeof(*mesh->mselect);
|
2023-03-11 11:57:15 +11:00
|
|
|
BArrayStore *bs = BLI_array_store_at_size_ensure(
|
2023-03-23 00:52:51 +11:00
|
|
|
&um_arraystore.bs_stride[ARRAY_STORE_INDEX_MSEL],
|
|
|
|
|
stride,
|
|
|
|
|
array_chunk_size_calc(stride));
|
2023-03-11 11:57:15 +11:00
|
|
|
um->store.mselect = BLI_array_store_state_add(
|
2023-12-08 16:40:06 -05:00
|
|
|
bs, mesh->mselect, size_t(mesh->totselect) * stride, state_reference);
|
2023-03-11 11:57:15 +11:00
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2023-12-08 16:40:06 -05:00
|
|
|
/* keep mesh->totselect for validation */
|
|
|
|
|
MEM_freeN(mesh->mselect);
|
|
|
|
|
mesh->mselect = nullptr;
|
2023-03-11 11:57:15 +11:00
|
|
|
}
|
|
|
|
|
});
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2016-05-30 15:31:31 +10:00
|
|
|
if (create) {
|
|
|
|
|
um_arraystore.users += 1;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* Move data from allocated arrays to de-duplicated states and clear arrays.
|
|
|
|
|
*/
|
|
|
|
|
static void um_arraystore_compact(UndoMesh *um, const UndoMesh *um_ref)
|
|
|
|
|
{
|
|
|
|
|
um_arraystore_compact_ex(um, um_ref, true);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void um_arraystore_compact_with_info(UndoMesh *um, const UndoMesh *um_ref)
|
|
|
|
|
{
|
|
|
|
|
# ifdef DEBUG_PRINT
|
2023-03-11 11:57:15 +11:00
|
|
|
size_t size_expanded_prev = 0, size_compacted_prev = 0;
|
|
|
|
|
|
|
|
|
|
for (int bs_index = 0; bs_index < ARRAY_STORE_INDEX_NUM; bs_index++) {
|
|
|
|
|
size_t size_expanded_prev_iter, size_compacted_prev_iter;
|
|
|
|
|
BLI_array_store_at_size_calc_memory_usage(
|
|
|
|
|
&um_arraystore.bs_stride[bs_index], &size_expanded_prev_iter, &size_compacted_prev_iter);
|
|
|
|
|
size_expanded_prev += size_expanded_prev_iter;
|
|
|
|
|
size_compacted_prev += size_compacted_prev_iter;
|
|
|
|
|
}
|
2016-05-30 15:31:31 +10:00
|
|
|
# endif
|
|
|
|
|
|
|
|
|
|
# ifdef DEBUG_TIME
|
2016-06-08 18:34:01 +10:00
|
|
|
TIMEIT_START(mesh_undo_compact);
|
2019-04-17 06:17:24 +02:00
|
|
|
# endif
|
2016-05-30 15:31:31 +10:00
|
|
|
|
|
|
|
|
um_arraystore_compact(um, um_ref);
|
|
|
|
|
|
|
|
|
|
# ifdef DEBUG_TIME
|
|
|
|
|
TIMEIT_END(mesh_undo_compact);
|
2019-04-17 06:17:24 +02:00
|
|
|
# endif
|
2016-05-30 15:31:31 +10:00
|
|
|
|
|
|
|
|
# ifdef DEBUG_PRINT
|
2019-04-17 06:17:24 +02:00
|
|
|
{
|
2023-03-11 11:57:15 +11:00
|
|
|
size_t size_expanded = 0, size_compacted = 0;
|
|
|
|
|
|
|
|
|
|
for (int bs_index = 0; bs_index < ARRAY_STORE_INDEX_NUM; bs_index++) {
|
|
|
|
|
size_t size_expanded_iter, size_compacted_iter;
|
|
|
|
|
BLI_array_store_at_size_calc_memory_usage(
|
|
|
|
|
&um_arraystore.bs_stride[bs_index], &size_expanded_iter, &size_compacted_iter);
|
|
|
|
|
size_expanded += size_expanded_iter;
|
|
|
|
|
size_compacted += size_compacted_iter;
|
|
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2016-05-30 15:31:31 +10:00
|
|
|
const double percent_total = size_expanded ?
|
2023-01-11 13:03:53 +11:00
|
|
|
((double(size_compacted) / double(size_expanded)) * 100.0) :
|
2019-04-17 06:17:24 +02:00
|
|
|
-1.0;
|
|
|
|
|
|
2016-05-30 15:31:31 +10:00
|
|
|
size_t size_expanded_step = size_expanded - size_expanded_prev;
|
|
|
|
|
size_t size_compacted_step = size_compacted - size_compacted_prev;
|
|
|
|
|
const double percent_step = size_expanded_step ?
|
2023-01-11 13:03:53 +11:00
|
|
|
((double(size_compacted_step) / double(size_expanded_step)) *
|
2019-04-17 06:17:24 +02:00
|
|
|
100.0) :
|
|
|
|
|
-1.0;
|
|
|
|
|
|
2016-05-30 15:31:31 +10:00
|
|
|
printf("overall memory use: %.8f%% of expanded size\n", percent_total);
|
|
|
|
|
printf("step memory use: %.8f%% of expanded size\n", percent_step);
|
|
|
|
|
}
|
|
|
|
|
# endif
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
# ifdef USE_ARRAY_STORE_THREAD
|
|
|
|
|
|
|
|
|
|
struct UMArrayData {
|
|
|
|
|
UndoMesh *um;
|
2022-10-07 10:40:44 -05:00
|
|
|
const UndoMesh *um_ref; /* can be nullptr */
|
2016-05-30 15:31:31 +10:00
|
|
|
};
|
2022-10-07 10:40:44 -05:00
|
|
|
static void um_arraystore_compact_cb(TaskPool *__restrict /*pool*/, void *taskdata)
|
2016-05-30 15:31:31 +10:00
|
|
|
{
|
2022-10-07 10:40:44 -05:00
|
|
|
UMArrayData *um_data = static_cast<UMArrayData *>(taskdata);
|
2016-05-30 15:31:31 +10:00
|
|
|
um_arraystore_compact_with_info(um_data->um, um_data->um_ref);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
# endif /* USE_ARRAY_STORE_THREAD */
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* Remove data we only expanded for temporary use.
|
|
|
|
|
*/
|
|
|
|
|
static void um_arraystore_expand_clear(UndoMesh *um)
|
|
|
|
|
{
|
2022-10-07 10:40:44 -05:00
|
|
|
um_arraystore_compact_ex(um, nullptr, false);
|
2016-05-30 15:31:31 +10:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void um_arraystore_expand(UndoMesh *um)
|
|
|
|
|
{
|
2023-12-08 16:40:06 -05:00
|
|
|
Mesh *mesh = &um->mesh;
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2023-12-20 02:21:48 +01:00
|
|
|
um_arraystore_cd_expand(um->store.vdata, &mesh->vert_data, mesh->verts_num);
|
|
|
|
|
um_arraystore_cd_expand(um->store.edata, &mesh->edge_data, mesh->edges_num);
|
2023-12-19 20:38:59 -05:00
|
|
|
um_arraystore_cd_expand(um->store.ldata, &mesh->corner_data, mesh->corners_num);
|
2023-12-08 16:40:06 -05:00
|
|
|
um_arraystore_cd_expand(um->store.pdata, &mesh->face_data, mesh->faces_num);
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2016-05-30 15:31:31 +10:00
|
|
|
if (um->store.keyblocks) {
|
2023-12-08 16:40:06 -05:00
|
|
|
const size_t stride = mesh->key->elemsize;
|
|
|
|
|
KeyBlock *keyblock = static_cast<KeyBlock *>(mesh->key->block.first);
|
|
|
|
|
for (int i = 0; i < mesh->key->totkey; i++, keyblock = keyblock->next) {
|
2016-05-30 15:31:31 +10:00
|
|
|
BArrayState *state = um->store.keyblocks[i];
|
|
|
|
|
size_t state_len;
|
|
|
|
|
keyblock->data = BLI_array_store_state_data_get_alloc(state, &state_len);
|
|
|
|
|
BLI_assert(keyblock->totelem == (state_len / stride));
|
|
|
|
|
UNUSED_VARS_NDEBUG(stride);
|
|
|
|
|
}
|
|
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2023-07-24 22:06:55 +02:00
|
|
|
if (um->store.face_offset_indices) {
|
2023-12-08 16:40:06 -05:00
|
|
|
const size_t stride = sizeof(*mesh->face_offset_indices);
|
2023-07-24 22:06:55 +02:00
|
|
|
BArrayState *state = um->store.face_offset_indices;
|
Mesh: Replace MPoly struct with offset indices
Implements #95967.
Currently the `MPoly` struct is 12 bytes, and stores the index of a
face's first corner and the number of corners/verts/edges. Polygons
and corners are always created in order by Blender, meaning each
face's corners will be after the previous face's corners. We can take
advantage of this fact and eliminate the redundancy in mesh face
storage by only storing a single integer corner offset for each face.
The size of the face is then encoded by the offset of the next face.
The size of a single integer is 4 bytes, so this reduces memory
usage by 3 times.
The same method is used for `CurvesGeometry`, so Blender already has
an abstraction to simplify using these offsets called `OffsetIndices`.
This class is used to easily retrieve a range of corner indices for
each face. This also gives the opportunity for sharing some logic with
curves.
Another benefit of the change is that the offsets and sizes stored in
`MPoly` can no longer disagree with each other. Storing faces in the
order of their corners can simplify some code too.
Face/polygon variables now use the `IndexRange` type, which comes with
quite a few utilities that can simplify code.
Some:
- The offset integer array has to be one longer than the face count to
avoid a branch for every face, which means the data is no longer part
of the mesh's `CustomData`.
- We lose the ability to "reference" an original mesh's offset array
until more reusable CoW from #104478 is committed. That will be added
in a separate commit.
- Since they aren't part of `CustomData`, poly offsets often have to be
copied manually.
- To simplify using `OffsetIndices` in many places, some functions and
structs in headers were moved to only compile in C++.
- All meshes created by Blender use the same order for faces and face
corners, but just in case, meshes with mismatched order are fixed by
versioning code.
- `MeshPolygon.totloop` is no longer editable in RNA. This API break is
necessary here unfortunately. It should be worth it in 3.6, since
that's the best way to allow loading meshes from 4.0, which is
important for an LTS version.
Pull Request: https://projects.blender.org/blender/blender/pulls/105938
2023-04-04 20:39:28 +02:00
|
|
|
size_t state_len;
|
2023-12-08 16:40:06 -05:00
|
|
|
mesh->face_offset_indices = static_cast<int *>(
|
Mesh: Replace MPoly struct with offset indices
Implements #95967.
Currently the `MPoly` struct is 12 bytes, and stores the index of a
face's first corner and the number of corners/verts/edges. Polygons
and corners are always created in order by Blender, meaning each
face's corners will be after the previous face's corners. We can take
advantage of this fact and eliminate the redundancy in mesh face
storage by only storing a single integer corner offset for each face.
The size of the face is then encoded by the offset of the next face.
The size of a single integer is 4 bytes, so this reduces memory
usage by 3 times.
The same method is used for `CurvesGeometry`, so Blender already has
an abstraction to simplify using these offsets called `OffsetIndices`.
This class is used to easily retrieve a range of corner indices for
each face. This also gives the opportunity for sharing some logic with
curves.
Another benefit of the change is that the offsets and sizes stored in
`MPoly` can no longer disagree with each other. Storing faces in the
order of their corners can simplify some code too.
Face/polygon variables now use the `IndexRange` type, which comes with
quite a few utilities that can simplify code.
Some:
- The offset integer array has to be one longer than the face count to
avoid a branch for every face, which means the data is no longer part
of the mesh's `CustomData`.
- We lose the ability to "reference" an original mesh's offset array
until more reusable CoW from #104478 is committed. That will be added
in a separate commit.
- Since they aren't part of `CustomData`, poly offsets often have to be
copied manually.
- To simplify using `OffsetIndices` in many places, some functions and
structs in headers were moved to only compile in C++.
- All meshes created by Blender use the same order for faces and face
corners, but just in case, meshes with mismatched order are fixed by
versioning code.
- `MeshPolygon.totloop` is no longer editable in RNA. This API break is
necessary here unfortunately. It should be worth it in 3.6, since
that's the best way to allow loading meshes from 4.0, which is
important for an LTS version.
Pull Request: https://projects.blender.org/blender/blender/pulls/105938
2023-04-04 20:39:28 +02:00
|
|
|
BLI_array_store_state_data_get_alloc(state, &state_len));
|
2023-12-08 16:40:06 -05:00
|
|
|
mesh->runtime->face_offsets_sharing_info = blender::implicit_sharing::info_for_mem_free(
|
|
|
|
|
mesh->face_offset_indices);
|
|
|
|
|
BLI_assert((mesh->faces_num + 1) == (state_len / stride));
|
Mesh: Replace MPoly struct with offset indices
Implements #95967.
Currently the `MPoly` struct is 12 bytes, and stores the index of a
face's first corner and the number of corners/verts/edges. Polygons
and corners are always created in order by Blender, meaning each
face's corners will be after the previous face's corners. We can take
advantage of this fact and eliminate the redundancy in mesh face
storage by only storing a single integer corner offset for each face.
The size of the face is then encoded by the offset of the next face.
The size of a single integer is 4 bytes, so this reduces memory
usage by 3 times.
The same method is used for `CurvesGeometry`, so Blender already has
an abstraction to simplify using these offsets called `OffsetIndices`.
This class is used to easily retrieve a range of corner indices for
each face. This also gives the opportunity for sharing some logic with
curves.
Another benefit of the change is that the offsets and sizes stored in
`MPoly` can no longer disagree with each other. Storing faces in the
order of their corners can simplify some code too.
Face/polygon variables now use the `IndexRange` type, which comes with
quite a few utilities that can simplify code.
Some:
- The offset integer array has to be one longer than the face count to
avoid a branch for every face, which means the data is no longer part
of the mesh's `CustomData`.
- We lose the ability to "reference" an original mesh's offset array
until more reusable CoW from #104478 is committed. That will be added
in a separate commit.
- Since they aren't part of `CustomData`, poly offsets often have to be
copied manually.
- To simplify using `OffsetIndices` in many places, some functions and
structs in headers were moved to only compile in C++.
- All meshes created by Blender use the same order for faces and face
corners, but just in case, meshes with mismatched order are fixed by
versioning code.
- `MeshPolygon.totloop` is no longer editable in RNA. This API break is
necessary here unfortunately. It should be worth it in 3.6, since
that's the best way to allow loading meshes from 4.0, which is
important for an LTS version.
Pull Request: https://projects.blender.org/blender/blender/pulls/105938
2023-04-04 20:39:28 +02:00
|
|
|
UNUSED_VARS_NDEBUG(stride);
|
|
|
|
|
}
|
2016-05-30 15:31:31 +10:00
|
|
|
if (um->store.mselect) {
|
2023-12-08 16:40:06 -05:00
|
|
|
const size_t stride = sizeof(*mesh->mselect);
|
2016-05-30 15:31:31 +10:00
|
|
|
BArrayState *state = um->store.mselect;
|
|
|
|
|
size_t state_len;
|
2023-12-08 16:40:06 -05:00
|
|
|
mesh->mselect = static_cast<MSelect *>(
|
|
|
|
|
BLI_array_store_state_data_get_alloc(state, &state_len));
|
|
|
|
|
BLI_assert(mesh->totselect == (state_len / stride));
|
2016-05-30 15:31:31 +10:00
|
|
|
UNUSED_VARS_NDEBUG(stride);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void um_arraystore_free(UndoMesh *um)
|
|
|
|
|
{
|
2023-12-08 16:40:06 -05:00
|
|
|
Mesh *mesh = &um->mesh;
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2023-03-11 11:57:15 +11:00
|
|
|
um_arraystore_cd_free(um->store.vdata, ARRAY_STORE_INDEX_VERT);
|
|
|
|
|
um_arraystore_cd_free(um->store.edata, ARRAY_STORE_INDEX_EDGE);
|
|
|
|
|
um_arraystore_cd_free(um->store.ldata, ARRAY_STORE_INDEX_LOOP);
|
|
|
|
|
um_arraystore_cd_free(um->store.pdata, ARRAY_STORE_INDEX_POLY);
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2016-05-30 15:31:31 +10:00
|
|
|
if (um->store.keyblocks) {
|
2023-12-08 16:40:06 -05:00
|
|
|
const size_t stride = mesh->key->elemsize;
|
2023-03-11 11:57:15 +11:00
|
|
|
BArrayStore *bs = BLI_array_store_at_size_get(
|
|
|
|
|
&um_arraystore.bs_stride[ARRAY_STORE_INDEX_SHAPE], stride);
|
2023-12-08 16:40:06 -05:00
|
|
|
for (int i = 0; i < mesh->key->totkey; i++) {
|
2016-05-30 15:31:31 +10:00
|
|
|
BArrayState *state = um->store.keyblocks[i];
|
|
|
|
|
BLI_array_store_state_remove(bs, state);
|
|
|
|
|
}
|
|
|
|
|
MEM_freeN(um->store.keyblocks);
|
2022-10-07 10:40:44 -05:00
|
|
|
um->store.keyblocks = nullptr;
|
2016-05-30 15:31:31 +10:00
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2023-07-24 22:06:55 +02:00
|
|
|
if (um->store.face_offset_indices) {
|
2023-12-08 16:40:06 -05:00
|
|
|
const size_t stride = sizeof(*mesh->face_offset_indices);
|
Mesh: Replace MPoly struct with offset indices
Implements #95967.
Currently the `MPoly` struct is 12 bytes, and stores the index of a
face's first corner and the number of corners/verts/edges. Polygons
and corners are always created in order by Blender, meaning each
face's corners will be after the previous face's corners. We can take
advantage of this fact and eliminate the redundancy in mesh face
storage by only storing a single integer corner offset for each face.
The size of the face is then encoded by the offset of the next face.
The size of a single integer is 4 bytes, so this reduces memory
usage by 3 times.
The same method is used for `CurvesGeometry`, so Blender already has
an abstraction to simplify using these offsets called `OffsetIndices`.
This class is used to easily retrieve a range of corner indices for
each face. This also gives the opportunity for sharing some logic with
curves.
Another benefit of the change is that the offsets and sizes stored in
`MPoly` can no longer disagree with each other. Storing faces in the
order of their corners can simplify some code too.
Face/polygon variables now use the `IndexRange` type, which comes with
quite a few utilities that can simplify code.
Some:
- The offset integer array has to be one longer than the face count to
avoid a branch for every face, which means the data is no longer part
of the mesh's `CustomData`.
- We lose the ability to "reference" an original mesh's offset array
until more reusable CoW from #104478 is committed. That will be added
in a separate commit.
- Since they aren't part of `CustomData`, poly offsets often have to be
copied manually.
- To simplify using `OffsetIndices` in many places, some functions and
structs in headers were moved to only compile in C++.
- All meshes created by Blender use the same order for faces and face
corners, but just in case, meshes with mismatched order are fixed by
versioning code.
- `MeshPolygon.totloop` is no longer editable in RNA. This API break is
necessary here unfortunately. It should be worth it in 3.6, since
that's the best way to allow loading meshes from 4.0, which is
important for an LTS version.
Pull Request: https://projects.blender.org/blender/blender/pulls/105938
2023-04-04 20:39:28 +02:00
|
|
|
BArrayStore *bs = BLI_array_store_at_size_get(
|
|
|
|
|
&um_arraystore.bs_stride[ARRAY_STORE_INDEX_POLY_OFFSETS], stride);
|
2023-07-24 22:06:55 +02:00
|
|
|
BArrayState *state = um->store.face_offset_indices;
|
Mesh: Replace MPoly struct with offset indices
Implements #95967.
Currently the `MPoly` struct is 12 bytes, and stores the index of a
face's first corner and the number of corners/verts/edges. Polygons
and corners are always created in order by Blender, meaning each
face's corners will be after the previous face's corners. We can take
advantage of this fact and eliminate the redundancy in mesh face
storage by only storing a single integer corner offset for each face.
The size of the face is then encoded by the offset of the next face.
The size of a single integer is 4 bytes, so this reduces memory
usage by 3 times.
The same method is used for `CurvesGeometry`, so Blender already has
an abstraction to simplify using these offsets called `OffsetIndices`.
This class is used to easily retrieve a range of corner indices for
each face. This also gives the opportunity for sharing some logic with
curves.
Another benefit of the change is that the offsets and sizes stored in
`MPoly` can no longer disagree with each other. Storing faces in the
order of their corners can simplify some code too.
Face/polygon variables now use the `IndexRange` type, which comes with
quite a few utilities that can simplify code.
Some:
- The offset integer array has to be one longer than the face count to
avoid a branch for every face, which means the data is no longer part
of the mesh's `CustomData`.
- We lose the ability to "reference" an original mesh's offset array
until more reusable CoW from #104478 is committed. That will be added
in a separate commit.
- Since they aren't part of `CustomData`, poly offsets often have to be
copied manually.
- To simplify using `OffsetIndices` in many places, some functions and
structs in headers were moved to only compile in C++.
- All meshes created by Blender use the same order for faces and face
corners, but just in case, meshes with mismatched order are fixed by
versioning code.
- `MeshPolygon.totloop` is no longer editable in RNA. This API break is
necessary here unfortunately. It should be worth it in 3.6, since
that's the best way to allow loading meshes from 4.0, which is
important for an LTS version.
Pull Request: https://projects.blender.org/blender/blender/pulls/105938
2023-04-04 20:39:28 +02:00
|
|
|
BLI_array_store_state_remove(bs, state);
|
2023-07-24 22:06:55 +02:00
|
|
|
um->store.face_offset_indices = nullptr;
|
Mesh: Replace MPoly struct with offset indices
Implements #95967.
Currently the `MPoly` struct is 12 bytes, and stores the index of a
face's first corner and the number of corners/verts/edges. Polygons
and corners are always created in order by Blender, meaning each
face's corners will be after the previous face's corners. We can take
advantage of this fact and eliminate the redundancy in mesh face
storage by only storing a single integer corner offset for each face.
The size of the face is then encoded by the offset of the next face.
The size of a single integer is 4 bytes, so this reduces memory
usage by 3 times.
The same method is used for `CurvesGeometry`, so Blender already has
an abstraction to simplify using these offsets called `OffsetIndices`.
This class is used to easily retrieve a range of corner indices for
each face. This also gives the opportunity for sharing some logic with
curves.
Another benefit of the change is that the offsets and sizes stored in
`MPoly` can no longer disagree with each other. Storing faces in the
order of their corners can simplify some code too.
Face/polygon variables now use the `IndexRange` type, which comes with
quite a few utilities that can simplify code.
Some:
- The offset integer array has to be one longer than the face count to
avoid a branch for every face, which means the data is no longer part
of the mesh's `CustomData`.
- We lose the ability to "reference" an original mesh's offset array
until more reusable CoW from #104478 is committed. That will be added
in a separate commit.
- Since they aren't part of `CustomData`, poly offsets often have to be
copied manually.
- To simplify using `OffsetIndices` in many places, some functions and
structs in headers were moved to only compile in C++.
- All meshes created by Blender use the same order for faces and face
corners, but just in case, meshes with mismatched order are fixed by
versioning code.
- `MeshPolygon.totloop` is no longer editable in RNA. This API break is
necessary here unfortunately. It should be worth it in 3.6, since
that's the best way to allow loading meshes from 4.0, which is
important for an LTS version.
Pull Request: https://projects.blender.org/blender/blender/pulls/105938
2023-04-04 20:39:28 +02:00
|
|
|
}
|
2016-05-30 15:31:31 +10:00
|
|
|
if (um->store.mselect) {
|
2023-12-08 16:40:06 -05:00
|
|
|
const size_t stride = sizeof(*mesh->mselect);
|
2023-03-11 11:57:15 +11:00
|
|
|
BArrayStore *bs = BLI_array_store_at_size_get(&um_arraystore.bs_stride[ARRAY_STORE_INDEX_MSEL],
|
|
|
|
|
stride);
|
2016-05-30 15:31:31 +10:00
|
|
|
BArrayState *state = um->store.mselect;
|
|
|
|
|
BLI_array_store_state_remove(bs, state);
|
2022-10-07 10:40:44 -05:00
|
|
|
um->store.mselect = nullptr;
|
2016-05-30 15:31:31 +10:00
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2016-05-30 15:31:31 +10:00
|
|
|
um_arraystore.users -= 1;
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2016-05-30 15:31:31 +10:00
|
|
|
BLI_assert(um_arraystore.users >= 0);
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2016-05-30 15:31:31 +10:00
|
|
|
if (um_arraystore.users == 0) {
|
|
|
|
|
# ifdef DEBUG_PRINT
|
|
|
|
|
printf("mesh undo store: freeing all data!\n");
|
|
|
|
|
# endif
|
2023-03-11 11:57:15 +11:00
|
|
|
for (int bs_index = 0; bs_index < ARRAY_STORE_INDEX_NUM; bs_index++) {
|
|
|
|
|
BLI_array_store_at_size_clear(&um_arraystore.bs_stride[bs_index]);
|
|
|
|
|
}
|
2016-05-30 15:31:31 +10:00
|
|
|
# ifdef USE_ARRAY_STORE_THREAD
|
|
|
|
|
BLI_task_pool_free(um_arraystore.task_pool);
|
2022-10-07 10:40:44 -05:00
|
|
|
um_arraystore.task_pool = nullptr;
|
2019-04-17 06:17:24 +02:00
|
|
|
# endif
|
|
|
|
|
}
|
2016-05-30 15:31:31 +10:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/** \} */
|
|
|
|
|
|
2021-05-27 16:14:51 +10:00
|
|
|
/* -------------------------------------------------------------------- */
|
|
|
|
|
/** \name Array Store Utilities
|
|
|
|
|
* \{ */
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* Create an array of #UndoMesh from `objects`.
|
|
|
|
|
*
|
|
|
|
|
* where each element in the resulting array is the most recently created
|
|
|
|
|
* undo-mesh for the object's mesh.
|
2022-10-07 10:40:44 -05:00
|
|
|
* When no undo-mesh can be found that array index is nullptr.
|
2021-05-27 16:14:51 +10:00
|
|
|
*
|
|
|
|
|
* This is used for de-duplicating memory between undo steps,
|
|
|
|
|
* failure to find the undo step will store a full duplicate in memory.
|
|
|
|
|
* define `DEBUG_PRINT` to check memory is de-duplicating as expected.
|
|
|
|
|
*/
|
|
|
|
|
static UndoMesh **mesh_undostep_reference_elems_from_objects(Object **object, int object_len)
|
|
|
|
|
{
|
2024-01-22 13:47:13 +01:00
|
|
|
/* Map: `Mesh.id.session_uid` -> `UndoMesh`. */
|
2021-05-27 16:14:51 +10:00
|
|
|
GHash *uuid_map = BLI_ghash_ptr_new_ex(__func__, object_len);
|
2022-10-07 10:40:44 -05:00
|
|
|
UndoMesh **um_references = static_cast<UndoMesh **>(
|
|
|
|
|
MEM_callocN(sizeof(UndoMesh *) * object_len, __func__));
|
2021-05-27 16:14:51 +10:00
|
|
|
for (int i = 0; i < object_len; i++) {
|
2023-12-08 16:40:06 -05:00
|
|
|
const Mesh *mesh = static_cast<const Mesh *>(object[i]->data);
|
2024-01-22 13:47:13 +01:00
|
|
|
BLI_ghash_insert(uuid_map, POINTER_FROM_INT(mesh->id.session_uid), &um_references[i]);
|
2021-05-27 16:14:51 +10:00
|
|
|
}
|
|
|
|
|
int uuid_map_len = object_len;
|
|
|
|
|
|
|
|
|
|
/* Loop backwards over all previous mesh undo data until either:
|
|
|
|
|
* - All elements have been found (where `um_references` we'll have every element set).
|
|
|
|
|
* - There are no undo steps left to look for. */
|
2022-10-07 10:40:44 -05:00
|
|
|
UndoMesh *um_iter = static_cast<UndoMesh *>(um_arraystore.local_links.last);
|
2021-05-27 16:34:41 +10:00
|
|
|
while (um_iter && (uuid_map_len != 0)) {
|
|
|
|
|
UndoMesh **um_p;
|
2024-01-22 13:47:13 +01:00
|
|
|
if ((um_p = static_cast<UndoMesh **>(
|
|
|
|
|
BLI_ghash_popkey(uuid_map, POINTER_FROM_INT(um_iter->mesh.id.session_uid), nullptr))))
|
2022-10-07 10:40:44 -05:00
|
|
|
{
|
2021-05-27 16:14:51 +10:00
|
|
|
*um_p = um_iter;
|
|
|
|
|
uuid_map_len--;
|
|
|
|
|
}
|
2021-05-27 16:34:41 +10:00
|
|
|
um_iter = um_iter->local_prev;
|
2021-05-27 16:14:51 +10:00
|
|
|
}
|
|
|
|
|
BLI_assert(uuid_map_len == BLI_ghash_len(uuid_map));
|
2022-10-07 10:40:44 -05:00
|
|
|
BLI_ghash_free(uuid_map, nullptr, nullptr);
|
2021-05-27 16:14:51 +10:00
|
|
|
if (uuid_map_len == object_len) {
|
|
|
|
|
MEM_freeN(um_references);
|
2022-10-07 10:40:44 -05:00
|
|
|
um_references = nullptr;
|
2021-05-27 16:14:51 +10:00
|
|
|
}
|
|
|
|
|
return um_references;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/** \} */
|
|
|
|
|
|
2016-05-30 15:31:31 +10:00
|
|
|
#endif /* USE_ARRAY_STORE */
|
|
|
|
|
|
|
|
|
|
/* for callbacks */
|
2016-05-25 19:12:43 +10:00
|
|
|
/* undo simply makes copies of a bmesh */
|
2021-05-27 16:14:51 +10:00
|
|
|
/**
|
|
|
|
|
* \param um_ref: The reference to use for de-duplicating memory between undo-steps.
|
|
|
|
|
*/
|
|
|
|
|
static void *undomesh_from_editmesh(UndoMesh *um, BMEditMesh *em, Key *key, UndoMesh *um_ref)
|
2016-05-25 19:12:43 +10:00
|
|
|
{
|
2018-03-19 14:17:59 +01:00
|
|
|
BLI_assert(BLI_array_is_zeroed(um, 1));
|
2016-05-30 15:31:31 +10:00
|
|
|
#ifdef USE_ARRAY_STORE_THREAD
|
|
|
|
|
/* changes this waits is low, but must have finished */
|
|
|
|
|
if (um_arraystore.task_pool) {
|
|
|
|
|
BLI_task_pool_work_and_wait(um_arraystore.task_pool);
|
|
|
|
|
}
|
|
|
|
|
#endif
|
2016-05-25 19:12:43 +10:00
|
|
|
/* make sure shape keys work */
|
2022-10-07 10:40:44 -05:00
|
|
|
if (key != nullptr) {
|
2023-12-08 16:40:06 -05:00
|
|
|
um->mesh.key = (Key *)BKE_id_copy_ex(
|
2022-10-07 10:40:44 -05:00
|
|
|
nullptr, &key->id, nullptr, LIB_ID_COPY_LOCALIZE | LIB_ID_COPY_NO_ANIMDATA);
|
2020-10-08 16:24:38 +02:00
|
|
|
}
|
|
|
|
|
else {
|
2023-12-08 16:40:06 -05:00
|
|
|
um->mesh.key = nullptr;
|
2020-10-08 16:24:38 +02:00
|
|
|
}
|
2016-05-25 19:12:43 +10:00
|
|
|
|
2022-03-08 13:48:31 +11:00
|
|
|
/* Uncomment for troubleshooting. */
|
|
|
|
|
// BM_mesh_validate(em->bm);
|
2016-05-25 19:12:43 +10:00
|
|
|
|
Mesh: Move hide flags to generic attributes
This commit moves the hide status of mesh vertices, edges, and faces
from the `ME_FLAG` to optional generic boolean attributes. Storing this
data as generic attributes can significantly simplify and improve code,
as described in T95965.
The attributes are called `.hide_vert`, `.hide_edge`, and `.hide_poly`,
using the attribute name semantics discussed in T97452. The `.` prefix
means they are "UI attributes", so they still contain original data
edited by users, but they aren't meant to be accessed procedurally by
the user in arbitrary situations. They are also be hidden in the
spreadsheet and the attribute list by default,
Until 4.0, the attributes are still written to and read from the mesh
in the old way, so neither forward nor backward compatibility are
affected. This means memory requirements will be increased by one byte
per element when the hide status is used. When the flags are removed
completely, requirements will decrease when hiding is unused.
Further notes:
* Some code can be further simplified to skip some processing when the
hide attributes don't exist.
* The data is still stored in flags for `BMesh`, necessitating some
complexity in the conversion to and from `Mesh`.
* Access to the "hide" property of mesh elements in RNA is slower.
The separate boolean arrays should be used where possible.
Ref T95965
Differential Revision: https://developer.blender.org/D14685
2022-08-11 12:54:24 -04:00
|
|
|
/* Copy the ID name characters to the mesh so code that depends on accessing the ID type can work
|
|
|
|
|
* on it. Necessary to use the attribute API. */
|
2023-12-08 16:40:06 -05:00
|
|
|
STRNCPY(um->mesh.id.name, "MEundomesh_from_editmesh");
|
Mesh: Move hide flags to generic attributes
This commit moves the hide status of mesh vertices, edges, and faces
from the `ME_FLAG` to optional generic boolean attributes. Storing this
data as generic attributes can significantly simplify and improve code,
as described in T95965.
The attributes are called `.hide_vert`, `.hide_edge`, and `.hide_poly`,
using the attribute name semantics discussed in T97452. The `.` prefix
means they are "UI attributes", so they still contain original data
edited by users, but they aren't meant to be accessed procedurally by
the user in arbitrary situations. They are also hidden in the
spreadsheet and the attribute list by default.
Until 4.0, the attributes are still written to and read from the mesh
in the old way, so neither forward nor backward compatibility are
affected. This means memory requirements will be increased by one byte
per element when the hide status is used. When the flags are removed
completely, requirements will decrease when hiding is unused.
Further notes:
* Some code can be further simplified to skip some processing when the
hide attributes don't exist.
* The data is still stored in flags for `BMesh`, necessitating some
complexity in the conversion to and from `Mesh`.
* Access to the "hide" property of mesh elements in RNA is slower.
The separate boolean arrays should be used where possible.
Ref T95965
Differential Revision: https://developer.blender.org/D14685
2022-08-11 12:54:24 -04:00
|
|
|
|
2022-10-12 20:55:26 -05:00
|
|
|
/* Runtime data is necessary for some asserts in other code, and the overhead of creating it for
|
|
|
|
|
* undo meshes should be low. */
|
2023-12-08 16:40:06 -05:00
|
|
|
BLI_assert(um->mesh.runtime == nullptr);
|
|
|
|
|
um->mesh.runtime = new blender::bke::MeshRuntime();
|
2022-10-12 20:55:26 -05:00
|
|
|
|
2022-10-07 10:40:44 -05:00
|
|
|
CustomData_MeshMasks cd_mask_extra{};
|
|
|
|
|
cd_mask_extra.vmask = CD_MASK_SHAPE_KEYINDEX;
|
|
|
|
|
BMeshToMeshParams params{};
|
|
|
|
|
/* Undo code should not be manipulating 'G_MAIN->object' hooks/vertex-parent. */
|
|
|
|
|
params.calc_object_remap = false;
|
|
|
|
|
params.update_shapekey_indices = false;
|
|
|
|
|
params.cd_mask_extra = cd_mask_extra;
|
|
|
|
|
params.active_shapekey_to_mvert = true;
|
2023-12-08 16:40:06 -05:00
|
|
|
BM_mesh_bm_to_me(nullptr, em->bm, &um->mesh, ¶ms);
|
2016-05-25 19:12:43 +10:00
|
|
|
|
|
|
|
|
um->selectmode = em->selectmode;
|
|
|
|
|
um->shapenr = em->bm->shapenr;
|
|
|
|
|
|
2016-05-30 15:31:31 +10:00
|
|
|
#ifdef USE_ARRAY_STORE
|
|
|
|
|
{
|
2021-02-14 20:58:04 +11:00
|
|
|
/* Add ourselves. */
|
2021-05-27 16:34:41 +10:00
|
|
|
BLI_addtail(&um_arraystore.local_links, um);
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2016-05-30 15:31:31 +10:00
|
|
|
# ifdef USE_ARRAY_STORE_THREAD
|
2022-10-07 10:40:44 -05:00
|
|
|
if (um_arraystore.task_pool == nullptr) {
|
|
|
|
|
um_arraystore.task_pool = BLI_task_pool_create_background(nullptr, TASK_PRIORITY_LOW);
|
2016-05-30 15:31:31 +10:00
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2022-10-07 10:40:44 -05:00
|
|
|
UMArrayData *um_data = static_cast<UMArrayData *>(MEM_mallocN(sizeof(*um_data), __func__));
|
2016-05-30 15:31:31 +10:00
|
|
|
um_data->um = um;
|
|
|
|
|
um_data->um_ref = um_ref;
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2022-10-07 10:40:44 -05:00
|
|
|
BLI_task_pool_push(um_arraystore.task_pool, um_arraystore_compact_cb, um_data, true, nullptr);
|
2016-05-30 15:31:31 +10:00
|
|
|
# else
|
|
|
|
|
um_arraystore_compact_with_info(um, um_ref);
|
|
|
|
|
# endif
|
|
|
|
|
}
|
2021-05-27 16:14:51 +10:00
|
|
|
#else
|
|
|
|
|
UNUSED_VARS(um_ref);
|
2016-05-30 15:31:31 +10:00
|
|
|
#endif
|
|
|
|
|
|
2016-05-25 19:12:43 +10:00
|
|
|
return um;
|
|
|
|
|
}
|
|
|
|
|
|
2022-02-22 09:57:07 +11:00
|
|
|
/**
 * Restore an edit-mesh from a previously encoded undo snapshot.
 *
 * Frees the current edit-mesh data in \a em, rebuilds a fresh #BMesh from the
 * undo mesh in \a um and re-initializes \a em (and \a ob's active shape key
 * index) from it.
 *
 * \param um: The undo snapshot to restore from.
 * \param ob: The object owning the edit-mesh (its `shapenr` is synced).
 * \param em: The edit-mesh to overwrite in place.
 */
static void undomesh_to_editmesh(UndoMesh *um, Object *ob, BMEditMesh *em)
{
  BMEditMesh *em_tmp;
  BMesh *bm;

#ifdef USE_ARRAY_STORE
#  ifdef USE_ARRAY_STORE_THREAD
  /* Chance this waits is low, but it must have finished. */
  BLI_task_pool_work_and_wait(um_arraystore.task_pool);
#  endif

#  ifdef DEBUG_TIME
  TIMEIT_START(mesh_undo_expand);
#  endif

  /* De-duplicated storage must be expanded back into a full mesh before use. */
  um_arraystore_expand(um);

#  ifdef DEBUG_TIME
  TIMEIT_END(mesh_undo_expand);
#  endif
#endif /* USE_ARRAY_STORE */

  const BMAllocTemplate allocsize = BMALLOC_TEMPLATE_FROM_ME(&um->mesh);

  /* Keep the stored shape key index so conversion uses the correct key. */
  em->bm->shapenr = um->shapenr;

  EDBM_mesh_free_data(em);

  BMeshCreateParams create_params{};
  create_params.use_toolflags = true;
  bm = BM_mesh_create(&allocsize, &create_params);

  BMeshFromMeshParams convert_params{};
  /* Handled with tessellation. */
  convert_params.calc_face_normal = false;
  convert_params.calc_vert_normal = false;
  convert_params.active_shapekey = um->shapenr;
  BM_mesh_bm_from_me(bm, &um->mesh, &convert_params);

  /* Create a temporary edit-mesh wrapper and copy it over the existing one,
   * so `em` itself (still referenced elsewhere) points at the new BMesh. */
  em_tmp = BKE_editmesh_create(bm);
  *em = *em_tmp;

  /* Calculate face normals and tessellation at once since it's multi-threaded. */
  BKE_editmesh_looptris_and_normals_calc(em);

  em->selectmode = um->selectmode;
  bm->selectmode = um->selectmode;

  /* UV/vertex space arrays are stale after rebuilding the BMesh. */
  bm->spacearr_dirty = BM_SPACEARR_DIRTY_ALL;

  ob->shapenr = um->shapenr;

  MEM_delete(em_tmp);

#ifdef USE_ARRAY_STORE
  /* Release the expanded copy, the compacted storage remains. */
  um_arraystore_expand_clear(um);
#endif
}
|
|
|
|
|
|
2018-03-19 14:17:59 +01:00
|
|
|
/**
 * Free all data owned by an undo snapshot (the #UndoMesh itself is owned by
 * the undo step and freed by the caller).
 */
static void undomesh_free_data(UndoMesh *um)
{
  Mesh *mesh = &um->mesh;

#ifdef USE_ARRAY_STORE

#  ifdef USE_ARRAY_STORE_THREAD
  /* Chance this waits is low, but it must have finished. */
  BLI_task_pool_work_and_wait(um_arraystore.task_pool);
#  endif

  /* we need to expand so any allocations in custom-data are freed with the mesh */
  um_arraystore_expand(um);

  BLI_assert(BLI_findindex(&um_arraystore.local_links, um) != -1);
  BLI_remlink(&um_arraystore.local_links, um);

  um_arraystore_free(um);
#endif

  /* The undo mesh owns a copy of the shape key data-block, free it manually. */
  if (mesh->key) {
    BKE_key_free_data(mesh->key);
    MEM_freeN(mesh->key);
  }

  BKE_mesh_free_data_for_undo(mesh);
}
|
|
|
|
|
|
2018-03-19 14:17:59 +01:00
|
|
|
/**
 * Return the active edit-mode mesh object from the context,
 * or null when there is none (or it has no edit-mesh runtime data).
 */
static Object *editmesh_object_from_context(bContext *C)
{
  Scene *scene = CTX_data_scene(C);
  ViewLayer *view_layer = CTX_data_view_layer(C);
  BKE_view_layer_synced_ensure(scene, view_layer);

  Object *object = BKE_view_layer_edit_object_get(view_layer);
  if (object == nullptr || object->type != OB_MESH) {
    return nullptr;
  }
  const Mesh *mesh = static_cast<Mesh *>(object->data);
  return (mesh->runtime->edit_mesh != nullptr) ? object : nullptr;
}
|
|
|
|
|
|
2018-03-19 14:17:59 +01:00
|
|
|
/** \} */
|
|
|
|
|
|
|
|
|
|
/* -------------------------------------------------------------------- */
|
|
|
|
|
/** \name Implements ED Undo System
|
2018-04-16 16:27:55 +02:00
|
|
|
*
|
|
|
|
|
* \note This is similar for all edit-mode types.
|
2018-03-19 14:17:59 +01:00
|
|
|
* \{ */
|
|
|
|
|
|
2022-10-07 10:40:44 -05:00
|
|
|
/** Per-object element of a mesh undo step. */
struct MeshUndoStep_Elem {
  /** Reference to the object that was in edit-mode when the step was encoded. */
  UndoRefID_Object obedit_ref;
  /** Snapshot of the object's edit-mesh state. */
  UndoMesh data;
};
|
2018-04-16 16:27:55 +02:00
|
|
|
|
2022-10-07 10:40:44 -05:00
|
|
|
/** Undo step covering all mesh objects that were in edit-mode. */
struct MeshUndoStep {
  UndoStep step;
  /** See #ED_undo_object_editmode_validate_scene_from_windows code comment for details. */
  UndoRefID_Scene scene_ref;
  /** Array of `elems_len` per-object snapshots; the first element is the active object. */
  MeshUndoStep_Elem *elems;
  uint elems_len;
};
|
2018-03-19 14:17:59 +01:00
|
|
|
|
|
|
|
|
/** This undo system applies when an edit-mode mesh object is active. */
static bool mesh_undosys_poll(bContext *C)
{
  Object *object = editmesh_object_from_context(C);
  return object != nullptr;
}
|
|
|
|
|
|
2022-10-07 10:40:44 -05:00
|
|
|
/**
 * Encode the current edit-mode state of all edit-mode mesh objects into \a us_p.
 * \return true (encoding always succeeds once the poll passed).
 */
static bool mesh_undosys_step_encode(bContext *C, Main *bmain, UndoStep *us_p)
{
  MeshUndoStep *us = (MeshUndoStep *)us_p;

  /* Important not to use the 3D view when getting objects because all objects
   * outside of this list will be moved out of edit-mode when reading back undo steps. */
  Scene *scene = CTX_data_scene(C);
  ViewLayer *view_layer = CTX_data_view_layer(C);
  const ToolSettings *ts = scene->toolsettings;
  blender::Vector<Object *> objects = ED_undo_editmode_objects_from_view_layer(scene, view_layer);

  us->scene_ref.ptr = scene;
  us->elems = static_cast<MeshUndoStep_Elem *>(
      MEM_callocN(sizeof(*us->elems) * objects.size(), __func__));
  us->elems_len = objects.size();

  /* Optional references to previous snapshots, used for de-duplicated storage. */
  UndoMesh **um_references = nullptr;

#ifdef USE_ARRAY_STORE
  um_references = mesh_undostep_reference_elems_from_objects(objects.data(), objects.size());
#endif

  for (uint i = 0; i < objects.size(); i++) {
    Object *ob = objects[i];
    MeshUndoStep_Elem *elem = &us->elems[i];

    elem->obedit_ref.ptr = ob;
    Mesh *mesh = static_cast<Mesh *>(elem->obedit_ref.ptr->data);
    BMEditMesh *em = mesh->runtime->edit_mesh.get();
    undomesh_from_editmesh(&elem->data, em, mesh->key, um_references ? um_references[i] : nullptr);
    em->needs_flush_to_id = 1;
    us->step.data_size += elem->data.undo_size;
    elem->data.uv_selectmode = ts->uv_selectmode;

#ifdef USE_ARRAY_STORE
    /** As this is only data storage it is safe to set the session ID here. */
    elem->data.mesh.id.session_uid = mesh->id.session_uid;
#endif
  }

  /* Only the temporary reference array is freed, the referenced data is shared. */
  if (um_references != nullptr) {
    MEM_freeN(um_references);
  }

  bmain->is_memfile_undo_flush_needed = true;

  return true;
}
|
|
|
|
|
|
2022-10-07 10:40:44 -05:00
|
|
|
/**
 * Apply an encoded mesh undo step: restore edit-mode for the stored objects,
 * load each snapshot back into its edit-mesh, and re-activate the first object.
 */
static void mesh_undosys_step_decode(
    bContext *C, Main *bmain, UndoStep *us_p, const eUndoStepDir /*dir*/, bool /*is_final*/)
{
  MeshUndoStep *us = (MeshUndoStep *)us_p;
  Scene *scene = CTX_data_scene(C);
  ViewLayer *view_layer = CTX_data_view_layer(C);

  /* The step may belong to a different scene than the current context, see the
   * referenced function's code comment for details. */
  ED_undo_object_editmode_validate_scene_from_windows(
      CTX_wm_manager(C), us->scene_ref.ptr, &scene, &view_layer);
  /* Ensure exactly the stored objects are in edit-mode. */
  ED_undo_object_editmode_restore_helper(
      scene, view_layer, &us->elems[0].obedit_ref.ptr, us->elems_len, sizeof(*us->elems));

  BLI_assert(BKE_object_is_in_editmode(us->elems[0].obedit_ref.ptr));

  for (uint i = 0; i < us->elems_len; i++) {
    MeshUndoStep_Elem *elem = &us->elems[i];
    Object *obedit = elem->obedit_ref.ptr;
    Mesh *mesh = static_cast<Mesh *>(obedit->data);
    if (mesh->runtime->edit_mesh == nullptr) {
      /* Should never fail, may not crash but can give odd behavior. */
      CLOG_ERROR(&LOG,
                 "name='%s', failed to enter edit-mode for object '%s', undo state invalid",
                 us_p->name,
                 obedit->id.name);
      continue;
    }
    BMEditMesh *em = mesh->runtime->edit_mesh.get();
    undomesh_to_editmesh(&elem->data, obedit, em);
    em->needs_flush_to_id = 1;
    DEG_id_tag_update(&mesh->id, ID_RECALC_GEOMETRY);
  }

  /* The first element is always active */
  ED_undo_object_set_active_or_warn(
      scene, view_layer, us->elems[0].obedit_ref.ptr, us_p->name, &LOG);

  /* Check after setting active (unless undoing into another scene). */
  BLI_assert(mesh_undosys_poll(C) || (scene != CTX_data_scene(C)));

  scene->toolsettings->selectmode = us->elems[0].data.selectmode;
  scene->toolsettings->uv_selectmode = us->elems[0].data.uv_selectmode;

  bmain->is_memfile_undo_flush_needed = true;

  WM_event_add_notifier(C, NC_GEOM | ND_DATA, nullptr);
}
|
|
|
|
|
|
|
|
|
|
/** Free all per-object snapshots and the element array of a mesh undo step. */
static void mesh_undosys_step_free(UndoStep *us_p)
{
  MeshUndoStep *us = (MeshUndoStep *)us_p;
  for (uint elem_index = 0; elem_index < us->elems_len; elem_index++) {
    undomesh_free_data(&us->elems[elem_index].data);
  }
  MEM_freeN(us->elems);
}
|
2018-03-19 14:17:59 +01:00
|
|
|
|
|
|
|
|
/** Visit every ID reference held by this undo step (scene + one object per element). */
static void mesh_undosys_foreach_ID_ref(UndoStep *us_p,
                                        UndoTypeForEachIDRefFn foreach_ID_ref_fn,
                                        void *user_data)
{
  MeshUndoStep *us = (MeshUndoStep *)us_p;

  foreach_ID_ref_fn(user_data, (UndoRefID *)&us->scene_ref);
  for (uint elem_index = 0; elem_index < us->elems_len; elem_index++) {
    foreach_ID_ref_fn(user_data, (UndoRefID *)&us->elems[elem_index].obedit_ref);
  }
}
|
|
|
|
|
|
|
|
|
|
/** Register the edit-mesh undo system callbacks on \a ut. */
void ED_mesh_undosys_type(UndoType *ut)
{
  ut->name = "Edit Mesh";
  ut->poll = mesh_undosys_poll;
  ut->step_encode = mesh_undosys_step_encode;
  ut->step_decode = mesh_undosys_step_decode;
  ut->step_free = mesh_undosys_step_free;

  ut->step_foreach_ID_ref = mesh_undosys_foreach_ID_ref;

  /* Encoding reads the context to collect edit-mode objects. */
  ut->flags = UNDOTYPE_FLAG_NEED_CONTEXT_FOR_ENCODE;

  ut->step_size = sizeof(MeshUndoStep);
}
|
2018-03-19 14:17:59 +01:00
|
|
|
|
|
|
|
|
/** \} */
|