When allocating new `CustomData` layers, often we do redundant initialization of arrays. For example, it's common that values are allocated, set to their default value, and then set to some other value. This is wasteful, and it negates the benefits of optimizations to the allocator like D15082. There are two reasons for this. The first is array-of-structs storage that makes it annoying to initialize values manually, and the second is confusing options in the Custom Data API. This patch addresses the latter. The `CustomData` "alloc type" options are rearranged. Now, besides the options that use existing layers, there are two remaining: * `CD_SET_DEFAULT` sets the default value. * Usually zeroes, but for colors this is white (how it was before). * Should be used when you add the layer but don't set all values. * `CD_CONSTRUCT` refers to the "default construct" C++ term. * Only necessary or defined for non-trivial types like vertex groups. * Doesn't do anything for trivial types like `int` or `float3`. * Should be used every other time, when all values will be set. The attribute API's `AttributeInit` types are updated as well. To update code, replace `CD_CALLOC` with `CD_SET_DEFAULT` and `CD_DEFAULT` with `CD_CONSTRUCT`. This doesn't cause any functional changes yet. Follow-up commits will change to avoid initializing new layers where the correctness is clear. Differential Revision: https://developer.blender.org/D15617
237 lines
5.5 KiB
C
237 lines
5.5 KiB
C
/* SPDX-License-Identifier: GPL-2.0-or-later
|
|
* Copyright 2001-2002 NaN Holding BV. All rights reserved. */
|
|
|
|
/** \file
|
|
* \ingroup bli
|
|
* \brief Efficient memory allocation for many small chunks.
|
|
* \section aboutmemarena Memory Arena
|
|
*
|
|
* Memory arena's are commonly used when the program
|
|
* needs to quickly allocate lots of little bits of data,
|
|
* which are all freed at the same moment.
|
|
*
|
|
* \note Memory can't be freed during the arenas lifetime.
|
|
*/
|
|
|
|
#include <stdlib.h>
|
|
#include <string.h>
|
|
|
|
#include "MEM_guardedalloc.h"
|
|
|
|
#include "BLI_asan.h"
|
|
#include "BLI_memarena.h"
|
|
#include "BLI_strict_flags.h"
|
|
#include "BLI_utildefines.h"
|
|
|
|
#ifdef WITH_MEM_VALGRIND
|
|
# include "valgrind/memcheck.h"
|
|
#else
|
|
# define VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed) UNUSED_VARS(pool, rzB, is_zeroed)
|
|
# define VALGRIND_DESTROY_MEMPOOL(pool) UNUSED_VARS(pool)
|
|
# define VALGRIND_MEMPOOL_ALLOC(pool, addr, size) UNUSED_VARS(pool, addr, size)
|
|
# define VALGRIND_MOVE_MEMPOOL(pool_a, pool_b) UNUSED_VARS(pool_a, pool_b)
|
|
#endif
|
|
|
|
struct MemBuf {
|
|
struct MemBuf *next;
|
|
uchar data[0];
|
|
};
|
|
|
|
struct MemArena {
  /* Pointer to the next free byte inside the current (most recent) buffer. */
  unsigned char *curbuf;
  /* Name reported for allocations (passed through to `MEM_mallocN`/`MEM_callocN`). */
  const char *name;
  /* Singly linked list of all buffers owned by this arena, newest first. */
  struct MemBuf *bufs;

  /* `bufsize`: size used for each new buffer; `cursize`: free bytes left in `curbuf`. */
  size_t bufsize, cursize;
  /* Alignment of returned pointers; must be a power of two (default 8, see
   * #BLI_memarena_use_align). */
  size_t align;

  /* When true, new buffers are zero-initialized via `MEM_callocN`. */
  bool use_calloc;
};
|
|
|
|
static void memarena_buf_free_all(struct MemBuf *mb)
|
|
{
|
|
while (mb != NULL) {
|
|
struct MemBuf *mb_next = mb->next;
|
|
|
|
/* Unpoison memory because MEM_freeN might overwrite it. */
|
|
BLI_asan_unpoison(mb, (uint)MEM_allocN_len(mb));
|
|
|
|
MEM_freeN(mb);
|
|
mb = mb_next;
|
|
}
|
|
}
|
|
|
|
MemArena *BLI_memarena_new(const size_t bufsize, const char *name)
|
|
{
|
|
MemArena *ma = MEM_callocN(sizeof(*ma), "memarena");
|
|
ma->bufsize = bufsize;
|
|
ma->align = 8;
|
|
ma->name = name;
|
|
|
|
VALGRIND_CREATE_MEMPOOL(ma, 0, false);
|
|
|
|
return ma;
|
|
}
|
|
|
|
/** Make the arena zero-initialize all newly allocated buffers. */
void BLI_memarena_use_calloc(MemArena *ma)
{
  /* Use `true` rather than an integer literal for the `bool` member. */
  ma->use_calloc = true;
}
|
|
|
|
/** Make the arena leave newly allocated buffers uninitialized (the default). */
void BLI_memarena_use_malloc(MemArena *ma)
{
  /* Use `false` rather than an integer literal for the `bool` member. */
  ma->use_calloc = false;
}
|
|
|
|
void BLI_memarena_use_align(struct MemArena *ma, const size_t align)
|
|
{
|
|
/* Align must be a power of two. */
|
|
BLI_assert((align & (align - 1)) == 0);
|
|
|
|
ma->align = align;
|
|
}
|
|
|
|
void BLI_memarena_free(MemArena *ma)
{
  /* Release every buffer in the list, then the arena struct itself. */
  memarena_buf_free_all(ma->bufs);

  VALGRIND_DESTROY_MEMPOOL(ma);

  MEM_freeN(ma);
}
|
|
|
|
/** Pad num up by \a amt (must be power of two). */
|
|
#define PADUP(num, amt) (((num) + ((amt)-1)) & ~((amt)-1))
|
|
|
|
/** Align alloc'ed memory (needed if `align > 8`). */
|
|
static void memarena_curbuf_align(MemArena *ma)
|
|
{
|
|
unsigned char *tmp;
|
|
|
|
tmp = (unsigned char *)PADUP((intptr_t)ma->curbuf, (int)ma->align);
|
|
ma->cursize -= (size_t)(tmp - ma->curbuf);
|
|
ma->curbuf = tmp;
|
|
}
|
|
|
|
void *BLI_memarena_alloc(MemArena *ma, size_t size)
{
  void *ptr;

  /* Ensure proper alignment by rounding size up to a multiple of the arena's
   * alignment (`ma->align`, default 8). */
  size = PADUP(size, ma->align);

  if (UNLIKELY(size > ma->cursize)) {
    /* Not enough room left in the current buffer: allocate a new one. */
    if (size > ma->bufsize - (ma->align - 1)) {
      /* Oversized request: give it a dedicated buffer. `PADUP(size + 1, ...)`
       * adds at least one extra alignment step so that after
       * #memarena_curbuf_align consumes up to `align - 1` bytes,
       * at least `size` usable bytes remain. */
      ma->cursize = PADUP(size + 1, ma->align);
    }
    else {
      ma->cursize = ma->bufsize;
    }

    struct MemBuf *mb = (ma->use_calloc ? MEM_callocN : MEM_mallocN)(sizeof(*mb) + ma->cursize,
                                                                     ma->name);
    /* Prepend the new buffer to the arena's list and start allocating from it. */
    ma->curbuf = mb->data;
    mb->next = ma->bufs;
    ma->bufs = mb;

    /* Keep unreturned arena memory poisoned; each allocation is unpoisoned below. */
    BLI_asan_poison(ma->curbuf, ma->cursize);

    memarena_curbuf_align(ma);
  }

  ptr = ma->curbuf;
  ma->curbuf += size;
  ma->cursize -= size;

  VALGRIND_MEMPOOL_ALLOC(ma, ptr, size);

  BLI_asan_unpoison(ptr, size);

  return ptr;
}
|
|
|
|
/** Allocate zero-initialized memory from the arena. */
void *BLI_memarena_calloc(MemArena *ma, size_t size)
{
  /* No need to use this function call if we're calloc'ing by default. */
  BLI_assert(ma->use_calloc == false);

  void *ptr = BLI_memarena_alloc(ma, size);
  BLI_assert(ptr != NULL);
  memset(ptr, 0, size);

  return ptr;
}
|
|
|
|
/**
 * Move all buffers from \a ma_src into \a ma_dst, leaving \a ma_src empty
 * but still usable. Both arenas must have matching configuration.
 */
void BLI_memarena_merge(MemArena *ma_dst, MemArena *ma_src)
{
  /* Memory arenas must be compatible. */
  BLI_assert(ma_dst != ma_src);
  BLI_assert(ma_dst->align == ma_src->align);
  BLI_assert(ma_dst->use_calloc == ma_src->use_calloc);
  BLI_assert(ma_dst->bufsize == ma_src->bufsize);

  if (ma_src->bufs == NULL) {
    /* Source owns no buffers: nothing to move. */
    return;
  }

  if (UNLIKELY(ma_dst->bufs == NULL)) {
    /* Destination is empty: take over the source's buffers wholesale. */
    BLI_assert(ma_dst->curbuf == NULL);
    ma_dst->bufs = ma_src->bufs;
    ma_dst->curbuf = ma_src->curbuf;
    ma_dst->cursize = ma_src->cursize;
  }
  else {
    /* Keep the 'ma_dst->curbuf' for simplicity.
     * Insert buffers after the first. */
    if (ma_dst->bufs->next != NULL) {
      /* Loop over `ma_src` instead of `ma_dst` since it's likely the destination is larger
       * when used for accumulating from multiple sources.
       * (The original code re-assigned `mb_src = ma_src->bufs` redundantly right
       * after the identical initializer; the duplicate assignment is removed.) */
      struct MemBuf *mb_src = ma_src->bufs;
      while (mb_src && mb_src->next) {
        mb_src = mb_src->next;
      }
      mb_src->next = ma_dst->bufs->next;
    }
    ma_dst->bufs->next = ma_src->bufs;
  }

  /* Leave the source empty but valid for further use. */
  ma_src->bufs = NULL;
  ma_src->curbuf = NULL;
  ma_src->cursize = 0;

  VALGRIND_MOVE_MEMPOOL(ma_src, ma_dst);
  VALGRIND_CREATE_MEMPOOL(ma_src, 0, false);
}
|
|
|
|
void BLI_memarena_clear(MemArena *ma)
{
  if (ma->bufs) {
    unsigned char *curbuf_prev;
    size_t curbuf_used;

    /* Free all buffers except the first one, which is kept for reuse. */
    if (ma->bufs->next) {
      memarena_buf_free_all(ma->bufs->next);
      ma->bufs->next = NULL;
    }

    /* Rewind the allocation cursor to the start of the kept buffer. */
    curbuf_prev = ma->curbuf;
    ma->curbuf = ma->bufs->data;
    memarena_curbuf_align(ma);

    /* restore to original size */
    curbuf_used = (size_t)(curbuf_prev - ma->curbuf);
    ma->cursize += curbuf_used;

    /* Re-zero the previously handed-out portion so the calloc guarantee
     * still holds for future allocations from this buffer. */
    if (ma->use_calloc) {
      memset(ma->curbuf, 0, curbuf_used);
    }
    /* Re-poison the now-free region; allocations unpoison per-request. */
    BLI_asan_poison(ma->curbuf, ma->cursize);
  }

  VALGRIND_DESTROY_MEMPOOL(ma);
  VALGRIND_CREATE_MEMPOOL(ma, 0, false);
}
|