Cleanup: BLI_array_store comments, use const variables

Campbell Barton
2023-03-23 10:52:40 +11:00
parent 274c554f4c
commit 2de2db0f79
2 changed files with 106 additions and 108 deletions

View File

@@ -5,10 +5,11 @@
* \brief Array storage to minimize duplication.
*
* This is done by splitting arrays into chunks and using copy-on-write (COW),
- * to de-duplicate chunks,
- * from the users perspective this is an implementation detail.
+ * to de-duplicate chunks; from the user's perspective this is an implementation detail.
*
* Overview
* ========
*
* Data Structure
* --------------
*
@@ -16,51 +17,52 @@
*
* \note The only 2 structures here which are referenced externally are the following:
*
- * - BArrayStore: The whole array store.
- * - BArrayState: Represents a single state (array) of data.
+ * - #BArrayStore: The whole array store.
+ * - #BArrayState: Represents a single state (array) of data.
* These can be added using a reference state;
* while this could be considered the previous or parent state,
* no relationship is kept,
- * so the caller is free to add any state from the same BArrayStore as a reference.
+ * so the caller is free to add any state from the same #BArrayStore as a reference.
*
* <pre>
- * <+> BArrayStore: root data-structure,
+ * <+> #BArrayStore: root data-structure,
* | can store many 'states', which share memory.
* |
* | This can store many arrays; however, they must share the same 'stride'.
- * | Arrays of different types will need to use a new BArrayStore.
+ * | Arrays of different types will need to use a new #BArrayStore.
* |
- * +- <+> states (Collection of BArrayState's):
+ * +- <+> states (Collection of #BArrayState's):
* | | Each represents an array added by the user of this API,
* | | and references a chunk_list (each state is a chunk_list user).
* | | Note that the list order has no significance.
* | |
- * | +- <+> chunk_list (BChunkList):
+ * | +- <+> chunk_list (#BChunkList):
* | | The chunks that make up this state.
* | | Each state is a chunk_list user,
* | | which avoids duplicating lists when there is no change between states.
* | |
- * | +- chunk_refs (List of BChunkRef): Each chunk_ref links to a BChunk.
+ * | +- chunk_refs (List of #BChunkRef): Each chunk_ref links to a #BChunk.
* | Each reference is a chunk user,
* | which avoids duplicating smaller chunks of memory found in multiple states.
* |
- * +- info (BArrayInfo):
+ * +- info (#BArrayInfo):
* | Sizes and offsets for this array-store.
* | Also caches some variables for reuse.
* |
- * +- <+> memory (BArrayMemory):
- * | Memory pools for storing BArrayStore data.
+ * +- <+> memory (#BArrayMemory):
+ * | Memory pools for storing #BArrayStore data.
* |
- * +- chunk_list (Pool of BChunkList):
- * | All chunk_lists, (reference counted, used by BArrayState).
+ * +- chunk_list (Pool of #BChunkList):
+ * | All chunk_lists (reference counted, used by #BArrayState).
* |
- * +- chunk_ref (Pool of BChunkRef):
- * | All chunk_refs (link between BChunkList & BChunk).
+ * +- chunk_ref (Pool of #BChunkRef):
+ * | All chunk_refs (link between #BChunkList & #BChunk).
* |
- * +- chunks (Pool of BChunk):
- * All chunks, (reference counted, used by BChunkList).
+ * +- chunks (Pool of #BChunk):
+ * All chunks (reference counted, used by #BChunkList).
* These have their headers hashed for reuse so we can quickly check for duplicates.
* </pre>
*
* De-Duplication
* --------------
*
@@ -71,7 +73,7 @@
* For identical arrays this is all that's needed.
*
* De-duplication is performed on any remaining chunks, by hashing the first few bytes of the chunk
- * (see: BCHUNK_HASH_TABLE_ACCUMULATE_STEPS).
+ * (see: #BCHUNK_HASH_TABLE_ACCUMULATE_STEPS).
*
* \note This is cached for reuse since the referenced data never changes.
*
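For orientation, a minimal usage sketch of the API documented above (not part of this diff; `BLI_array_store_state_remove` and `BLI_array_store_destroy` are assumed from the public header, the other functions appear in the hunks below):

#include "MEM_guardedalloc.h"
#include "BLI_array_store.h"

void example(void)
{
  /* One byte stride, chunks of approx 4096 elements (values illustrative). */
  BArrayStore *bs = BLI_array_store_create(1, 4096);

  char data_a[8192] = {0};
  char data_b[8192] = {0}; /* Imagine this mostly matches data_a. */

  /* The first state has no reference. */
  BArrayState *state_a = BLI_array_store_state_add(bs, data_a, sizeof(data_a), NULL);
  /* The second state de-duplicates chunks against the first. */
  BArrayState *state_b = BLI_array_store_state_add(bs, data_b, sizeof(data_b), state_a);

  /* Expand a state back into a contiguous array. */
  size_t data_len;
  void *data_copy = BLI_array_store_state_data_get_alloc(state_b, &data_len);
  MEM_freeN(data_copy);

  BLI_array_store_state_remove(bs, state_a); /* Assumed API. */
  BLI_array_store_state_remove(bs, state_b); /* Assumed API. */
  BLI_array_store_destroy(bs);               /* Assumed API. */
}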
@@ -93,9 +95,9 @@
#include "BLI_strict_flags.h"
#include "BLI_array_store.h" /* own include */
#include "BLI_array_store.h" /* Own include. */
-/* only for BLI_array_store_is_valid */
+/* Only for #BLI_array_store_is_valid. */
#include "BLI_ghash.h"
/* -------------------------------------------------------------------- */
@@ -169,7 +171,7 @@
#endif
/**
- * Calculate the key once and reuse it
+ * Calculate the key once and reuse it.
*/
#define USE_HASH_TABLE_KEY_CACHE
#ifdef USE_HASH_TABLE_KEY_CACHE
@@ -219,7 +221,7 @@
# define BCHUNK_SIZE_MAX_MUL 2
#endif /* USE_MERGE_CHUNKS */
-/** Slow (keep disabled), but handy for debugging */
+/** Slow (keep disabled), but handy for debugging. */
// #define USE_VALIDATE_LIST_SIZE
// #define USE_VALIDATE_LIST_DATA_PARTIAL
@@ -238,9 +240,9 @@ typedef struct BArrayInfo {
size_t chunk_stride;
// uint chunk_count; /* UNUSED (other values are derived from this) */
-/* pre-calculated */
+/* Pre-calculated. */
size_t chunk_byte_size;
-/* min/max limits (inclusive) */
+/* Min/max limits (inclusive) */
size_t chunk_byte_size_min;
size_t chunk_byte_size_max;
/**
@@ -255,19 +257,19 @@ typedef struct BArrayInfo {
} BArrayInfo;
typedef struct BArrayMemory {
-BLI_mempool *chunk_list; /* BChunkList */
-BLI_mempool *chunk_ref; /* BChunkRef */
-BLI_mempool *chunk; /* BChunk */
+BLI_mempool *chunk_list; /* #BChunkList. */
+BLI_mempool *chunk_ref; /* #BChunkRef. */
+BLI_mempool *chunk; /* #BChunk. */
} BArrayMemory;
/**
- * Main storage for all states
+ * Main storage for all states.
*/
struct BArrayStore {
-/* static */
+/* Static. */
BArrayInfo info;
-/* memory storage */
+/** Memory storage. */
BArrayMemory memory;
/**
@@ -287,14 +289,14 @@ struct BArrayStore {
* it makes it easier to trace invalid usage, so leave as-is for now.
*/
struct BArrayState {
-/** linked list in #BArrayStore.states */
+/** linked list in #BArrayStore.states. */
struct BArrayState *next, *prev;
/** Shared chunk list, this reference must hold a #BChunkList::users. */
struct BChunkList *chunk_list;
};
typedef struct BChunkList {
-/** List of #BChunkRef's */
+/** List of #BChunkRef's. */
ListBase chunk_refs;
/** Result of `BLI_listbase_count(chunks)`, store for reuse. */
uint chunk_refs_len;
@@ -466,15 +468,15 @@ static void bchunk_list_ensure_min_size_last(const BArrayInfo *info,
{
BChunkRef *cref = chunk_list->chunk_refs.last;
if (cref && cref->prev) {
-/* both are decref'd after use (end of this block) */
+/* Both are decref'd after use (end of this block) */
BChunk *chunk_curr = cref->link;
BChunk *chunk_prev = cref->prev->link;
if (MIN2(chunk_prev->data_len, chunk_curr->data_len) < info->chunk_byte_size_min) {
const size_t data_merge_len = chunk_prev->data_len + chunk_curr->data_len;
-/* we could pass, but no need */
+/* We could pass, but no need. */
if (data_merge_len <= info->chunk_byte_size_max) {
-/* we have enough space to merge */
+/* We have enough space to merge. */
/* Remove last from the linked-list. */
BLI_assert(chunk_list->chunk_refs.last != chunk_list->chunk_refs.first);
@@ -498,10 +500,10 @@ static void bchunk_list_ensure_min_size_last(const BArrayInfo *info,
*
* if we do, the code below works (test by setting 'BCHUNK_SIZE_MAX_MUL = 1.2') */
-/* keep chunk on the left hand side a regular size */
+/* Keep chunk on the left hand side a regular size. */
const size_t split = info->chunk_byte_size;
-/* merge and split */
+/* Merge and split. */
const size_t data_prev_len = split;
const size_t data_curr_len = data_merge_len - split;
uchar *data_prev = MEM_mallocN(data_prev_len, __func__);
@@ -510,10 +512,10 @@ static void bchunk_list_ensure_min_size_last(const BArrayInfo *info,
if (data_prev_len <= chunk_prev->data_len) {
const size_t data_curr_shrink_len = chunk_prev->data_len - data_prev_len;
-/* setup 'data_prev' */
+/* Setup 'data_prev'. */
memcpy(data_prev, chunk_prev->data, data_prev_len);
-/* setup 'data_curr' */
+/* Setup 'data_curr'. */
memcpy(data_curr, &chunk_prev->data[data_prev_len], data_curr_shrink_len);
memcpy(&data_curr[data_curr_shrink_len], chunk_curr->data, chunk_curr->data_len);
}
@@ -523,11 +525,11 @@ static void bchunk_list_ensure_min_size_last(const BArrayInfo *info,
const size_t data_prev_grow_len = data_prev_len - chunk_prev->data_len;
-/* setup 'data_prev' */
+/* Setup 'data_prev'. */
memcpy(data_prev, chunk_prev->data, chunk_prev->data_len);
memcpy(&data_prev[chunk_prev->data_len], chunk_curr->data, data_prev_grow_len);
-/* setup 'data_curr' */
+/* Setup 'data_curr'. */
memcpy(data_curr, &chunk_curr->data[data_prev_grow_len], data_curr_len);
}
@@ -538,7 +540,7 @@ static void bchunk_list_ensure_min_size_last(const BArrayInfo *info,
cref->link->users += 1;
}
-/* free zero users */
+/* Free zero users. */
bchunk_decref(bs_mem, chunk_curr);
bchunk_decref(bs_mem, chunk_prev);
}
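To make the merge/split above concrete, a worked example with hypothetical limits (the real values are derived from the stride and chunk count):

/* chunk_byte_size = 4096, chunk_byte_size_min = 2048, chunk_byte_size_max = 8192.
 * - prev = 4000, curr = 300: merged 4300 <= 8192, so keep one 4300 byte chunk.
 * - prev = 8000, curr = 500: merged 8500 > 8192, so split at 4096,
 *   giving data_prev_len = 4096 and data_curr_len = 8500 - 4096 = 4404. */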
@@ -563,8 +565,7 @@ static void bchunk_list_calc_trim_len(const BArrayInfo *info,
size_t data_trim_len = data_len;
#ifdef USE_MERGE_CHUNKS
-/* avoid creating too-small chunks
- * more efficient than merging after */
+/* Avoid creating too-small chunks, which is more efficient than merging after. */
if (data_len > info->chunk_byte_size) {
data_last_chunk_len = (data_trim_len % info->chunk_byte_size);
data_trim_len = data_trim_len - data_last_chunk_len;
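A worked example of the trim calculation (hypothetical values):

/* chunk_byte_size = 4096, data_len = 10000:
 *   data_last_chunk_len = 10000 % 4096 = 1808
 *   data_trim_len       = 10000 - 1808 = 8192 (two full chunks),
 * leaving the 1808 byte remainder to be appended separately. */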
@@ -626,7 +627,7 @@ static void bchunk_list_append_data(const BArrayInfo *info,
if (MIN2(chunk_prev->data_len, data_len) < info->chunk_byte_size_min) {
const size_t data_merge_len = chunk_prev->data_len + data_len;
-/* realloc for single user */
+/* Re-allocate for single user. */
if (cref->link->users == 1) {
uchar *data_merge = MEM_reallocN((void *)cref->link->data, data_merge_len);
memcpy(&data_merge[chunk_prev->data_len], data, data_len);
@@ -651,7 +652,7 @@ static void bchunk_list_append_data(const BArrayInfo *info,
BChunk *chunk = bchunk_new_copydata(bs_mem, data, data_len);
bchunk_list_append_only(bs_mem, chunk_list, chunk);
-/* don't run this, instead preemptively avoid creating a chunk only to merge it (above). */
+/* Don't run this, instead preemptively avoid creating a chunk only to merge it (above). */
#if 0
# ifdef USE_MERGE_CHUNKS
bchunk_list_ensure_min_size_last(info, bs_mem, chunk_list);
@@ -698,8 +699,7 @@ static void bchunk_list_append_data_n(const BArrayInfo *info,
}
}
else {
-/* if we didn't write any chunks previously,
- * we may need to merge with the last. */
+/* If we didn't write any chunks previously, we may need to merge with the last. */
if (data_last_chunk_len) {
bchunk_list_append_data(info, bs_mem, chunk_list, data, data_last_chunk_len);
// i_prev = data_len; /* UNUSED */
@@ -760,7 +760,7 @@ static void bchunk_list_fill_from_array(const BArrayInfo *info,
}
#endif
-/* works but better avoid redundant re-alloc */
+/* Works but better avoid redundant re-allocation. */
#if 0
# ifdef USE_MERGE_CHUNKS
bchunk_list_ensure_min_size_last(info, bs_mem, chunk_list);
@@ -774,7 +774,7 @@ static void bchunk_list_fill_from_array(const BArrayInfo *info,
/** \} */
/*
- * Internal Table Lookup Functions
+ * Internal Table Lookup Functions.
*/
/* -------------------------------------------------------------------- */
@@ -790,7 +790,7 @@ BLI_INLINE hash_key hash_data_single(const uchar p)
return ((HASH_INIT << 5) + HASH_INIT) + (hash_key)(*((signed char *)&p));
}
-/* hash bytes, from BLI_ghashutil_strhash_n */
+/* Hash bytes, from #BLI_ghashutil_strhash_n. */
static hash_key hash_data(const uchar *key, size_t n)
{
const signed char *p;
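Since the hunk cuts off here, a self-contained sketch of the djb2-style loop that `hash_data` implements (assumed to mirror `hash_data_single` above; `((h << 5) + h)` is `h * 33`, the classic djb2 multiplier):

static hash_key hash_data_sketch(const uchar *key, size_t n)
{
  hash_key h = HASH_INIT;
  for (size_t i = 0; i < n; i++) {
    /* Equivalent to h = (h * 33) + byte. */
    h = ((h << 5) + h) + (hash_key)(*(const signed char *)&key[i]);
  }
  return h;
}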
@@ -817,14 +817,14 @@ static void hash_array_from_data(const BArrayInfo *info,
}
}
else {
-/* fast-path for bytes */
+/* Fast-path for bytes. */
for (size_t i = 0; i < data_slice_len; i++) {
hash_array[i] = hash_data_single(data_slice[i]);
}
}
}
-/*
+/**
* Similar to hash_array_from_data,
* but able to step into the next chunk if we run out of data.
*/
@@ -849,7 +849,7 @@ static void hash_array_from_cref(const BArrayInfo *info,
} while ((i < hash_array_len) && (cref != NULL));
/* If this isn't equal, the caller didn't properly check
- * that there was enough data left in all chunks */
+ * that there was enough data left in all chunks. */
BLI_assert(i == hash_array_len);
}
@@ -886,11 +886,11 @@ static void hash_accum_single(hash_key *hash_array, const size_t hash_array_len,
{
BLI_assert(iter_steps <= hash_array_len);
if (UNLIKELY(!(iter_steps <= hash_array_len))) {
-/* while this shouldn't happen, avoid crashing */
+/* While this shouldn't happen, avoid crashing. */
iter_steps = hash_array_len;
}
/* We can increase this value each step to avoid accumulating quite as much
- * while getting the same results as hash_accum */
+ * while getting the same results as hash_accum. */
size_t iter_steps_sub = iter_steps;
while (iter_steps != 0) {
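A sketch of the accumulation pattern referenced above (assumed semantics: each pass folds in the hash `step` entries ahead, so `hash_array[0]` ends up summarizing the whole read-ahead window; see `hash_accum` / `hash_accum_single` for the real stepping):

static void hash_accum_sketch(hash_key *hash_array, const size_t len, size_t steps)
{
  for (size_t step = steps; step != 0; step--) {
    for (size_t i = 0; i + step < len; i++) {
      hash_array[i] += hash_array[i + step];
    }
  }
}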
@@ -906,11 +906,11 @@ static void hash_accum_single(hash_key *hash_array, const size_t hash_array_len,
static hash_key key_from_chunk_ref(const BArrayInfo *info,
const BChunkRef *cref,
-/* avoid reallocating each time */
+/* Avoid reallocating each time. */
hash_key *hash_store,
const size_t hash_store_len)
{
-/* in C, will fill in a reusable array */
+/* In C, will fill in a reusable array. */
BChunk *chunk = cref->link;
BLI_assert((info->accum_read_ahead_bytes * info->chunk_stride) != 0);
@@ -921,14 +921,14 @@ static hash_key key_from_chunk_ref(const BArrayInfo *info,
key = chunk->key;
if (key != HASH_TABLE_KEY_UNSET) {
/* Using key cache!
- * avoids calculating every time */
+ * avoids calculating every time. */
}
else {
hash_array_from_cref(info, cref, info->accum_read_ahead_bytes, hash_store);
hash_accum_single(hash_store, hash_store_len, info->accum_steps);
key = hash_store[0];
-/* cache the key */
+/* Cache the key. */
if (UNLIKELY(key == HASH_TABLE_KEY_UNSET)) {
key = HASH_TABLE_KEY_FALLBACK;
}
@@ -941,7 +941,7 @@ static hash_key key_from_chunk_ref(const BArrayInfo *info,
# endif
return key;
}
-/* corner case - we're too small, calculate the key each time. */
+/* Corner case - we're too small, calculate the key each time. */
hash_array_from_cref(info, cref, info->accum_read_ahead_bytes, hash_store);
hash_accum_single(hash_store, hash_store_len, info->accum_steps);
@@ -964,8 +964,8 @@ static const BChunkRef *table_lookup(const BArrayInfo *info,
const size_t offset,
const hash_key *table_hash_array)
{
-hash_key key = table_hash_array[((offset - i_table_start) / info->chunk_stride)];
-uint key_index = (uint)(key % (hash_key)table_len);
+const hash_key key = table_hash_array[((offset - i_table_start) / info->chunk_stride)];
+const uint key_index = (uint)(key % (hash_key)table_len);
const BTableRef *tref = table[key_index];
if (tref != NULL) {
const size_t size_left = data_len - offset;
@@ -978,7 +978,7 @@ static const BChunkRef *table_lookup(const BArrayInfo *info,
BChunk *chunk_test = cref->link;
if (chunk_test->data_len <= size_left) {
if (bchunk_data_compare_unchecked(chunk_test, data, data_len, offset)) {
-/* we could remove the chunk from the table, to avoid multiple hits */
+/* We could remove the chunk from the table, to avoid multiple hits. */
return cref;
}
}
@@ -990,7 +990,7 @@ static const BChunkRef *table_lookup(const BArrayInfo *info,
#else /* USE_HASH_TABLE_ACCUMULATE */
-/* NON USE_HASH_TABLE_ACCUMULATE code (simply hash each chunk) */
+/* NON USE_HASH_TABLE_ACCUMULATE code (simply hash each chunk). */
static hash_key key_from_chunk_ref(const BArrayInfo *info, const BChunkRef *cref)
{
@@ -1002,10 +1002,10 @@ static hash_key key_from_chunk_ref(const BArrayInfo *info, const BChunkRef *cref
key = chunk->key;
if (key != HASH_TABLE_KEY_UNSET) {
/* Using key cache!
- * avoids calculating every time */
+ * avoids calculating every time. */
}
else {
-/* cache the key */
+/* Cache the key. */
key = hash_data(chunk->data, data_hash_len);
if (key == HASH_TABLE_KEY_UNSET) {
key = HASH_TABLE_KEY_FALLBACK;
@@ -1030,9 +1030,9 @@ static const BChunkRef *table_lookup(const BArrayInfo *info,
{
const size_t data_hash_len = BCHUNK_HASH_LEN * info->chunk_stride; /* TODO: cache. */
-size_t size_left = data_len - offset;
-hash_key key = hash_data(&data[offset], MIN2(data_hash_len, size_left));
-uint key_index = (uint)(key % (hash_key)table_len);
+const size_t size_left = data_len - offset;
+const hash_key key = hash_data(&data[offset], MIN2(data_hash_len, size_left));
+const uint key_index = (uint)(key % (hash_key)table_len);
for (BTableRef *tref = table[key_index]; tref; tref = tref->next) {
const BChunkRef *cref = tref->cref;
# ifdef USE_HASH_TABLE_KEY_CACHE
@@ -1042,7 +1042,7 @@ static const BChunkRef *table_lookup(const BArrayInfo *info,
BChunk *chunk_test = cref->link;
if (chunk_test->data_len <= size_left) {
if (bchunk_data_compare_unchecked(chunk_test, data, data_len, offset)) {
-/* we could remove the chunk from the table, to avoid multiple hits */
+/* We could remove the chunk from the table, to avoid multiple hits. */
return cref;
}
}
@@ -1118,7 +1118,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
#endif /* USE_FASTPATH_CHUNKS_FIRST */
-/* Copy until we have a mismatch */
+/* Copy until we have a mismatch. */
BChunkList *chunk_list = bchunk_list_new(bs_mem, data_len_original);
if (cref_match_first != NULL) {
size_t chunk_size_step = 0;
@@ -1134,7 +1134,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
}
cref = cref->next;
}
-/* happens when bytes are removed from the end of the array */
+/* Happens when bytes are removed from the end of the array. */
if (chunk_size_step == data_len_original) {
return chunk_list;
}
@@ -1148,17 +1148,16 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
/* ------------------------------------------------------------------------
* Fast-Path for end chunks
*
- * Check for trailing chunks
+ * Check for trailing chunks.
*/
/* In this case use 'chunk_list_reference_last' to define the last index
- * index_match_last = -1 */
+ * `index_match_last = -1`. */
-/* warning, from now on don't use len(data)
- * since we want to ignore chunks already matched */
+/* Warning, from now on don't use len(data) since we want to ignore chunks already matched. */
size_t data_len = data_len_original;
#define data_len_original invalid_usage
-#ifdef data_len_original /* quiet warning */
+#ifdef data_len_original /* Quiet warning. */
#endif
const BChunkRef *chunk_list_reference_last = NULL;
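The `#define data_len_original invalid_usage` above is a poisoning trick: any later use of `data_len_original` expands to the undeclared identifier `invalid_usage` and fails to compile. A minimal illustration (hypothetical code, mirroring the pattern in this hunk):

void poison_example(void)
{
  size_t data_len_original = 100;
  size_t data_len = data_len_original; /* OK: used before the define. */
#define data_len_original invalid_usage
  /* Any use of data_len_original from here on is a compile error,
   * since it now expands to the undeclared `invalid_usage`. */
  (void)data_len;
#undef data_len_original
}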
@@ -1198,7 +1197,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
#ifdef USE_ALIGN_CHUNKS_TEST
if (chunk_list->total_expanded_size == chunk_list_reference->total_expanded_size) {
-/* if we're already a quarter aligned */
+/* If we're already a quarter aligned. */
if (data_len - i_prev <= chunk_list->total_expanded_size / 4) {
use_aligned = true;
}
@@ -1212,7 +1211,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
* ----------------------- */
if (use_aligned) {
-/* Copy matching chunks, creates using the same 'layout' as the reference */
+/* Copy matching chunks, created using the same 'layout' as the reference. */
const BChunkRef *cref = cref_match_first ? cref_match_first->next :
chunk_list_reference->chunk_refs.first;
while (i_prev != data_len) {
@@ -1241,12 +1240,12 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
(chunk_list_reference->chunk_refs.first != NULL)) {
/* --------------------------------------------------------------------
- * Non-Aligned Chunk De-Duplication */
+ * Non-Aligned Chunk De-Duplication. */
-/* only create a table if we have at least one chunk to search
+/* Only create a table if we have at least one chunk to search
* otherwise just make a new one.
*
- * Support re-arranged chunks */
+ * Support re-arranged chunks. */
#ifdef USE_HASH_TABLE_ACCUMULATE
size_t i_table_start = i_prev;
@@ -1257,7 +1256,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
hash_accum(table_hash_array, table_hash_array_len, info->accum_steps);
#else
-/* dummy vars */
+/* Dummy vars. */
uint i_table_start = 0;
hash_key *table_hash_array = NULL;
#endif
@@ -1272,8 +1271,8 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
const size_t table_len = chunk_list_reference_remaining_len * BCHUNK_HASH_TABLE_MUL;
BTableRef **table = MEM_callocN(table_len * sizeof(*table), __func__);
-/* table_make - inline
- * include one matching chunk, to allow for repeating values */
+/* Table_make - inline
+ * include one matching chunk, to allow for repeating values. */
{
#ifdef USE_HASH_TABLE_ACCUMULATE
const size_t hash_store_len = info->accum_read_ahead_len;
@@ -1315,7 +1314,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
hash_store_len
#endif
);
-uint key_index = (uint)(key % (hash_key)table_len);
+const uint key_index = (uint)(key % (hash_key)table_len);
BTableRef *tref_prev = table[key_index];
BLI_assert(table_ref_stack_n < chunk_list_reference_remaining_len);
#ifdef USE_HASH_TABLE_DEDUPLICATE
@@ -1361,7 +1360,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
MEM_freeN(hash_store);
#endif
}
-/* done making the table */
+/* Done making the table. */
BLI_assert(i_prev <= data_len);
for (size_t i = i_prev; i < data_len;) {
@@ -1376,7 +1375,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
i_prev = i;
}
-/* now add the reference chunk */
+/* Now add the reference chunk. */
{
BChunk *chunk_found = cref_found->link;
i += chunk_found->data_len;
@@ -1387,7 +1386,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
ASSERT_CHUNKLIST_SIZE(chunk_list, i_prev);
ASSERT_CHUNKLIST_DATA(chunk_list, data);
-/* its likely that the next chunk in the list will be a match, so check it! */
+/* It's likely that the next chunk in the list will be a match, so check it! */
while (!ELEM(cref_found->next, NULL, chunk_list_reference_last)) {
cref_found = cref_found->next;
BChunk *chunk_found = cref_found->link;
@@ -1397,7 +1396,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
* repeating memory where it would be useful to re-use chunks. */
i += chunk_found->data_len;
bchunk_list_append(info, bs_mem, chunk_list, chunk_found);
-/* chunk_found may be freed! */
+/* `chunk_found` may be freed! */
i_prev = i;
BLI_assert(i_prev <= data_len);
ASSERT_CHUNKLIST_SIZE(chunk_list, i_prev);
@@ -1440,14 +1439,13 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
#ifdef USE_FASTPATH_CHUNKS_LAST
if (chunk_list_reference_last != NULL) {
-/* write chunk_list_reference_last since it hasn't been written yet */
+/* Write chunk_list_reference_last since it hasn't been written yet. */
const BChunkRef *cref = chunk_list_reference_last;
while (cref != NULL) {
BChunk *chunk = cref->link;
// BLI_assert(bchunk_data_compare(chunk, data, data_len, i_prev));
i_prev += chunk->data_len;
-/* use simple since we assume the references chunks
- * have already been sized correctly. */
+/* Use simple since we assume the referenced chunks have already been sized correctly. */
bchunk_list_append_only(bs_mem, chunk_list, chunk);
ASSERT_CHUNKLIST_DATA(chunk_list, data);
cref = cref->next;
@@ -1459,7 +1457,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
BLI_assert(i_prev == data_len_original);
-/* check we're the correct size and that we didn't accidentally modify the reference */
+/* Check we're the correct size and that we didn't accidentally modify the reference. */
ASSERT_CHUNKLIST_SIZE(chunk_list, data_len_original);
ASSERT_CHUNKLIST_SIZE(chunk_list_reference, chunk_list_reference->total_expanded_size);
@@ -1467,7 +1465,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
return chunk_list;
}
-/* end private API */
+/* End private API. */
/** \} */
@@ -1521,7 +1519,7 @@ BArrayStore *BLI_array_store_create(uint stride, uint chunk_count)
bs->memory.chunk_list = BLI_mempool_create(sizeof(BChunkList), 0, 512, BLI_MEMPOOL_NOP);
bs->memory.chunk_ref = BLI_mempool_create(sizeof(BChunkRef), 0, 512, BLI_MEMPOOL_NOP);
-/* allow iteration to simplify freeing, otherwise its not needed
+/* Allow iteration to simplify freeing, otherwise it's not needed
* (we could loop over all states as an alternative). */
bs->memory.chunk = BLI_mempool_create(sizeof(BChunk), 0, 512, BLI_MEMPOOL_ALLOW_ITER);
@@ -1532,7 +1530,7 @@ BArrayStore *BLI_array_store_create(uint stride, uint chunk_count)
static void array_store_free_data(BArrayStore *bs)
{
-/* free chunk data */
+/* Free chunk data. */
{
BLI_mempool_iter iter;
BChunk *chunk;
@@ -1543,7 +1541,7 @@ static void array_store_free_data(BArrayStore *bs)
}
}
-/* free states */
+/* Free states. */
for (BArrayState *state = bs->states.first, *state_next; state; state = state_next) {
state_next = state->next;
MEM_freeN(state);
@@ -1611,7 +1609,7 @@ BArrayState *BLI_array_store_state_add(BArrayStore *bs,
const size_t data_len,
const BArrayState *state_reference)
{
-/* ensure we're aligned to the stride */
+/* Ensure we're aligned to the stride. */
BLI_assert((data_len % bs->info.chunk_stride) == 0);
#ifdef USE_PARANOID_CHECKS
@@ -1626,7 +1624,7 @@ BArrayState *BLI_array_store_state_add(BArrayStore *bs,
&bs->memory,
(const uchar *)data,
data_len,
-/* re-use reference chunks */
+/* Re-use reference chunks. */
state_reference->chunk_list);
}
else {
@@ -1703,7 +1701,7 @@ void *BLI_array_store_state_data_get_alloc(BArrayState *state, size_t *r_data_le
/** \name Debugging API (for testing).
* \{ */
-/* only for test validation */
+/* Only for test validation. */
static size_t bchunk_list_size(const BChunkList *chunk_list)
{
size_t total_expanded_size = 0;
@@ -1731,7 +1729,7 @@ bool BLI_array_store_is_valid(BArrayStore *bs)
}
#ifdef USE_MERGE_CHUNKS
-/* ensure we merge all chunks that could be merged */
+/* Ensure we merge all chunks that could be merged. */
if (chunk_list->total_expanded_size > bs->info.chunk_byte_size_min) {
LISTBASE_FOREACH (BChunkRef *, cref, &chunk_list->chunk_refs) {
if (cref->link->data_len < bs->info.chunk_byte_size_min) {
@@ -1770,7 +1768,7 @@ bool BLI_array_store_is_valid(BArrayStore *bs)
} \
((void)0)
-/* count chunk_list's */
+/* Count chunk_list's. */
GHash *chunk_list_map = BLI_ghash_ptr_new(__func__);
GHash *chunk_map = BLI_ghash_ptr_new(__func__);
@@ -1791,7 +1789,7 @@ bool BLI_array_store_is_valid(BArrayStore *bs)
goto user_finally;
}
-/* count chunk's */
+/* Count chunk's. */
GHASH_ITER (gh_iter, chunk_list_map) {
const struct BChunkList *chunk_list = BLI_ghashIterator_getKey(&gh_iter);
LISTBASE_FOREACH (const BChunkRef *, cref, &chunk_list->chunk_refs) {

View File

@@ -81,7 +81,7 @@ static size_t array_chunk_size_calc(const size_t stride)
{
/* Return a chunk size that targets a size in bytes;
* this is done so boolean arrays don't add so much overhead and
- * larger arrays aren't unreasonably big, see: #105205. */
+ * larger arrays aren't so big as to waste memory, see: #105205. */
return std::max(ARRAY_CHUNK_NUM_MIN, ARRAY_CHUNK_SIZE_IN_BYTES / power_of_2_max_i(stride));
}