/* SPDX-FileCopyrightText: 2023 Blender Authors
 *
 * SPDX-License-Identifier: GPL-2.0-or-later */

#pragma once

/** \file
 * \ingroup bli
 *
 * A `GVectorArray` is a container for a fixed number of dynamically growing vectors with a
 * generic data type. Its main use case is to store many small vectors with few separate
 * allocations. Using this structure is generally more efficient than allocating each vector
 * separately.
 */
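/* A minimal usage sketch (not part of the original documentation; it assumes the `CPPType` for
 * `int` is available through `CPPType::get<int>()` from `BLI_cpp_type.hh`):
 *
 *   const CPPType &int_type = CPPType::get<int>();
 *   GVectorArray vectors{int_type, 4};  // Four empty vectors of ints.
 *
 *   const int value = 42;
 *   vectors.append(0, &value);          // Append one element to vector 0.
 *
 *   const std::array<int, 3> more = {1, 2, 3};
 *   vectors.extend(1, Span<int>(more));  // Append three elements to vector 1 in one call.
 *
 *   GMutableSpan first = vectors[0];  // Generic, mutable view of vector 0.
 */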
#include "BLI_array.hh"
#include "BLI_generic_virtual_vector_array.hh"
#include "BLI_linear_allocator.hh"

namespace blender {

/* An array of vectors containing elements of a generic type. */
class GVectorArray : NonCopyable, NonMovable {
 private:
  struct Item {
    void *start = nullptr;
    int64_t length = 0;
    int64_t capacity = 0;
  };

  /* Use a linear allocator to pack many small vectors together. Currently, memory from
   * reallocated vectors is not reused. This can be improved in the future. */
  LinearAllocator<> allocator_;

  /* The data type of individual elements. */
  const CPPType &type_;

  /* The size of an individual element. This is inlined from `type_.size()` for easier access. */
  const int64_t element_size_;

  /* The individual vectors. */
  Array<Item> items_;

 public:
  GVectorArray() = delete;

  GVectorArray(const CPPType &type, int64_t array_size);

  ~GVectorArray();

  int64_t size() const
  {
    return items_.size();
  }

  bool is_empty() const
  {
    return items_.is_empty();
  }

  const CPPType &type() const
  {
    return type_;
  }

  void append(int64_t index, const void *value);

  /* Add multiple elements to a single vector. */
  void extend(int64_t index, const GVArray &values);
  void extend(int64_t index, GSpan values);

  /* Add multiple elements to multiple vectors. See the usage sketch below this class. */
  void extend(const IndexMask &mask, const GVVectorArray &values);
  void extend(const IndexMask &mask, const GVectorArray &values);

  void clear(const IndexMask &mask);

  GMutableSpan operator[](int64_t index);
  GSpan operator[](int64_t index) const;

 private:
  void realloc_to_at_least(Item &item, int64_t min_capacity);
};
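/* Sketch of the masked operations above (hypothetical example; `vectors` and `other` are assumed
 * to be `GVectorArray`s of matching type and size, and `IndexMask` is assumed to convert from an
 * `IndexRange`):
 *
 *   vectors.extend(IndexMask(IndexRange(2)), other);  // Append `other[i]` to `vectors[i]`, i < 2.
 *   vectors.clear(IndexMask(IndexRange(2)));          // Empty vectors 0 and 1 again.
 */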
/* A non-owning typed mutable reference to a `GVectorArray`. It simplifies access when the type of
 * the data is known at compile time. */
template<typename T> class GVectorArray_TypedMutableRef {
 private:
  GVectorArray *vector_array_;

 public:
  GVectorArray_TypedMutableRef(GVectorArray &vector_array) : vector_array_(&vector_array)
  {
    BLI_assert(vector_array_->type().is<T>());
  }

  int64_t size() const
  {
    return vector_array_->size();
  }

  bool is_empty() const
  {
    return vector_array_->is_empty();
  }

  void append(const int64_t index, const T &value)
  {
    vector_array_->append(index, &value);
  }

  void extend(const int64_t index, const Span<T> values)
  {
    vector_array_->extend(index, values);
  }

  void extend(const int64_t index, const VArray<T> &values)
  {
    vector_array_->extend(index, values);
  }

  MutableSpan<T> operator[](const int64_t index)
  {
    return (*vector_array_)[index].typed<T>();
  }
};
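/* Sketch of typed access through the reference above (hypothetical `vectors` that stores ints):
 *
 *   GVectorArray_TypedMutableRef<int> typed{vectors};
 *   typed.append(0, 42);
 *   typed.extend(1, Span<int>({7, 8}));  // The temporary span is only read during the call.
 *   MutableSpan<int> first = typed[0];
 */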
/* A generic virtual vector array implementation for a `GVectorArray`. */
class GVVectorArray_For_GVectorArray : public GVVectorArray {
 private:
  const GVectorArray &vector_array_;

 public:
  GVVectorArray_For_GVectorArray(const GVectorArray &vector_array)
      : GVVectorArray(vector_array.type(), vector_array.size()), vector_array_(vector_array)
  {
  }

 protected:
  int64_t get_vector_size_impl(const int64_t index) const override
  {
    return vector_array_[index].size();
  }

  void get_vector_element_impl(const int64_t index,
                               const int64_t index_in_vector,
                               void *r_value) const override
  {
    type_->copy_assign(vector_array_[index][index_in_vector], r_value);
  }
};
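/* Sketch of exposing a `GVectorArray` through the virtual interface (hypothetical `src` and `dst`
 * of matching type; `dst` has at least `src.size()` vectors):
 *
 *   GVVectorArray_For_GVectorArray virtual_src{src};
 *   dst.extend(IndexMask(IndexRange(src.size())), virtual_src);  // Element-wise concatenation.
 */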
}  // namespace blender