/* SPDX-FileCopyrightText: 2023 Blender Authors
 *
 * SPDX-License-Identifier: GPL-2.0-or-later */

#include "FN_multi_function_procedure_executor.hh"

#include "BLI_stack.hh"

namespace blender::fn::multi_function {

ProcedureExecutor::ProcedureExecutor(const Procedure &procedure) : procedure_(procedure)
{
  SignatureBuilder builder("Procedure Executor", signature_);

  for (const ConstParameter &param : procedure.params()) {
    builder.add("Parameter", ParamType(param.type, param.variable->data_type()));
  }

  this->set_signature(&signature_);
}
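
/* A hedged usage sketch (illustrative only, not part of this file's API surface): a procedure is
 * typically executed by wrapping it in a #ProcedureExecutor and calling it like any other
 * multi-function. The `procedure`, `mask`, input and output names below are assumptions made for
 * the example.
 *
 *   ProcedureExecutor executor{procedure};
 *   ParamsBuilder params{executor, &mask};
 *   params.add_readonly_single_input(some_input_varray);
 *   params.add_uninitialized_single_output(some_output_span);
 *   ContextBuilder context;
 *   executor.call(mask, params, context);
 */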

using IndicesSplitVectors = std::array<Vector<int64_t>, 2>;

namespace {
enum class ValueType {
  GVArray = 0,
  Span = 1,
  GVVectorArray = 2,
  GVectorArray = 3,
  OneSingle = 4,
  OneVector = 5,
};
constexpr int tot_variable_value_types = 6;
}  // namespace

/**
 * During evaluation, a variable may be stored in various different forms, depending on what
 * instructions do with the variables.
 */
struct VariableValue {
  ValueType type;

  VariableValue(ValueType type) : type(type) {}
};

/* This variable is the unmodified virtual array from the caller. */
struct VariableValue_GVArray : public VariableValue {
  static constexpr ValueType static_type = ValueType::GVArray;
  const GVArray &data;

  VariableValue_GVArray(const GVArray &data) : VariableValue(static_type), data(data)
  {
    BLI_assert(data);
  }
};

/* This variable has a different value for every index. Some values may be uninitialized. The span
 * may be owned by the caller. */
struct VariableValue_Span : public VariableValue {
  static constexpr ValueType static_type = ValueType::Span;
  void *data;
  bool owned;

  VariableValue_Span(void *data, bool owned) : VariableValue(static_type), data(data), owned(owned)
  {
  }
};

/* This variable is the unmodified virtual vector array from the caller. */
struct VariableValue_GVVectorArray : public VariableValue {
  static constexpr ValueType static_type = ValueType::GVVectorArray;
  const GVVectorArray &data;

  VariableValue_GVVectorArray(const GVVectorArray &data) : VariableValue(static_type), data(data)
  {
  }
};

/* This variable has a different vector for every index. */
struct VariableValue_GVectorArray : public VariableValue {
  static constexpr ValueType static_type = ValueType::GVectorArray;
  GVectorArray &data;
  bool owned;

  VariableValue_GVectorArray(GVectorArray &data, bool owned)
      : VariableValue(static_type), data(data), owned(owned)
  {
  }
};

/* This variable has the same value for every index. */
struct VariableValue_OneSingle : public VariableValue {
  static constexpr ValueType static_type = ValueType::OneSingle;
  void *data;
  bool is_initialized = false;

  VariableValue_OneSingle(void *data) : VariableValue(static_type), data(data) {}
};

/* This variable has the same vector for every index. */
struct VariableValue_OneVector : public VariableValue {
  static constexpr ValueType static_type = ValueType::OneVector;
  GVectorArray &data;

  VariableValue_OneVector(GVectorArray &data) : VariableValue(static_type), data(data) {}
};

static_assert(std::is_trivially_destructible_v<VariableValue_GVArray>);
static_assert(std::is_trivially_destructible_v<VariableValue_Span>);
static_assert(std::is_trivially_destructible_v<VariableValue_GVVectorArray>);
static_assert(std::is_trivially_destructible_v<VariableValue_GVectorArray>);
static_assert(std::is_trivially_destructible_v<VariableValue_OneSingle>);
static_assert(std::is_trivially_destructible_v<VariableValue_OneVector>);
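
/* Illustrative note (a sketch, grounded in the conversions implemented further down in
 * #VariableState): a caller-provided input usually starts out as #VariableValue_GVArray or
 * #VariableValue_GVVectorArray. When an instruction needs to write per-index values, the state is
 * converted to an owned #VariableValue_Span or #VariableValue_GVectorArray. Variables that
 * provably hold one value for all indices can instead be stored as #VariableValue_OneSingle or
 * #VariableValue_OneVector. */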

class VariableState;

/**
 * The #ValueAllocator is responsible for providing memory for variables and their values. It also
 * manages the reuse of buffers to improve performance.
 */
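/* A hedged usage sketch (illustrative only; `CPPType::get<int>()` and `DataType::ForSingle()` are
 * assumed to be available from the surrounding BLI/FN headers):
 *
 *   LinearAllocator<> allocator;
 *   ValueAllocator value_allocator{allocator};
 *   VariableValue_Span *value = value_allocator.obtain_Span(CPPType::get<int>(), 100);
 *   // ... use the buffer, destructing any constructed elements before releasing ...
 *   value_allocator.release_value(value, DataType::ForSingle(CPPType::get<int>()));
 */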
class ValueAllocator : NonCopyable, NonMovable {
 private:
  /**
   * Allocate with 64 byte alignment for better reusability of buffers and improved cache
   * performance.
   */
  static constexpr int min_alignment = 64;

  /** All buffers in the free-lists below have been allocated with this allocator. */
  LinearAllocator<> &linear_allocator_;

  /**
   * Use stacks so that the most recently used buffers are reused first. This improves cache
   * efficiency.
   */
  std::array<Stack<VariableValue *>, tot_variable_value_types> variable_value_free_lists_;

  /**
   * The integer key is the size of one element (e.g. 4 for an integer buffer). All buffers are
   * aligned to #min_alignment bytes.
   */
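  /* For example (illustrative, assuming a 64-byte `float4x4`): a released span buffer for
   * `float4x4` ends up in the free-list under key 64 and can later be reused for any other
   * 64-byte element type, while small element types (at most #small_value_max_size bytes, such as
   * `int`) share #small_span_buffers_free_list_ below. */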
  Stack<void *> small_span_buffers_free_list_;
  Map<int, Stack<void *>> span_buffers_free_lists_;

  /** Cache buffers for single values of different types. */
  static constexpr int small_value_max_size = 16;
  static constexpr int small_value_max_alignment = 8;
  Stack<void *> small_single_value_free_list_;
  Map<const CPPType *, Stack<void *>> single_value_free_lists_;

 public:
  ValueAllocator(LinearAllocator<> &linear_allocator) : linear_allocator_(linear_allocator) {}

  VariableValue_GVArray *obtain_GVArray(const GVArray &varray)
  {
    return this->obtain<VariableValue_GVArray>(varray);
  }

  VariableValue_GVVectorArray *obtain_GVVectorArray(const GVVectorArray &varray)
  {
    return this->obtain<VariableValue_GVVectorArray>(varray);
  }

  VariableValue_Span *obtain_Span_not_owned(void *buffer)
  {
    return this->obtain<VariableValue_Span>(buffer, false);
  }

  VariableValue_Span *obtain_Span(const CPPType &type, int size)
  {
    void *buffer = nullptr;

    const int64_t element_size = type.size;
    const int64_t alignment = type.alignment;

    if (alignment > min_alignment) {
      /* In this rare case we fall back to not reusing existing buffers. */
      buffer = linear_allocator_.allocate(element_size * size, alignment);
    }
    else {
      Stack<void *> *stack = type.can_exist_in_buffer(small_value_max_size,
                                                      small_value_max_alignment) ?
                                 &small_span_buffers_free_list_ :
                                 span_buffers_free_lists_.lookup_ptr(element_size);
      if (stack == nullptr || stack->is_empty()) {
        buffer = linear_allocator_.allocate(
            std::max<int64_t>(element_size, small_value_max_size) * size, min_alignment);
      }
      else {
        /* Reuse existing buffer. */
        buffer = stack->pop();
      }
    }

    return this->obtain<VariableValue_Span>(buffer, true);
  }

  VariableValue_GVectorArray *obtain_GVectorArray_not_owned(GVectorArray &data)
  {
    return this->obtain<VariableValue_GVectorArray>(data, false);
  }

  VariableValue_GVectorArray *obtain_GVectorArray(const CPPType &type, int size)
  {
    GVectorArray *vector_array = new GVectorArray(type, size);
    return this->obtain<VariableValue_GVectorArray>(*vector_array, true);
  }

  VariableValue_OneSingle *obtain_OneSingle(const CPPType &type)
  {
    const bool is_small = type.can_exist_in_buffer(small_value_max_size,
                                                   small_value_max_alignment);
    Stack<void *> &stack = is_small ? small_single_value_free_list_ :
                                      single_value_free_lists_.lookup_or_add_default(&type);
    void *buffer;
    if (stack.is_empty()) {
      buffer = linear_allocator_.allocate(
          std::max<int>(small_value_max_size, type.size),
          std::max<int>(small_value_max_alignment, type.alignment));
    }
    else {
      buffer = stack.pop();
    }
    return this->obtain<VariableValue_OneSingle>(buffer);
  }

  VariableValue_OneVector *obtain_OneVector(const CPPType &type)
  {
    GVectorArray *vector_array = new GVectorArray(type, 1);
    return this->obtain<VariableValue_OneVector>(*vector_array);
  }
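
  /* Design note (derived from the code above and from #release_value below): span and single
   * value buffers come from #linear_allocator_ and are pooled in the free-lists for reuse, while
   * #GVectorArray based values are heap-allocated with `new` here and destroyed with `delete`
   * when they are released. */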

  void release_value(VariableValue *value, const DataType &data_type)
  {
    switch (value->type) {
      case ValueType::GVArray: {
        break;
      }
      case ValueType::Span: {
        auto *value_typed = static_cast<VariableValue_Span *>(value);
        if (value_typed->owned) {
          const CPPType &type = data_type.single_type();
          /* Assumes all values in the buffer are uninitialized already. */
          Stack<void *> &buffers = type.can_exist_in_buffer(small_value_max_size,
                                                            small_value_max_alignment) ?
                                       small_span_buffers_free_list_ :
                                       span_buffers_free_lists_.lookup_or_add_default(type.size);
          buffers.push(value_typed->data);
        }
        break;
      }
      case ValueType::GVVectorArray: {
        break;
      }
      case ValueType::GVectorArray: {
        auto *value_typed = static_cast<VariableValue_GVectorArray *>(value);
        if (value_typed->owned) {
          delete &value_typed->data;
        }
        break;
      }
      case ValueType::OneSingle: {
        auto *value_typed = static_cast<VariableValue_OneSingle *>(value);
        const CPPType &type = data_type.single_type();
        if (value_typed->is_initialized) {
          type.destruct(value_typed->data);
        }
        const bool is_small = type.can_exist_in_buffer(small_value_max_size,
                                                       small_value_max_alignment);
        if (is_small) {
          small_single_value_free_list_.push(value_typed->data);
        }
        else {
          single_value_free_lists_.lookup_or_add_default(&type).push(value_typed->data);
        }
        break;
      }
      case ValueType::OneVector: {
        auto *value_typed = static_cast<VariableValue_OneVector *>(value);
        delete &value_typed->data;
        break;
      }
    }

    Stack<VariableValue *> &stack = variable_value_free_lists_[int(value->type)];
    stack.push(value);
  }

 private:
  template<typename T, typename... Args> T *obtain(Args &&...args)
  {
    static_assert(std::is_base_of_v<VariableValue, T>);
    Stack<VariableValue *> &stack = variable_value_free_lists_[int(T::static_type)];
    if (stack.is_empty()) {
      void *buffer = linear_allocator_.allocate(sizeof(T), alignof(T));
      return new (buffer) T(std::forward<Args>(args)...);
    }
    return new (stack.pop()) T(std::forward<Args>(args)...);
  }
};

/**
 * This class keeps track of a single variable during evaluation.
 */
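/* A hedged lifecycle sketch (illustrative only; the calls below are the member functions defined
 * in this class, but the order shown is just one typical sequence): a freshly created state has
 * no value yet. An instruction that writes the variable calls #add_as_output(), which obtains a
 * mutable buffer from the #ValueAllocator and marks the touched indices as initialized. Later
 * instructions read the variable through #add_as_input() or #add_as_mutable(), and once it is no
 * longer needed, #destruct_value() returns the storage to the allocator. */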
class VariableState : NonCopyable, NonMovable {
 public:
  /** The current value of the variable. The storage format may change over time. */
  VariableValue *value_ = nullptr;
  /** Number of indices that are currently initialized in this variable. */
  int tot_initialized_ = 0;
  /* This is a non-owning pointer to either a span buffer or a #GVectorArray, or null. */
  void *caller_provided_storage_ = nullptr;

  void destruct_value(ValueAllocator &value_allocator, const DataType &data_type)
  {
    value_allocator.release_value(value_, data_type);
    value_ = nullptr;
  }

  /* True if this contains only one value for all indices, i.e. the value for all indices is
   * the same. */
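  /* For example (illustrative): a variable backed by a #GVArray that wraps a single constant
   * reports `is_one() == true`, while a span that already holds initialized per-index values does
   * not. */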
  bool is_one() const
  {
    if (value_ == nullptr) {
      return true;
    }
    switch (value_->type) {
      case ValueType::GVArray:
        return this->value_as<VariableValue_GVArray>()->data.is_single();
      case ValueType::Span:
        return tot_initialized_ == 0;
      case ValueType::GVVectorArray:
        return this->value_as<VariableValue_GVVectorArray>()->data.is_single_vector();
      case ValueType::GVectorArray:
        return tot_initialized_ == 0;
      case ValueType::OneSingle:
        return true;
      case ValueType::OneVector:
        return true;
    }
    BLI_assert_unreachable();
    return false;
  }

  bool is_fully_initialized(const IndexMask &full_mask)
  {
    return tot_initialized_ == full_mask.size();
  }

  bool is_fully_uninitialized(const IndexMask &full_mask)
  {
    UNUSED_VARS(full_mask);
    return tot_initialized_ == 0;
  }

  void add_as_input(ParamsBuilder &params, const IndexMask &mask, const DataType &data_type) const
  {
    /* Sanity check to make sure that enough values are initialized. */
    BLI_assert(mask.size() <= tot_initialized_);
    BLI_assert(value_ != nullptr);

    switch (value_->type) {
      case ValueType::GVArray: {
        params.add_readonly_single_input(this->value_as<VariableValue_GVArray>()->data);
        break;
      }
      case ValueType::Span: {
        const void *data = this->value_as<VariableValue_Span>()->data;
        const GSpan span{data_type.single_type(), data, mask.min_array_size()};
        params.add_readonly_single_input(span);
        break;
      }
      case ValueType::GVVectorArray: {
        params.add_readonly_vector_input(this->value_as<VariableValue_GVVectorArray>()->data);
        break;
      }
      case ValueType::GVectorArray: {
        params.add_readonly_vector_input(this->value_as<VariableValue_GVectorArray>()->data);
        break;
      }
      case ValueType::OneSingle: {
        const auto *value_typed = this->value_as<VariableValue_OneSingle>();
        BLI_assert(value_typed->is_initialized);
        const GPointer gpointer{data_type.single_type(), value_typed->data};
        params.add_readonly_single_input(gpointer);
        break;
      }
      case ValueType::OneVector: {
        params.add_readonly_vector_input(this->value_as<VariableValue_OneVector>()->data[0]);
        break;
      }
    }
  }
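
  /* Note (a summary of the surrounding methods, phrased as an observation rather than a
   * contract): #add_as_input() only reads the current storage form, whereas #add_as_mutable() and
   * #add_as_output() below first call #ensure_is_mutable() so that the variable is backed by an
   * owned span or #GVectorArray before it is exposed for writing. */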

  void ensure_is_mutable(const IndexMask &full_mask,
                         const DataType &data_type,
                         ValueAllocator &value_allocator)
  {
    if (value_ != nullptr && ELEM(value_->type, ValueType::Span, ValueType::GVectorArray)) {
      return;
    }

    const int array_size = full_mask.min_array_size();

    switch (data_type.category()) {
      case DataType::Single: {
        const CPPType &type = data_type.single_type();
        VariableValue_Span *new_value = nullptr;
        if (caller_provided_storage_ == nullptr) {
          new_value = value_allocator.obtain_Span(type, array_size);
        }
        else {
          /* Reuse the storage provided by the caller when possible. */
          new_value = value_allocator.obtain_Span_not_owned(caller_provided_storage_);
        }
        if (value_ != nullptr) {
          if (value_->type == ValueType::GVArray) {
            /* Fill new buffer with data from virtual array. */
            this->value_as<VariableValue_GVArray>()->data.materialize_to_uninitialized(
                full_mask, new_value->data);
          }
          else if (value_->type == ValueType::OneSingle) {
            auto *old_value_typed_ = this->value_as<VariableValue_OneSingle>();
            if (old_value_typed_->is_initialized) {
              /* Fill the buffer with a single value. */
              type.fill_construct_indices(old_value_typed_->data, new_value->data, full_mask);
            }
          }
          else {
            BLI_assert_unreachable();
          }
          value_allocator.release_value(value_, data_type);
        }
        value_ = new_value;
        break;
      }
      case DataType::Vector: {
        const CPPType &type = data_type.vector_base_type();
        VariableValue_GVectorArray *new_value = nullptr;
        if (caller_provided_storage_ == nullptr) {
          new_value = value_allocator.obtain_GVectorArray(type, array_size);
        }
        else {
          new_value = value_allocator.obtain_GVectorArray_not_owned(
              *static_cast<GVectorArray *>(caller_provided_storage_));
        }
        if (value_ != nullptr) {
          if (value_->type == ValueType::GVVectorArray) {
            /* Fill new vector array with data from virtual vector array. */
            new_value->data.extend(full_mask, this->value_as<VariableValue_GVVectorArray>()->data);
          }
          else if (value_->type == ValueType::OneVector) {
            /* Fill all indices with the same value. */
            const GSpan vector = this->value_as<VariableValue_OneVector>()->data[0];
            new_value->data.extend(full_mask, GVVectorArray_For_SingleGSpan{vector, array_size});
          }
          else {
            BLI_assert_unreachable();
          }
          value_allocator.release_value(value_, data_type);
        }
        value_ = new_value;
        break;
      }
    }
  }
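
  /* For example (illustrative, following the branches above): a single-type variable currently
   * stored as a #VariableValue_GVArray is materialized into a freshly obtained span, and a
   * #VariableValue_OneSingle is broadcast into that span with `fill_construct_indices()`; the old
   * value is then released back to the #ValueAllocator. */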

  void add_as_mutable(ParamsBuilder &params,
                      const IndexMask &mask,
                      const IndexMask &full_mask,
                      const DataType &data_type,
                      ValueAllocator &value_allocator)
  {
    /* Sanity check to make sure that enough values are initialized. */
    BLI_assert(mask.size() <= tot_initialized_);

    this->ensure_is_mutable(full_mask, data_type, value_allocator);
    BLI_assert(value_ != nullptr);

    switch (value_->type) {
      case ValueType::Span: {
        void *data = this->value_as<VariableValue_Span>()->data;
        const GMutableSpan span{data_type.single_type(), data, mask.min_array_size()};
        params.add_single_mutable(span);
        break;
      }
      case ValueType::GVectorArray: {
        params.add_vector_mutable(this->value_as<VariableValue_GVectorArray>()->data);
        break;
      }
      case ValueType::GVArray:
      case ValueType::GVVectorArray:
      case ValueType::OneSingle:
      case ValueType::OneVector: {
        BLI_assert_unreachable();
        break;
      }
    }
  }

  void add_as_output(ParamsBuilder &params,
                     const IndexMask &mask,
                     const IndexMask &full_mask,
                     const DataType &data_type,
                     ValueAllocator &value_allocator)
  {
    /* Sanity check to make sure that enough values are not initialized. */
    BLI_assert(mask.size() <= full_mask.size() - tot_initialized_);
    this->ensure_is_mutable(full_mask, data_type, value_allocator);
    BLI_assert(value_ != nullptr);

    switch (value_->type) {
      case ValueType::Span: {
        void *data = this->value_as<VariableValue_Span>()->data;
        const GMutableSpan span{data_type.single_type(), data, mask.min_array_size()};
        params.add_uninitialized_single_output(span);
        break;
      }
      case ValueType::GVectorArray: {
        params.add_vector_output(this->value_as<VariableValue_GVectorArray>()->data);
        break;
      }
      case ValueType::GVArray:
      case ValueType::GVVectorArray:
      case ValueType::OneSingle:
      case ValueType::OneVector: {
        BLI_assert_unreachable();
        break;
      }
    }

    tot_initialized_ += mask.size();
  }

  void add_as_input__one(ParamsBuilder &params, const DataType &data_type) const
  {
    BLI_assert(this->is_one());
    BLI_assert(value_ != nullptr);

    switch (value_->type) {
      case ValueType::GVArray: {
        params.add_readonly_single_input(this->value_as<VariableValue_GVArray>()->data);
        break;
      }
      case ValueType::GVVectorArray: {
        params.add_readonly_vector_input(this->value_as<VariableValue_GVVectorArray>()->data);
        break;
      }
      case ValueType::OneSingle: {
        const auto *value_typed = this->value_as<VariableValue_OneSingle>();
        BLI_assert(value_typed->is_initialized);
        GPointer ptr{data_type.single_type(), value_typed->data};
        params.add_readonly_single_input(ptr);
        break;
      }
      case ValueType::OneVector: {
        params.add_readonly_vector_input(this->value_as<VariableValue_OneVector>()->data);
        break;
      }
      case ValueType::Span:
      case ValueType::GVectorArray: {
        BLI_assert_unreachable();
        break;
      }
    }
  }

  void ensure_is_mutable__one(const DataType &data_type, ValueAllocator &value_allocator)
  {
    BLI_assert(this->is_one());
    if (value_ != nullptr && ELEM(value_->type, ValueType::OneSingle, ValueType::OneVector)) {
      return;
    }

    switch (data_type.category()) {
      case DataType::Single: {
        const CPPType &type = data_type.single_type();
        VariableValue_OneSingle *new_value = value_allocator.obtain_OneSingle(type);
        if (value_ != nullptr) {
          if (value_->type == ValueType::GVArray) {
            this->value_as<VariableValue_GVArray>()->data.get_internal_single_to_uninitialized(
                new_value->data);
            new_value->is_initialized = true;
          }
          else if (value_->type == ValueType::Span) {
            BLI_assert(tot_initialized_ == 0);
            /* Nothing to do, the single value is uninitialized already. */
          }
          else {
            BLI_assert_unreachable();
          }
          value_allocator.release_value(value_, data_type);
        }
        value_ = new_value;
        break;
      }
      case DataType::Vector: {
        const CPPType &type = data_type.vector_base_type();
        VariableValue_OneVector *new_value = value_allocator.obtain_OneVector(type);
        if (value_ != nullptr) {
          if (value_->type == ValueType::GVVectorArray) {
            const GVVectorArray &old_vector_array =
                this->value_as<VariableValue_GVVectorArray>()->data;
            new_value->data.extend(IndexRange(1), old_vector_array);
          }
          else if (value_->type == ValueType::GVectorArray) {
            BLI_assert(tot_initialized_ == 0);
            /* Nothing to do. */
          }
          else {
            BLI_assert_unreachable();
          }
          value_allocator.release_value(value_, data_type);
        }
        value_ = new_value;
        break;
      }
    }
  }

  void add_as_mutable__one(ParamsBuilder &params,
                           const DataType &data_type,
                           ValueAllocator &value_allocator)
  {
    BLI_assert(this->is_one());
    this->ensure_is_mutable__one(data_type, value_allocator);
    BLI_assert(value_ != nullptr);

    switch (value_->type) {
      case ValueType::OneSingle: {
        auto *value_typed = this->value_as<VariableValue_OneSingle>();
        BLI_assert(value_typed->is_initialized);
        params.add_single_mutable(GMutableSpan{data_type.single_type(), value_typed->data, 1});
        break;
      }
      case ValueType::OneVector: {
        params.add_vector_mutable(this->value_as<VariableValue_OneVector>()->data);
        break;
      }
      case ValueType::GVArray:
      case ValueType::Span:
      case ValueType::GVVectorArray:
      case ValueType::GVectorArray: {
        BLI_assert_unreachable();
        break;
      }
    }
  }

  void add_as_output__one(ParamsBuilder &params,
                          const IndexMask &mask,
                          const DataType &data_type,
                          ValueAllocator &value_allocator)
  {
    BLI_assert(this->is_one());
    this->ensure_is_mutable__one(data_type, value_allocator);
    BLI_assert(value_ != nullptr);

    switch (value_->type) {
      case ValueType::OneSingle: {
        auto *value_typed = this->value_as<VariableValue_OneSingle>();
        BLI_assert(!value_typed->is_initialized);
        params.add_uninitialized_single_output(
            GMutableSpan{data_type.single_type(), value_typed->data, 1});
        /* It becomes initialized when the multi-function is called. */
        value_typed->is_initialized = true;
        break;
      }
      case ValueType::OneVector: {
        auto *value_typed = this->value_as<VariableValue_OneVector>();
        BLI_assert(value_typed->data[0].is_empty());
        params.add_vector_output(value_typed->data);
        break;
      }
      case ValueType::GVArray:
      case ValueType::Span:
      case ValueType::GVVectorArray:
      case ValueType::GVectorArray: {
        BLI_assert_unreachable();
        break;
      }
    }

    tot_initialized_ += mask.size();
  }
|
|
|
|
|
|
2021-12-12 10:33:05 +01:00
|
|
|
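  /* Note on the `__one` helpers above: a value in a "one" state stores a single element that
   * stands in for every masked index. A minimal sketch of what that means for an output
   * parameter (taken from the switch above, names as in this file):
   *
   *   params.add_uninitialized_single_output(
   *       GMutableSpan{data_type.single_type(), value_typed->data, 1});
   *
   * The multi-function then writes exactly one value, and `tot_initialized_` is still advanced
   * by `mask.size()`, because that single value logically covers all masked indices. */
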
  /**
   * Destruct the masked elements in this variable.
   * \return True when all elements of this variable are uninitialized and the variable state can
   * be released.
   */
  bool destruct(const IndexMask &mask,
                const IndexMask &full_mask,
                const DataType &data_type,
                ValueAllocator &value_allocator)
  {
    BLI_assert(value_ != nullptr);
    int new_tot_initialized = tot_initialized_ - mask.size();

    /* Sanity check to make sure that enough indices can be destructed. */
    BLI_assert(new_tot_initialized >= 0);

    switch (value_->type) {
      case ValueType::GVArray: {
        if (mask.size() < full_mask.size()) {
          /* Not all elements are destructed. Since we can't work on the original array, we have
           * to create a copy first. */
          this->ensure_is_mutable(full_mask, data_type, value_allocator);
          BLI_assert(value_->type == ValueType::Span);
          const CPPType &type = data_type.single_type();
          type.destruct_indices(this->value_as<VariableValue_Span>()->data, mask);
        }
        break;
      }
      case ValueType::Span: {
        const CPPType &type = data_type.single_type();
        type.destruct_indices(this->value_as<VariableValue_Span>()->data, mask);
        break;
      }
      case ValueType::GVVectorArray: {
        if (mask.size() < full_mask.size()) {
          /* Not all elements are cleared. Since we can't work on the original vector array, we
           * have to create a copy first. A possible future optimization is to create the partial
           * copy directly. */
          this->ensure_is_mutable(full_mask, data_type, value_allocator);
          BLI_assert(value_->type == ValueType::GVectorArray);
          this->value_as<VariableValue_GVectorArray>()->data.clear(mask);
        }
        break;
      }
      case ValueType::GVectorArray: {
        this->value_as<VariableValue_GVectorArray>()->data.clear(mask);
        break;
      }
      case ValueType::OneSingle: {
        auto *value_typed = this->value_as<VariableValue_OneSingle>();
        BLI_assert(value_typed->is_initialized);
        UNUSED_VARS_NDEBUG(value_typed);
        if (mask.size() == tot_initialized_) {
          const CPPType &type = data_type.single_type();
          type.destruct(value_typed->data);
          value_typed->is_initialized = false;
        }
        break;
      }
      case ValueType::OneVector: {
        auto *value_typed = this->value_as<VariableValue_OneVector>();
        if (mask.size() == tot_initialized_) {
          value_typed->data.clear(IndexRange(1));
        }
        break;
      }
    }

    tot_initialized_ = new_tot_initialized;

    const bool should_self_destruct = new_tot_initialized == 0 &&
                                      caller_provided_storage_ == nullptr;
    return should_self_destruct;
  }

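  /* A minimal sketch of the intended calling pattern for #destruct, mirroring what
   * #VariableStates::destruct further down in this file does: only when it returns true may the
   * value itself be released as well.
   *
   *   if (variable_state.destruct(mask, full_mask, data_type, value_allocator)) {
   *     variable_state.destruct_value(value_allocator, data_type);
   *   }
   */
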
  void indices_split(const IndexMask &mask, IndicesSplitVectors &r_indices)
  {
    BLI_assert(mask.size() <= tot_initialized_);
    BLI_assert(value_ != nullptr);

    switch (value_->type) {
      case ValueType::GVArray: {
        const VArray<bool> varray = this->value_as<VariableValue_GVArray>()->data.typed<bool>();
        mask.foreach_index([&](const int64_t i) { r_indices[varray[i]].append(i); });
        break;
      }
      case ValueType::Span: {
        const Span<bool> span(
            static_cast<const bool *>(this->value_as<VariableValue_Span>()->data),
            mask.min_array_size());
        mask.foreach_index([&](const int64_t i) { r_indices[span[i]].append(i); });
        break;
      }
      case ValueType::OneSingle: {
        auto *value_typed = this->value_as<VariableValue_OneSingle>();
        BLI_assert(value_typed->is_initialized);
        const bool condition = *static_cast<const bool *>(value_typed->data);
        Vector<int64_t> &indices = r_indices[condition];
        indices.reserve(indices.size() + mask.size());
        mask.foreach_index_optimized<int64_t>([&](const int64_t i) { indices.append(i); });
        break;
      }
      case ValueType::GVVectorArray:
      case ValueType::GVectorArray:
      case ValueType::OneVector: {
        BLI_assert_unreachable();
        break;
      }
    }
  }

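  /* Worked example for #indices_split, assuming the variable holds booleans spanning
   * {true, false, true, true} and the mask is {0, 1, 2, 3}: indices whose value is false are
   * appended to `r_indices[0]` and indices whose value is true to `r_indices[1]`, giving
   * r_indices[0] = {1} and r_indices[1] = {0, 2, 3}. The two resulting index sets can then be
   * processed separately, e.g. for the two sides of a condition. */
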
  template<typename T> T *value_as()
  {
    BLI_assert(value_ != nullptr);
    BLI_assert(value_->type == T::static_type);
    return static_cast<T *>(value_);
  }

  template<typename T> const T *value_as() const
  {
    BLI_assert(value_ != nullptr);
    BLI_assert(value_->type == T::static_type);
    return static_cast<const T *>(value_);
  }
};

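/* A minimal usage sketch for the `value_as` accessors above (the same pattern appears throughout
 * this file): the template argument names the concrete #VariableValue_* type and the asserts
 * guard against a mismatch with the stored #ValueType, e.g.
 *
 *   const CPPType &type = data_type.single_type();
 *   type.destruct_indices(this->value_as<VariableValue_Span>()->data, mask);
 */
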
/** Keeps track of the states of all variables during evaluation. */
class VariableStates {
 private:
  ValueAllocator value_allocator_;
  const Procedure &procedure_;
  /** The state of every variable, indexed by #Variable::index_in_procedure(). */
  Array<VariableState> variable_states_;
  const IndexMask &full_mask_;

 public:
  VariableStates(LinearAllocator<> &linear_allocator,
                 const Procedure &procedure,
                 const IndexMask &full_mask)
      : value_allocator_(linear_allocator),
        procedure_(procedure),
        variable_states_(procedure.variables().size()),
        full_mask_(full_mask)
  {
  }

  ~VariableStates()
  {
    for (const int variable_i : procedure_.variables().index_range()) {
      VariableState &state = variable_states_[variable_i];
      if (state.value_ != nullptr) {
        const Variable *variable = procedure_.variables()[variable_i];
        state.destruct_value(value_allocator_, variable->data_type());
      }
    }
  }

  ValueAllocator &value_allocator()
  {
    return value_allocator_;
  }

  const IndexMask &full_mask() const
  {
    return full_mask_;
  }

  void add_initial_variable_states(const ProcedureExecutor &fn,
                                   const Procedure &procedure,
                                   Params &params)
  {
    for (const int param_index : fn.param_indices()) {
      ParamType param_type = fn.param_type(param_index);
      const Variable *variable = procedure.params()[param_index].variable;

      auto add_state = [&](VariableValue *value,
                           bool input_is_initialized,
                           void *caller_provided_storage = nullptr) {
        const int tot_initialized = input_is_initialized ? full_mask_.size() : 0;
        const int variable_i = variable->index_in_procedure();
        VariableState &variable_state = variable_states_[variable_i];
        BLI_assert(variable_state.value_ == nullptr);
        variable_state.value_ = value;
        variable_state.tot_initialized_ = tot_initialized;
        variable_state.caller_provided_storage_ = caller_provided_storage;
      };

      switch (param_type.category()) {
        case ParamCategory::SingleInput: {
          const GVArray &data = params.readonly_single_input(param_index);
          add_state(value_allocator_.obtain_GVArray(data), true);
          break;
        }
        case ParamCategory::VectorInput: {
          const GVVectorArray &data = params.readonly_vector_input(param_index);
          add_state(value_allocator_.obtain_GVVectorArray(data), true);
          break;
        }
        case ParamCategory::SingleOutput: {
          GMutableSpan data = params.uninitialized_single_output(param_index);
          add_state(value_allocator_.obtain_Span_not_owned(data.data()), false, data.data());
          break;
        }
        case ParamCategory::VectorOutput: {
          GVectorArray &data = params.vector_output(param_index);
          add_state(value_allocator_.obtain_GVectorArray_not_owned(data), false, &data);
          break;
        }
        case ParamCategory::SingleMutable: {
          GMutableSpan data = params.single_mutable(param_index);
          add_state(value_allocator_.obtain_Span_not_owned(data.data()), true, data.data());
          break;
        }
        case ParamCategory::VectorMutable: {
          GVectorArray &data = params.vector_mutable(param_index);
          add_state(value_allocator_.obtain_GVectorArray_not_owned(data), true, &data);
          break;
        }
      }
    }
  }

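  /* Summary of the initial states set up by #add_initial_variable_states, derived from the
   * switch above: input parameters wrap the caller-provided data and start fully initialized,
   * while output parameters reuse the caller-provided storage but start uninitialized.
   *
   *   SingleInput   -> GVArray,                    initialized
   *   VectorInput   -> GVVectorArray,              initialized
   *   SingleOutput  -> Span (not owned),           uninitialized, caller storage remembered
   *   VectorOutput  -> GVectorArray (not owned),   uninitialized, caller storage remembered
   *   SingleMutable -> Span (not owned),           initialized,   caller storage remembered
   *   VectorMutable -> GVectorArray (not owned),   initialized,   caller storage remembered
   */
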
  void add_as_param(VariableState &variable_state,
                    ParamsBuilder &params,
                    const ParamType &param_type,
                    const IndexMask &mask)
  {
    const DataType data_type = param_type.data_type();
    switch (param_type.interface_type()) {
      case ParamType::Input: {
        variable_state.add_as_input(params, mask, data_type);
        break;
      }
      case ParamType::Mutable: {
        variable_state.add_as_mutable(params, mask, full_mask_, data_type, value_allocator_);
        break;
      }
      case ParamType::Output: {
        variable_state.add_as_output(params, mask, full_mask_, data_type, value_allocator_);
        break;
      }
    }
  }

  void add_as_param__one(VariableState &variable_state,
                         ParamsBuilder &params,
                         const ParamType &param_type,
                         const IndexMask &mask)
  {
    const DataType data_type = param_type.data_type();
    switch (param_type.interface_type()) {
      case ParamType::Input: {
        variable_state.add_as_input__one(params, data_type);
        break;
      }
      case ParamType::Mutable: {
        variable_state.add_as_mutable__one(params, data_type, value_allocator_);
        break;
      }
      case ParamType::Output: {
        variable_state.add_as_output__one(params, mask, data_type, value_allocator_);
        break;
      }
    }
  }

  void destruct(const Variable &variable, const IndexMask &mask)
  {
    VariableState &variable_state = this->get_variable_state(variable);
    if (variable_state.destruct(mask, full_mask_, variable.data_type(), value_allocator_)) {
      variable_state.destruct_value(value_allocator_, variable.data_type());
    }
  }

  VariableState &get_variable_state(const Variable &variable)
  {
    const int variable_i = variable.index_in_procedure();
    VariableState &variable_state = variable_states_[variable_i];
    return variable_state;
  }
};

static bool evaluate_as_one(Span<VariableState *> param_variable_states,
                            const IndexMask &mask,
                            const IndexMask &full_mask)
{
  if (mask.size() < full_mask.size()) {
    return false;
  }
  for (VariableState *state : param_variable_states) {
    if (state != nullptr && state->value_ != nullptr && !state->is_one()) {
      return false;
    }
  }
  return true;
}

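/* Worked example for #evaluate_as_one: with a full mask of 1000 indices and a call mask of only
 * 600 of them, it returns false immediately. When both masks have the same size, it returns true
 * only if every bound parameter variable is either unset, has no value allocated yet, or is in a
 * "one" state (a single value standing in for all indices); in that case it's enough to call the
 * multi-function only once instead of for every index. */
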
static void gather_parameter_variable_states(const MultiFunction &fn,
                                             const CallInstruction &instruction,
                                             VariableStates &variable_states,
                                             MutableSpan<VariableState *> r_param_variable_states)
{
  for (const int param_index : fn.param_indices()) {
    const Variable *variable = instruction.params()[param_index];
    if (variable == nullptr) {
      r_param_variable_states[param_index] = nullptr;
    }
    else {
      VariableState &variable_state = variable_states.get_variable_state(*variable);
      r_param_variable_states[param_index] = &variable_state;
    }
  }
}

static void fill_params__one(const MultiFunction &fn,
                             const IndexMask &mask,
                             ParamsBuilder &params,
                             VariableStates &variable_states,
                             const Span<VariableState *> param_variable_states)
{
  for (const int param_index : fn.param_indices()) {
    const ParamType param_type = fn.param_type(param_index);
    VariableState *variable_state = param_variable_states[param_index];
    if (variable_state == nullptr) {
      params.add_ignored_single_output();
    }
    else {
      variable_states.add_as_param__one(*variable_state, params, param_type, mask);
    }
  }
}

static void fill_params(const MultiFunction &fn,
                        const IndexMask &mask,
                        ParamsBuilder &params,
                        VariableStates &variable_states,
                        const Span<VariableState *> param_variable_states)
{
  for (const int param_index : fn.param_indices()) {
    const ParamType param_type = fn.param_type(param_index);
    VariableState *variable_state = param_variable_states[param_index];
    if (variable_state == nullptr) {
      params.add_ignored_single_output();
    }
    else {
      variable_states.add_as_param(*variable_state, params, param_type, mask);
    }
  }
}

static void execute_call_instruction(const CallInstruction &instruction,
                                     const IndexMask &mask,
                                     VariableStates &variable_states,
                                     const Context &context)
{
  const MultiFunction &fn = instruction.fn();

  Vector<VariableState *> param_variable_states;
  param_variable_states.resize(fn.param_amount());
  gather_parameter_variable_states(fn, instruction, variable_states, param_variable_states);

  /* If all inputs to the function are constant, it's enough to call the function only once
   * instead of for every index. */
  if (evaluate_as_one(param_variable_states, mask, variable_states.full_mask())) {
2023-05-24 18:11:41 +02:00
|
|
|
static const IndexMask one_mask(1);
|
|
|
|
|
ParamsBuilder params(fn, &one_mask);
|
2022-06-19 14:25:21 +02:00
|
|
|
fill_params__one(fn, mask, params, variable_states, param_variable_states);
|
2021-09-09 12:54:20 +02:00
|
|
|
|
2021-09-26 23:27:57 +02:00
|
|
|
try {
|
2023-05-24 18:11:41 +02:00
|
|
|
fn.call(one_mask, params, context);
|
2021-09-26 23:27:57 +02:00
|
|
|
}
|
|
|
|
|
catch (...) {
|
|
|
|
|
/* Multi-functions must not throw exceptions. */
|
|
|
|
|
BLI_assert_unreachable();
|
|
|
|
|
}
|
2021-09-09 12:54:20 +02:00
|
|
|
}
|
|
|
|
|
else {
|
2023-01-07 17:32:28 +01:00
|
|
|
ParamsBuilder params(fn, &mask);
|
2022-06-19 14:25:21 +02:00
|
|
|
fill_params(fn, mask, params, variable_states, param_variable_states);
|
2021-09-09 12:54:20 +02:00
|
|
|
|
2021-09-26 23:27:57 +02:00
|
|
|
try {
|
Geometry Nodes: refactor multi-threading in field evaluation
Previously, there was a fixed grain size for all multi-functions. That was
not sufficient because some functions could benefit a lot from smaller
grain sizes.
This refactor adds a new `MultiFunction::call_auto` method which has the
same effect as just calling `MultiFunction::call` but additionally figures
out how to execute the specific multi-function efficiently. It determines
a good grain size and decides whether the mask indices should be shifted
or not.
Most multi-function evaluations benefit from this, but medium-sized work
loads (1000 - 50000 elements) benefit the most, especially when expensive
multi-functions (e.g. noise) are involved. This is because for smaller work
loads threading is rarely used, and for larger work loads threading already
worked fine before.
With this patch, multi-functions can specify execution hints that allow
the caller to execute them most efficiently. These execution hints still
have to be added to more functions.
Some performance measurements of a field evaluation involving noise and
math nodes, ordered by the number of elements being evaluated:
```
1,000,000: 133 ms -> 120 ms
100,000: 30 ms -> 18 ms
10,000: 20 ms -> 2.7 ms
1,000: 4 ms -> 0.5 ms
100: 0.5 ms -> 0.4 ms
```
2021-11-26 11:05:47 +01:00
|
|
|
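      /* `call_auto` picks a grain size for multi-threading and decides whether the
       * mask indices should be shifted, based on the execution hints of `fn`. */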
fn.call_auto(mask, params, context);
|
2021-09-26 23:27:57 +02:00
|
|
|
}
|
|
|
|
|
catch (...) {
|
|
|
|
|
/* Multi-functions must not throw exceptions. */
|
|
|
|
|
BLI_assert_unreachable();
|
|
|
|
|
}
|
2021-09-09 12:54:20 +02:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/** An index mask that owns its indices if necessary. */
|
|
|
|
|
struct InstructionIndices {
|
2023-05-24 18:11:41 +02:00
|
|
|
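  /* Owns the memory behind `referenced_indices` when the indices were created by the
   * executor itself (see `add_owned_indices`); otherwise it stays null. */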
std::unique_ptr<IndexMaskMemory> memory;
|
2021-09-09 12:54:20 +02:00
|
|
|
IndexMask referenced_indices;
|
|
|
|
|
|
2023-05-24 18:11:41 +02:00
|
|
|
const IndexMask &mask() const
|
2021-09-09 12:54:20 +02:00
|
|
|
{
|
|
|
|
|
return this->referenced_indices;
|
|
|
|
|
}
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
/** Contains information about the next instruction that should be executed. */
|
|
|
|
|
struct NextInstructionInfo {
|
2023-01-07 17:32:28 +01:00
|
|
|
const Instruction *instruction = nullptr;
|
2021-09-09 12:54:20 +02:00
|
|
|
InstructionIndices indices;
|
|
|
|
|
|
2023-05-24 18:11:41 +02:00
|
|
|
const IndexMask &mask() const
|
2021-09-09 12:54:20 +02:00
|
|
|
{
|
|
|
|
|
return this->indices.mask();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
operator bool() const
|
|
|
|
|
{
|
|
|
|
|
return this->instruction != nullptr;
|
|
|
|
|
}
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* Keeps track of the next instruction for all indices and decides in which order instructions are
|
|
|
|
|
* evaluated.
|
|
|
|
|
*/
|
|
|
|
|
class InstructionScheduler {
|
|
|
|
|
private:
|
2022-06-19 14:25:21 +02:00
|
|
|
Stack<NextInstructionInfo> next_instructions_;
|
2021-09-09 12:54:20 +02:00
|
|
|
|
|
|
|
|
public:
|
|
|
|
|
InstructionScheduler() = default;
|
|
|
|
|
|
2023-05-24 18:11:41 +02:00
|
|
|
void add_referenced_indices(const Instruction &instruction, const IndexMask &mask)
|
2021-09-09 12:54:20 +02:00
|
|
|
{
|
|
|
|
|
if (mask.is_empty()) {
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
InstructionIndices new_indices;
|
|
|
|
|
new_indices.referenced_indices = mask;
|
2022-06-19 14:25:21 +02:00
|
|
|
next_instructions_.push({&instruction, std::move(new_indices)});
|
2021-09-09 12:54:20 +02:00
|
|
|
}
|
|
|
|
|
|
2023-01-07 17:32:28 +01:00
|
|
|
void add_owned_indices(const Instruction &instruction, Vector<int64_t> indices)
|
2021-09-09 12:54:20 +02:00
|
|
|
{
|
|
|
|
|
if (indices.is_empty()) {
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
InstructionIndices new_indices;
|
2023-05-24 18:11:41 +02:00
|
|
|
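    /* Convert the owned indices into an `IndexMask` and keep the backing memory alive
     * for as long as this scheduler entry exists. */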
new_indices.memory = std::make_unique<IndexMaskMemory>();
|
|
|
|
|
new_indices.referenced_indices = IndexMask::from_indices<int64_t>(indices,
|
|
|
|
|
*new_indices.memory);
|
2022-06-19 14:25:21 +02:00
|
|
|
next_instructions_.push({&instruction, std::move(new_indices)});
|
2021-09-09 12:54:20 +02:00
|
|
|
}
|
|
|
|
|
|
2022-06-19 14:25:21 +02:00
|
|
|
bool is_done() const
|
2021-09-09 12:54:20 +02:00
|
|
|
{
|
2022-06-19 14:25:21 +02:00
|
|
|
return next_instructions_.is_empty();
|
2021-09-09 12:54:20 +02:00
|
|
|
}
|
|
|
|
|
|
2022-06-19 14:25:21 +02:00
|
|
|
const NextInstructionInfo &peek() const
|
2021-09-09 12:54:20 +02:00
|
|
|
{
|
2022-06-19 14:25:21 +02:00
|
|
|
BLI_assert(!this->is_done());
|
|
|
|
|
return next_instructions_.peek();
|
|
|
|
|
}
|
2021-09-09 12:54:20 +02:00
|
|
|
|
2023-01-07 17:32:28 +01:00
|
|
|
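  /** Keep the current indices but continue with the given instruction next. */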
void update_instruction_pointer(const Instruction &instruction)
|
2022-06-19 14:25:21 +02:00
|
|
|
{
|
|
|
|
|
next_instructions_.peek().instruction = &instruction;
|
2021-09-09 12:54:20 +02:00
|
|
|
}
|
|
|
|
|
|
2022-06-19 14:25:21 +02:00
|
|
|
NextInstructionInfo pop()
|
2021-09-09 12:54:20 +02:00
|
|
|
{
|
2022-06-19 14:25:21 +02:00
|
|
|
return next_instructions_.pop();
|
2021-09-09 12:54:20 +02:00
|
|
|
}
|
|
|
|
|
};
|
|
|
|
|
|
2023-05-24 18:11:41 +02:00
|
|
|
void ProcedureExecutor::call(const IndexMask &full_mask, Params params, Context context) const
|
2021-09-09 12:54:20 +02:00
|
|
|
{
|
|
|
|
|
BLI_assert(procedure_.validate());
|
|
|
|
|
|
2022-06-19 14:25:21 +02:00
|
|
|
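  /* Prefer a small stack buffer for allocations, so that simple procedures do not
   * have to allocate on the heap. */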
AlignedBuffer<512, 64> local_buffer;
|
2021-12-12 10:33:05 +01:00
|
|
|
LinearAllocator<> linear_allocator;
|
2022-06-19 14:25:21 +02:00
|
|
|
linear_allocator.provide_buffer(local_buffer);
|
2021-09-09 12:54:20 +02:00
|
|
|
|
2022-06-19 14:25:21 +02:00
|
|
|
VariableStates variable_states{linear_allocator, procedure_, full_mask};
|
2021-09-09 12:54:20 +02:00
|
|
|
variable_states.add_initial_variable_states(*this, procedure_, params);
|
|
|
|
|
|
|
|
|
|
InstructionScheduler scheduler;
|
|
|
|
|
scheduler.add_referenced_indices(*procedure_.entry(), full_mask);
|
|
|
|
|
|
|
|
|
|
/* Loop until all indices have reached a return instruction. */
|
2022-06-19 14:25:21 +02:00
|
|
|
while (!scheduler.is_done()) {
|
|
|
|
|
const NextInstructionInfo &instr_info = scheduler.peek();
|
2023-01-07 17:32:28 +01:00
|
|
|
const Instruction &instruction = *instr_info.instruction;
|
2021-09-09 12:54:20 +02:00
|
|
|
switch (instruction.type()) {
|
2023-01-07 17:32:28 +01:00
|
|
|
case InstructionType::Call: {
|
|
|
|
|
const CallInstruction &call_instruction = static_cast<const CallInstruction &>(
|
2021-09-09 12:54:20 +02:00
|
|
|
instruction);
|
|
|
|
|
execute_call_instruction(call_instruction, instr_info.mask(), variable_states, context);
|
2022-06-19 14:25:21 +02:00
|
|
|
scheduler.update_instruction_pointer(*call_instruction.next());
|
2021-09-09 12:54:20 +02:00
|
|
|
break;
|
|
|
|
|
}
|
2023-01-07 17:32:28 +01:00
|
|
|
case InstructionType::Branch: {
|
|
|
|
|
const BranchInstruction &branch_instruction = static_cast<const BranchInstruction &>(
|
2021-09-09 12:54:20 +02:00
|
|
|
instruction);
|
2023-01-07 17:32:28 +01:00
|
|
|
const Variable *condition_var = branch_instruction.condition();
|
2021-09-09 12:54:20 +02:00
|
|
|
VariableState &variable_state = variable_states.get_variable_state(*condition_var);
|
|
|
|
|
|
|
|
|
|
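        /* Split the current indices based on the condition and schedule the false and
         * true branches with their corresponding index groups. */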
IndicesSplitVectors new_indices;
|
|
|
|
|
variable_state.indices_split(instr_info.mask(), new_indices);
|
2022-06-19 14:25:21 +02:00
|
|
|
scheduler.pop();
|
2021-09-09 12:54:20 +02:00
|
|
|
scheduler.add_owned_indices(*branch_instruction.branch_false(), new_indices[false]);
|
|
|
|
|
scheduler.add_owned_indices(*branch_instruction.branch_true(), new_indices[true]);
|
|
|
|
|
break;
|
|
|
|
|
}
|
2023-01-07 17:32:28 +01:00
|
|
|
case InstructionType::Destruct: {
|
|
|
|
|
const DestructInstruction &destruct_instruction = static_cast<const DestructInstruction &>(
|
|
|
|
|
instruction);
|
|
|
|
|
const Variable *variable = destruct_instruction.variable();
|
2021-09-09 12:54:20 +02:00
|
|
|
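        /* End the lifetime of the variable for the indices handled here. */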
variable_states.destruct(*variable, instr_info.mask());
|
2022-06-19 14:25:21 +02:00
|
|
|
scheduler.update_instruction_pointer(*destruct_instruction.next());
|
2021-09-09 12:54:20 +02:00
|
|
|
break;
|
|
|
|
|
}
|
2023-01-07 17:32:28 +01:00
|
|
|
case InstructionType::Dummy: {
|
|
|
|
|
const DummyInstruction &dummy_instruction = static_cast<const DummyInstruction &>(
|
2021-09-09 12:54:20 +02:00
|
|
|
instruction);
|
2022-06-19 14:25:21 +02:00
|
|
|
scheduler.update_instruction_pointer(*dummy_instruction.next());
|
2021-09-09 12:54:20 +02:00
|
|
|
break;
|
|
|
|
|
}
|
2023-01-07 17:32:28 +01:00
|
|
|
case InstructionType::Return: {
|
2021-09-09 12:54:20 +02:00
|
|
|
/* Don't insert the indices back into the scheduler. */
|
2022-06-19 14:25:21 +02:00
|
|
|
scheduler.pop();
|
2021-09-09 12:54:20 +02:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
for (const int param_index : this->param_indices()) {
|
2023-01-07 17:32:28 +01:00
|
|
|
const ParamType param_type = this->param_type(param_index);
|
|
|
|
|
const Variable *variable = procedure_.params()[param_index].variable;
|
2021-09-09 12:54:20 +02:00
|
|
|
VariableState &variable_state = variable_states.get_variable_state(*variable);
|
|
|
|
|
switch (param_type.interface_type()) {
|
2023-01-07 17:32:28 +01:00
|
|
|
case ParamType::Input: {
|
2021-09-09 12:54:20 +02:00
|
|
|
/* Input variables must have been destructed by the end. */
|
|
|
|
|
BLI_assert(variable_state.is_fully_uninitialized(full_mask));
|
|
|
|
|
break;
|
|
|
|
|
}
|
2023-01-07 17:32:28 +01:00
|
|
|
case ParamType::Mutable:
|
|
|
|
|
case ParamType::Output: {
|
2021-09-09 12:54:20 +02:00
|
|
|
/* Mutable and output variables must be initialized at the end. */
|
|
|
|
|
BLI_assert(variable_state.is_fully_initialized(full_mask));
|
|
|
|
|
/* Make sure that the data is in the memory provided by the caller. */
|
|
|
|
|
variable_state.ensure_is_mutable(
|
|
|
|
|
full_mask, param_type.data_type(), variable_states.value_allocator());
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2023-01-07 17:32:28 +01:00
|
|
|
MultiFunction::ExecutionHints ProcedureExecutor::get_execution_hints() const
|
2021-11-26 11:05:47 +01:00
|
|
|
{
|
|
|
|
|
ExecutionHints hints;
|
|
|
|
|
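  /* The executor allocates arrays for intermediate variables. */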
hints.allocates_array = true;
|
|
|
|
|
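  /* Tell the caller not to split the mask into chunks smaller than this. */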
hints.min_grain_size = 10000;
|
|
|
|
|
return hints;
|
|
|
|
|
}
|
|
|
|
|
|
2023-01-07 17:32:28 +01:00
|
|
|
} // namespace blender::fn::multi_function
|