Cleanup: simplify allocating buffers for a CPPType

This reduces verbosity when using `LinearAllocator` or `ResourceScope` to
allocate values for a `CPPType`: the allocation now takes the `CPPType`
directly, and a destructor call no longer has to be registered manually.

Pull Request: https://projects.blender.org/blender/blender/pulls/137685
Author: Jacques Lucke
Date: 2025-04-17 22:01:07 +02:00
Parent: 9d94512f96
Commit: b6342a7e94
13 changed files with 57 additions and 72 deletions
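As an illustration of the call-site change (an editorial sketch, not part of the commit itself): the new `LinearAllocator` overloads added below take the `CPPType` directly instead of its size and alignment. Function and variable names here are placeholders, and the header names are assumed:

```cpp
#include <cstdint>

#include "BLI_cpp_type.hh"
#include "BLI_linear_allocator.hh"

using namespace blender;

static void *allocate_value(LinearAllocator<> &allocator, const CPPType &type)
{
  /* Before: size and alignment had to be passed explicitly. */
  // void *buffer = allocator.allocate(type.size, type.alignment);

  /* After: the #CPPType is passed directly. */
  void *buffer = allocator.allocate(type);
  return buffer;
}

static void *allocate_values(LinearAllocator<> &allocator, const CPPType &type, const int64_t count)
{
  /* The same simplification exists for arrays of values. */
  // void *buffer = allocator.allocate(type.size * count, type.alignment);
  return allocator.allocate_array(type, count);
}
```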

View File

@@ -48,7 +48,7 @@ template<typename Key> class GValueMap {
template<typename ForwardKey> void add_new_by_move(ForwardKey &&key, GMutablePointer value)
{
const CPPType &type = *value.type();
void *buffer = allocator_.allocate(type.size, type.alignment);
void *buffer = allocator_.allocate(type);
type.move_construct(value.get(), buffer);
values_.add_new_as(std::forward<ForwardKey>(key), GMutablePointer{type, buffer});
}
@@ -58,7 +58,7 @@ template<typename Key> class GValueMap {
template<typename ForwardKey> void add_new_by_copy(ForwardKey &&key, GPointer value)
{
const CPPType &type = *value.type();
void *buffer = allocator_.allocate(type.size, type.alignment);
void *buffer = allocator_.allocate(type);
type.copy_construct(value.get(), buffer);
values_.add_new_as(std::forward<ForwardKey>(key), GMutablePointer{type, buffer});
}

View File

@@ -12,6 +12,7 @@
#pragma once
#include "BLI_cpp_type.hh"
#include "BLI_string_ref.hh"
#include "BLI_utility_mixins.hh"
#include "BLI_vector.hh"
@@ -99,6 +100,12 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya
return static_cast<T *>(this->allocate(sizeof(T), alignof(T)));
}
/** Same as above but uses a runtime #CPPType. */
void *allocate(const CPPType &type)
{
return this->allocate(type.size, type.alignment);
}
/**
* Allocate a memory buffer that can hold T array with the given size.
*
@@ -110,6 +117,12 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya
return MutableSpan<T>(array, size);
}
/** Same as above but uses a runtime #CPPType. */
void *allocate_array(const CPPType &type, const int64_t size)
{
return this->allocate(type.size * size, type.alignment);
}
/**
* Construct an instance of T in memory provided by this allocator.
*

View File

@@ -82,6 +82,12 @@ class ResourceScope : NonCopyable, NonMovable {
*/
template<typename T, typename... Args> T &construct(Args &&...args);
/**
* Allocate a buffer for the given type. The caller is responsible for initializing it before the
* #ResourceScope is destructed. The value in the returned buffer is destructed automatically.
*/
void *allocate_owned(const CPPType &type);
/**
* Returns a reference to a linear allocator that is owned by the #ResourceScope. Memory
* allocated through this allocator will be freed when the collector is destructed.
@@ -152,6 +158,15 @@ template<typename T, typename... Args> inline T &ResourceScope::construct(Args &
return value_ref;
}
inline void *ResourceScope::allocate_owned(const CPPType &type)
{
void *buffer = allocator_.allocate(type);
if (!type.is_trivially_destructible) {
this->add_destruct_call([&type, buffer]() { type.destruct(buffer); });
}
return buffer;
}
inline LinearAllocator<> &ResourceScope::allocator()
{
return allocator_;

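A brief sketch of how the new `ResourceScope::allocate_owned` is meant to be used, mirroring the call sites updated in the hunks below; `store_copy`, `type`, and `init_value` are placeholder names introduced for illustration:

```cpp
#include "BLI_cpp_type.hh"
#include "BLI_resource_scope.hh"

using namespace blender;

static void store_copy(ResourceScope &scope, const CPPType &type, const void *init_value)
{
  /* Before: allocate manually and remember to register a destructor call. */
  // void *buffer = scope.allocator().allocate(type.size, type.alignment);
  // if (!type.is_trivially_destructible) {
  //   scope.add_destruct_call([buffer, &type]() { type.destruct(buffer); });
  // }

  /* After: one call allocates the buffer and schedules its destruction when the
   * #ResourceScope is destructed. The caller still initializes the buffer. */
  void *buffer = scope.allocate_owned(type);
  type.copy_construct(init_value, buffer);
}
```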
View File

@@ -391,7 +391,7 @@ Vector<GVArray> evaluate_fields(ResourceScope &scope,
void *buffer;
if (!dst_varray || !dst_varray.is_span()) {
/* Allocate a new buffer for the computed result. */
buffer = scope.allocator().allocate(type.size * array_size, type.alignment);
buffer = scope.allocator().allocate_array(type, array_size);
if (!type.is_trivially_destructible) {
/* Destruct values in the end. */
@@ -437,12 +437,7 @@ Vector<GVArray> evaluate_fields(ResourceScope &scope,
const GFieldRef &field = constant_fields_to_evaluate[i];
const CPPType &type = field.cpp_type();
/* Allocate memory where the computed value will be stored in. */
void *buffer = scope.allocator().allocate(type.size, type.alignment);
if (!type.is_trivially_destructible) {
/* Destruct value in the end. */
scope.add_destruct_call([buffer, &type]() { type.destruct(buffer); });
}
void *buffer = scope.allocate_owned(type);
/* Pass output buffer to the procedure executor. */
mf_params.add_uninitialized_single_output({type, buffer, 1});

View File

@@ -658,7 +658,7 @@ class Executor {
{
const OutputSocket &socket = *self_.graph_inputs_[graph_input_index];
const CPPType &type = socket.type();
void *buffer = local_data.allocator->allocate(type.size, type.alignment);
void *buffer = local_data.allocator->allocate(type);
type.move_construct(input_data, buffer);
this->forward_value_to_linked_inputs(socket, {type, buffer}, current_task, local_data);
}
@@ -910,7 +910,7 @@ class Executor {
self_.logger_->log_socket_value(input_socket, {type, default_value}, local_context);
}
BLI_assert(input_state.value == nullptr);
input_state.value = allocator.allocate(type.size, type.alignment);
input_state.value = allocator.allocate(type);
type.copy_construct(default_value, input_state.value);
input_state.was_ready_for_execution = true;
}
@@ -1172,7 +1172,7 @@ class Executor {
value_to_forward = {};
}
else {
void *buffer = local_data.allocator->allocate(type.size, type.alignment);
void *buffer = local_data.allocator->allocate(type);
type.copy_construct(value_to_forward.get(), buffer);
this->forward_value_to_input(locked_node, input_state, {type, buffer}, current_task);
}
@@ -1367,7 +1367,7 @@ class GraphExecutorLFParams final : public Params {
if (output_state.value == nullptr) {
LinearAllocator<> &allocator = *this->get_local_data().allocator;
const CPPType &type = node_.output(index).type();
output_state.value = allocator.allocate(type.size, type.alignment);
output_state.value = allocator.allocate(type);
}
return output_state.value;
}

View File

@@ -9,7 +9,7 @@ namespace blender::fn::multi_function {
void ParamsBuilder::add_unused_output_for_unsupporting_function(const CPPType &type)
{
ResourceScope &scope = this->resource_scope();
void *buffer = scope.allocator().allocate(type.size * min_array_size_, type.alignment);
void *buffer = scope.allocator().allocate_array(type, min_array_size_);
const GMutableSpan span{type, buffer, min_array_size_};
actual_params_.append_unchecked_as(std::in_place_type<GMutableSpan>, span);
if (!type.is_trivially_destructible) {

View File

@@ -375,7 +375,7 @@ class LazyFunctionForBakeNode final : public LazyFunction {
LinearAllocator<> allocator;
for (const int i : bake_items_.index_range()) {
const CPPType &type = *outputs_[i].type;
next_values[i] = allocator.allocate(type.size, type.alignment);
next_values[i] = allocator.allocate(type);
}
this->copy_bake_state_to_values(
next_state, data_block_map, self_object, compute_context, next_values);

View File

@@ -658,7 +658,7 @@ class LazyFunctionForSimulationOutputNode final : public LazyFunction {
LinearAllocator<> allocator;
for (const int i : simulation_items_.index_range()) {
const CPPType &type = *outputs_[i].type;
next_values[i] = allocator.allocate(type.size, type.alignment);
next_values[i] = allocator.allocate(type);
}
copy_simulation_state_to_values(simulation_items_,
next_state,

View File

@@ -146,7 +146,6 @@ class LazyFunctionForClosureZone : public LazyFunction {
const auto &storage = *static_cast<const NodeGeometryClosureOutput *>(output_bnode_.storage);
std::unique_ptr<ResourceScope> closure_scope = std::make_unique<ResourceScope>();
LinearAllocator<> &closure_allocator = closure_scope->allocator();
lf::Graph &lf_graph = closure_scope->construct<lf::Graph>("Closure Graph");
lf::FunctionNode &lf_body_node = lf_graph.add_function(*body_fn_.function);
@@ -166,13 +165,9 @@ class LazyFunctionForClosureZone : public LazyFunction {
lf_graph.add_link(lf_body_node.output(body_fn_.indices.outputs.input_usages[i]),
lf_graph_input_usage);
void *default_value = closure_allocator.allocate(cpp_type.size, cpp_type.alignment);
void *default_value = closure_scope->allocate_owned(cpp_type);
construct_socket_default_value(*bsocket.typeinfo, default_value);
default_input_values.append(default_value);
if (!cpp_type.is_trivially_destructible) {
closure_scope->add_destruct_call(
[&cpp_type, default_value]() { cpp_type.destruct(default_value); });
}
}
closure_indices.inputs.main = lf_graph.graph_inputs().index_range().take_back(
storage.input_items.items_num);
@@ -200,12 +195,8 @@ class LazyFunctionForClosureZone : public LazyFunction {
for (const int i : zone_.border_links.index_range()) {
const CPPType &cpp_type = *zone_.border_links[i]->tosock->typeinfo->geometry_nodes_cpp_type;
void *input_ptr = params.try_get_input_data_ptr(zone_info_.indices.inputs.border_links[i]);
void *stored_ptr = closure_allocator.allocate(cpp_type.size, cpp_type.alignment);
void *stored_ptr = closure_scope->allocate_owned(cpp_type);
cpp_type.move_construct(input_ptr, stored_ptr);
if (!cpp_type.is_trivially_destructible) {
closure_scope->add_destruct_call(
[&cpp_type, stored_ptr]() { cpp_type.destruct(stored_ptr); });
}
lf_body_node.input(body_fn_.indices.inputs.border_links[i]).set_default_value(stored_ptr);
}
@@ -563,14 +554,8 @@ class LazyFunctionForEvaluateClosureNode : public LazyFunction {
}
auto get_output_default_value = [&](const bke::bNodeSocketType &type) {
const CPPType &cpp_type = *type.geometry_nodes_cpp_type;
void *fallback_value = eval_storage.scope.allocator().allocate(cpp_type.size,
cpp_type.alignment);
void *fallback_value = eval_storage.scope.allocate_owned(*type.geometry_nodes_cpp_type);
construct_socket_default_value(type, fallback_value);
if (!cpp_type.is_trivially_destructible) {
eval_storage.scope.add_destruct_call(
[fallback_value, type = &cpp_type]() { type->destruct(fallback_value); });
}
return fallback_value;
};
@@ -712,15 +697,10 @@ class LazyFunctionForEvaluateClosureNode : public LazyFunction {
continue;
}
}
const CPPType &cpp_type = *output_type.geometry_nodes_cpp_type;
void *default_output_value = eval_storage.scope.allocator().allocate(cpp_type.size,
cpp_type.alignment);
void *default_output_value = eval_storage.scope.allocate_owned(
*output_type.geometry_nodes_cpp_type);
construct_socket_default_value(output_type, default_output_value);
lf_main_output.set_default_value(default_output_value);
if (!cpp_type.is_trivially_destructible) {
eval_storage.scope.add_destruct_call(
[default_output_value, type = &cpp_type]() { type->destruct(default_output_value); });
}
}
static constexpr bool static_false = false;
@@ -774,7 +754,7 @@ void evaluate_closure_eagerly(const Closure &closure, ClosureEagerEvalParams &pa
const bke::bNodeSocketType &from_type = *item.type;
const bke::bNodeSocketType &to_type = *signature.inputs[*mapped_i].type;
const CPPType &to_cpp_type = *to_type.geometry_nodes_cpp_type;
void *value = allocator.allocate(to_cpp_type.size, to_cpp_type.alignment);
void *value = allocator.allocate(to_cpp_type);
if (&from_type == &to_type) {
to_cpp_type.copy_construct(item.value, value);
}
@@ -806,7 +786,7 @@ void evaluate_closure_eagerly(const Closure &closure, ClosureEagerEvalParams &pa
const bke::bNodeSocketType &type = *signature.inputs[main_input_i].type;
const CPPType &cpp_type = *type.geometry_nodes_cpp_type;
const void *default_value = closure.default_input_value(main_input_i);
void *value = allocator.allocate(cpp_type.size, cpp_type.alignment);
void *value = allocator.allocate(cpp_type);
cpp_type.copy_construct(default_value, value);
lf_input_values[lf_input_i] = {cpp_type, value};
}
@@ -830,8 +810,8 @@ void evaluate_closure_eagerly(const Closure &closure, ClosureEagerEvalParams &pa
for (const int main_output_i : indices.outputs.main.index_range()) {
const bke::bNodeSocketType &type = *signature.outputs[main_output_i].type;
const CPPType &cpp_type = *type.geometry_nodes_cpp_type;
lf_output_values[indices.outputs.main[main_output_i]] = {
cpp_type, allocator.allocate(cpp_type.size, cpp_type.alignment)};
lf_output_values[indices.outputs.main[main_output_i]] = {cpp_type,
allocator.allocate(cpp_type)};
}
lf::BasicParams lf_params{

View File

@@ -868,7 +868,7 @@ bke::GeometrySet execute_geometry_nodes_on_geometry(const bNodeTree &btree,
const CPPType *type = typeinfo->geometry_nodes_cpp_type;
BLI_assert(type != nullptr);
void *value = allocator.allocate(type->size, type->alignment);
void *value = allocator.allocate(*type);
initialize_group_input(btree, properties_set, i, value);
param_inputs[function.inputs.main[i]] = {type, value};
inputs_to_destruct.append({type, value});
@@ -891,7 +891,7 @@ bke::GeometrySet execute_geometry_nodes_on_geometry(const bNodeTree &btree,
for (const int i : IndexRange(num_outputs)) {
const lf::Output &lf_output = lazy_function.outputs()[i];
const CPPType &type = *lf_output.type;
void *buffer = allocator.allocate(type.size, type.alignment);
void *buffer = allocator.allocate(type);
param_outputs[i] = {type, buffer};
}
@@ -1080,14 +1080,8 @@ void get_geometry_nodes_input_base_values(const bNodeTree &btree,
continue;
}
void *value_buffer = scope.allocator().allocate(stype->geometry_nodes_cpp_type->size,
stype->geometry_nodes_cpp_type->alignment);
void *value_buffer = scope.allocate_owned(*stype->geometry_nodes_cpp_type);
init_socket_cpp_value_from_property(*property, socket_type, value_buffer);
if (!stype->geometry_nodes_cpp_type->is_trivially_destructible) {
scope.add_destruct_call([type = stype->geometry_nodes_cpp_type, value_buffer]() {
type->destruct(value_buffer);
});
}
if (stype->geometry_nodes_cpp_type == stype->base_cpp_type) {
r_values[input_i] = {stype->base_cpp_type, value_buffer};
continue;

View File

@@ -1232,7 +1232,7 @@ static GMutablePointer get_socket_default_value(LinearAllocator<> &allocator,
if (type == nullptr) {
return {};
}
void *buffer = allocator.allocate(type->size, type->alignment);
void *buffer = allocator.allocate(*type);
typeinfo.get_geometry_nodes_cpp_value(bsocket.default_value, buffer);
return {type, buffer};
}

View File

@@ -272,7 +272,7 @@ void GeoTreeLogger::log_value(const bNode &node, const bNodeSocket &socket, cons
};
auto log_generic_value = [&](const CPPType &type, const void *value) {
void *buffer = this->allocator->allocate(type.size, type.alignment);
void *buffer = this->allocator->allocate(type);
type.copy_construct(value, buffer);
store_logged_value(this->allocator->construct<GenericValueLog>(GMutablePointer{type, buffer}));
};

View File

@@ -746,13 +746,9 @@ struct SocketUsageInferencer {
}
/* Allocate memory for the output value. */
const CPPType &base_type = *output_socket->typeinfo->base_cpp_type;
void *value = scope_.allocator().allocate(base_type.size, base_type.alignment);
void *value = scope_.allocate_owned(base_type);
params.add_uninitialized_single_output(GMutableSpan(base_type, value, 1));
all_socket_values_.add_new(output_socket, value);
if (!base_type.is_trivially_destructible) {
scope_.add_destruct_call(
[type = &base_type, value]() { type->destruct(const_cast<void *>(value)); });
}
}
mf::ContextBuilder context;
/* Actually evaluate the multi-function. The outputs will be written into the memory allocated
@@ -827,14 +823,9 @@ struct SocketUsageInferencer {
}
}
const CPPType &base_type = *socket->typeinfo->base_cpp_type;
void *value_buffer = scope_.allocator().allocate(base_type.size, base_type.alignment);
void *value_buffer = scope_.allocate_owned(*socket->typeinfo->base_cpp_type);
socket->typeinfo->get_base_cpp_value(socket->default_value, value_buffer);
all_socket_values_.add_new(socket, value_buffer);
if (!base_type.is_trivially_destructible) {
scope_.add_destruct_call(
[type = &base_type, value_buffer]() { type->destruct(value_buffer); });
}
}
void value_task__input__linked(const SocketInContext &from_socket,
@@ -869,11 +860,8 @@ struct SocketUsageInferencer {
if (!conversions.is_convertible(*from_type, *to_type)) {
return nullptr;
}
void *dst = scope_.allocator().allocate(to_type->size, to_type->alignment);
void *dst = scope_.allocate_owned(*to_type);
conversions.convert_to_uninitialized(*from_type, *to_type, src, dst);
if (!to_type->is_trivially_destructible) {
scope_.add_destruct_call([to_type, dst]() { to_type->destruct(dst); });
}
return dst;
}
@@ -1079,7 +1067,7 @@ void infer_group_interface_inputs_usage(const bNodeTree &group,
if (base_type == nullptr) {
continue;
}
void *value = allocator.allocate(base_type->size, base_type->alignment);
void *value = allocator.allocate(*base_type);
stype.get_base_cpp_value(socket.default_value, value);
input_values[i] = GPointer(base_type, value);
}