diff --git a/source/blender/blenkernel/BKE_compute_context_cache.hh b/source/blender/blenkernel/BKE_compute_context_cache.hh new file mode 100644 index 00000000000..11056c85829 --- /dev/null +++ b/source/blender/blenkernel/BKE_compute_context_cache.hh @@ -0,0 +1,58 @@ +/* SPDX-FileCopyrightText: 2025 Blender Authors + * + * SPDX-License-Identifier: GPL-2.0-or-later */ + +#pragma once + +#include "BKE_compute_contexts.hh" + +#include "BLI_map.hh" +#include "BLI_vector.hh" + +namespace blender::bke { + +/** + * When traversing the computation of a node tree (like in `socket_usage_inference.cc` or + * `partial_eval.cc`) one often enters and exits the same compute contexts. The cache implemented + * here avoids re-creating the same compute contexts over and over again. While requiring less + * memory and having potentially better performance, it can also be used to ensure that the same + * compute context will always have the same pointer, even if it's created in two different places. + * + * Constructing compute contexts through this cache can also be a bit more convenient. + */ +class ComputeContextCache { + /** Allocator used to allocate the compute contexts. */ + LinearAllocator<> allocator_; + /** The allocated compute contexts that need to be destructed in the end. 
*/ + Vector<destruct_ptr<ComputeContext>> cache_; + + Map<std::pair<const ComputeContext *, std::string>, const ModifierComputeContext *> + modifier_contexts_cache_; + Map<std::pair<const ComputeContext *, int32_t>, const GroupNodeComputeContext *> + group_node_contexts_cache_; + + public: + const ModifierComputeContext &for_modifier(const ComputeContext *parent, + const NodesModifierData &nmd); + const ModifierComputeContext &for_modifier(const ComputeContext *parent, + StringRef modifier_name); + + const GroupNodeComputeContext &for_group_node(const ComputeContext *parent, int32_t node_id); + const GroupNodeComputeContext &for_group_node(const ComputeContext *parent, + const bNode &caller_group_node, + const bNodeTree &caller_tree); + + /** + * A fallback that does not use caching and can be used for any compute context. + * More constructors like the ones above can be added as they become necessary. + */ + template<typename T, typename... Args> const T &for_any_uncached(Args &&...args) + { + destruct_ptr<T> compute_context = allocator_.construct<T>(std::forward<Args>(args)...); + const T &result = *compute_context; + cache_.append(std::move(compute_context)); + return result; + } +}; + +} // namespace blender::bke diff --git a/source/blender/blenkernel/CMakeLists.txt b/source/blender/blenkernel/CMakeLists.txt index 9cbce5206e7..7f8d4df76f7 100644 --- a/source/blender/blenkernel/CMakeLists.txt +++ b/source/blender/blenkernel/CMakeLists.txt @@ -364,6 +364,7 @@ set(SRC BKE_colorband.hh BKE_colortools.hh BKE_compositor.hh + BKE_compute_context_cache.hh BKE_compute_contexts.hh BKE_constraint.h BKE_context.hh diff --git a/source/blender/blenkernel/intern/compute_contexts.cc b/source/blender/blenkernel/intern/compute_contexts.cc index c83e40c2df8..1a57d29d168 100644 --- a/source/blender/blenkernel/intern/compute_contexts.cc +++ b/source/blender/blenkernel/intern/compute_contexts.cc @@ -5,6 +5,7 @@ #include "DNA_modifier_types.h" #include "DNA_node_types.h" +#include "BKE_compute_context_cache.hh" #include "BKE_compute_contexts.hh" #include <iostream> @@ -206,4 +207,39 @@ void OperatorComputeContext::print_current_in_line(std::ostream
&stream) const stream << "Operator"; } +const ModifierComputeContext &ComputeContextCache::for_modifier(const ComputeContext *parent, + const NodesModifierData &nmd) +{ + return *modifier_contexts_cache_.lookup_or_add_cb( + std::pair{parent, StringRef(nmd.modifier.name)}, + [&]() { return &this->for_any_uncached<ModifierComputeContext>(parent, nmd); }); +} + +const ModifierComputeContext &ComputeContextCache::for_modifier(const ComputeContext *parent, + StringRef modifier_name) +{ + return *modifier_contexts_cache_.lookup_or_add_cb(std::pair{parent, modifier_name}, [&]() { + return &this->for_any_uncached<ModifierComputeContext>(parent, modifier_name); + }); +} + +const GroupNodeComputeContext &ComputeContextCache::for_group_node(const ComputeContext *parent, + const int32_t node_id) +{ + return *group_node_contexts_cache_.lookup_or_add_cb(std::pair{parent, node_id}, [&]() { + return &this->for_any_uncached<GroupNodeComputeContext>(parent, node_id); + }); +} + +const GroupNodeComputeContext &ComputeContextCache::for_group_node(const ComputeContext *parent, + const bNode &caller_group_node, + const bNodeTree &caller_tree) +{ + return *group_node_contexts_cache_.lookup_or_add_cb( + std::pair{parent, caller_group_node.identifier}, [&]() { + return &this->for_any_uncached<GroupNodeComputeContext>( + parent, caller_group_node, caller_tree); + }); +} + } // namespace blender::bke diff --git a/source/blender/nodes/NOD_partial_eval.hh b/source/blender/nodes/NOD_partial_eval.hh index c7e3de395ed..2041f130e1b 100644 --- a/source/blender/nodes/NOD_partial_eval.hh +++ b/source/blender/nodes/NOD_partial_eval.hh @@ -7,9 +7,10 @@ #include "NOD_node_in_compute_context.hh" #include "BLI_function_ref.hh" -#include "BLI_resource_scope.hh" #include "BLI_set.hh" +#include "BKE_compute_context_cache.hh" + #include "DNA_node_types.h" /** @@ -44,7 +45,7 @@ namespace blender::nodes::partial_eval { */ void eval_downstream( const Span<SocketInContext> initial_sockets, - ResourceScope &scope, + bke::ComputeContextCache &compute_context_cache, FunctionRef<void(const NodeInContext &node, Vector<SocketInContext> &r_outputs_to_propagate)> evaluate_node_fn,
FunctionRef @@ -81,7 +82,7 @@ struct UpstreamEvalTargets { */ UpstreamEvalTargets eval_upstream( const Span initial_sockets, - ResourceScope &scope, + bke::ComputeContextCache &compute_context_cache, FunctionRef &r_modified_inputs)> evaluate_node_fn, FunctionRef diff --git a/source/blender/nodes/intern/inverse_eval.cc b/source/blender/nodes/intern/inverse_eval.cc index 61639a5bb7a..c2ae95fec38 100644 --- a/source/blender/nodes/intern/inverse_eval.cc +++ b/source/blender/nodes/intern/inverse_eval.cc @@ -131,13 +131,13 @@ LocalInverseEvalTargets find_local_inverse_eval_targets(const bNodeTree &tree, tree.ensure_topology_cache(); - ResourceScope scope; + bke::ComputeContextCache compute_context_cache; Map elem_by_socket; elem_by_socket.add({nullptr, initial_socket_elem.socket}, initial_socket_elem.elem); const partial_eval::UpstreamEvalTargets upstream_eval_targets = partial_eval::eval_upstream( {{nullptr, initial_socket_elem.socket}}, - scope, + compute_context_cache, /* Evaluate node. */ [&](const NodeInContext &ctx_node, Vector &r_modified_inputs) { evaluate_node_elem_upstream(ctx_node, r_modified_inputs, elem_by_socket); @@ -282,7 +282,7 @@ void foreach_element_on_inverse_eval_path( if (!initial_socket_elem.elem) { return; } - ResourceScope scope; + bke::ComputeContextCache compute_context_cache; Map upstream_elem_by_socket; upstream_elem_by_socket.add({&initial_context, initial_socket_elem.socket}, initial_socket_elem.elem); @@ -290,7 +290,7 @@ void foreach_element_on_inverse_eval_path( /* In a first pass, propagate upstream to find the upstream targets. */ const partial_eval::UpstreamEvalTargets upstream_eval_targets = partial_eval::eval_upstream( {{&initial_context, initial_socket_elem.socket}}, - scope, + compute_context_cache, /* Evaluate node. 
*/ [&](const NodeInContext &ctx_node, Vector &r_modified_inputs) { evaluate_node_elem_upstream(ctx_node, r_modified_inputs, upstream_elem_by_socket); @@ -329,7 +329,7 @@ void foreach_element_on_inverse_eval_path( partial_eval::eval_downstream( initial_downstream_evaluation_sockets, - scope, + compute_context_cache, /* Evaluate node. */ [&](const NodeInContext &ctx_node, Vector &r_outputs_to_propagate) { evaluate_node_elem_downstream_filtered( @@ -690,7 +690,7 @@ bool backpropagate_socket_values(bContext &C, { nmd.node_group->ensure_topology_cache(); - ResourceScope scope; + bke::ComputeContextCache compute_context_cache; Map value_by_socket; Vector initial_sockets; @@ -726,7 +726,7 @@ bool backpropagate_socket_values(bContext &C, /* Actually backpropagate the socket values as far as possible in the node tree. */ const partial_eval::UpstreamEvalTargets upstream_eval_targets = partial_eval::eval_upstream( initial_sockets, - scope, + compute_context_cache, /* Evaluate node. */ [&](const NodeInContext &ctx_node, Vector &r_modified_inputs) { backpropagate_socket_values_through_node( diff --git a/source/blender/nodes/intern/partial_eval.cc b/source/blender/nodes/intern/partial_eval.cc index ca777fa169c..ad097e57ec5 100644 --- a/source/blender/nodes/intern/partial_eval.cc +++ b/source/blender/nodes/intern/partial_eval.cc @@ -105,7 +105,7 @@ struct NodeInContextDownstreamComparator { void eval_downstream( const Span initial_sockets, - ResourceScope &scope, + bke::ComputeContextCache &compute_context_cache, FunctionRef &r_outputs_to_propagate)> evaluate_node_fn, FunctionRef @@ -135,7 +135,7 @@ void eval_downstream( if (group_tree->has_available_link_cycle()) { return; } - const auto &group_context = scope.construct( + const auto &group_context = compute_context_cache.for_group_node( ctx_group_node_input.context, node, node.owner_tree()); const int socket_index = ctx_group_node_input.socket->index(); /* Forward the value to every group input node. 
*/ @@ -218,7 +218,7 @@ void eval_downstream( if (!group_output) { continue; } - const ComputeContext &group_context = scope.construct( + const ComputeContext &group_context = compute_context_cache.for_group_node( context, node, node.owner_tree()); /* Propagate the values from the group output node to the outputs of the group node and * continue forwarding them from there. */ @@ -247,7 +247,7 @@ void eval_downstream( UpstreamEvalTargets eval_upstream( const Span initial_sockets, - ResourceScope &scope, + bke::ComputeContextCache &compute_context_cache, FunctionRef &r_modified_inputs)> evaluate_node_fn, FunctionRef @@ -284,7 +284,7 @@ UpstreamEvalTargets eval_upstream( if (!group_output) { return; } - const ComputeContext &group_context = scope.construct( + const ComputeContext &group_context = compute_context_cache.for_group_node( context, group_node, group_node.owner_tree()); propagate_value_fn( ctx_output_socket, diff --git a/source/blender/nodes/intern/socket_usage_inference.cc b/source/blender/nodes/intern/socket_usage_inference.cc index 45818425dc5..8faae9ceb5c 100644 --- a/source/blender/nodes/intern/socket_usage_inference.cc +++ b/source/blender/nodes/intern/socket_usage_inference.cc @@ -14,6 +14,7 @@ #include "DNA_material_types.h" #include "DNA_node_types.h" +#include "BKE_compute_context_cache.hh" #include "BKE_compute_contexts.hh" #include "BKE_node_legacy_types.hh" #include "BKE_node_runtime.hh" @@ -32,6 +33,7 @@ struct SocketUsageInferencer { private: /** Owns e.g. intermediate evaluated values. */ ResourceScope scope_; + bke::ComputeContextCache compute_context_cache_; /** Root node tree. */ const bNodeTree &root_tree_; @@ -319,7 +321,7 @@ struct SocketUsageInferencer { /* The group node input is used if any of the matching group inputs within the group is * used. 
*/ - const ComputeContext &group_context = scope_.construct( + const ComputeContext &group_context = compute_context_cache_.for_group_node( socket.context, *node, node->owner_tree()); Vector dependent_sockets; for (const bNode *group_input_node : group->group_input_nodes()) { @@ -612,7 +614,7 @@ struct SocketUsageInferencer { all_socket_values_.add_new(socket, nullptr); return; } - const ComputeContext &group_context = scope_.construct( + const ComputeContext &group_context = compute_context_cache_.for_group_node( socket.context, *node, node->owner_tree()); const SocketInContext socket_in_group{&group_context, &group_output_node->input_socket(socket->index())};