Geometry Nodes: add compute context cache

For various purposes we traverse the computation done by a node tree (e.g. for
gizmos and socket usage inferencing). For that we generally have to keep track
of the compute context we're in at any given time. During the traversal, it's
common to enter and exit the same compute contexts multiple times. Currently,
we'd always build a new compute context when that happens. That happens even
though the old one is generally still around, because other data may reference
it. This patch implements a `ComputeContextCache` type that avoids rebuilding the
same compute contexts over and over again.

I'm considering to also replace the usage of `ComputeContextBuilder` with this
cache somehow, but will see how that works out.

The reason I'm working on this now is that I have to traverse the node tree a
bit again to find where closures might be evaluated. I wanted to be able to
cache the compute contexts for a while already.

Pull Request: https://projects.blender.org/blender/blender/pulls/137360
This commit is contained in:
Jacques Lucke
2025-04-11 21:36:41 +02:00
parent 3c51029ec7
commit aab2b6004b
7 changed files with 115 additions and 17 deletions

View File

@@ -0,0 +1,58 @@
/* SPDX-FileCopyrightText: 2025 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later */
#pragma once
#include "BKE_compute_contexts.hh"
#include "BLI_map.hh"
#include "BLI_vector.hh"
namespace blender::bke {
/**
* When traversing the computation of a node tree (like in `socket_usage_inference.cc` or
* `partial_eval.cc`) one often enters and exists the same compute contexts. The cache implemented
* here avoids re-creating the same compute contexts over and over again. While requiring less
* memory and having potentially better performance, it can also be used to ensure that the same
* compute context will always have the same pointer, even if it's created in two different places.
*
* Constructing compute contexts through this cache can also be a bit more convenient.
*/
class ComputeContextCache {
/** Allocator used to allocate the compute contexts. */
LinearAllocator<> allocator_;
/** The allocated computed contexts that need to be destructed in the end. */
Vector<destruct_ptr<ComputeContext>> cache_;
Map<std::pair<const ComputeContext *, StringRef>, const ModifierComputeContext *>
modifier_contexts_cache_;
Map<std::pair<const ComputeContext *, int32_t>, const GroupNodeComputeContext *>
group_node_contexts_cache_;
public:
const ModifierComputeContext &for_modifier(const ComputeContext *parent,
const NodesModifierData &nmd);
const ModifierComputeContext &for_modifier(const ComputeContext *parent,
StringRef modifier_name);
const GroupNodeComputeContext &for_group_node(const ComputeContext *parent, int32_t node_id);
const GroupNodeComputeContext &for_group_node(const ComputeContext *parent,
const bNode &caller_group_node,
const bNodeTree &caller_tree);
/**
* A fallback that does not use caching and can be used for any compute context.
* More constructors like the ones above can be added as they become necessary.
*/
template<typename T, typename... Args> const T &for_any_uncached(Args &&...args)
{
destruct_ptr<T> compute_context = allocator_.construct<T>(std::forward<Args>(args)...);
const T &result = *compute_context;
cache_.append(std::move(compute_context));
return result;
}
};
} // namespace blender::bke

View File

@@ -364,6 +364,7 @@ set(SRC
BKE_colorband.hh
BKE_colortools.hh
BKE_compositor.hh
BKE_compute_context_cache.hh
BKE_compute_contexts.hh
BKE_constraint.h
BKE_context.hh

View File

@@ -5,6 +5,7 @@
#include "DNA_modifier_types.h"
#include "DNA_node_types.h"
#include "BKE_compute_context_cache.hh"
#include "BKE_compute_contexts.hh"
#include <ostream>
@@ -206,4 +207,39 @@ void OperatorComputeContext::print_current_in_line(std::ostream &stream) const
stream << "Operator";
}
/**
 * Return the cached `ModifierComputeContext` for `nmd` under `parent`, creating and retaining
 * it on first use. Keyed by the modifier's name so that this overload and the name-based one
 * resolve to the same cached context.
 */
const ModifierComputeContext &ComputeContextCache::for_modifier(const ComputeContext *parent,
                                                                const NodesModifierData &nmd)
{
  const std::pair<const ComputeContext *, StringRef> key{parent, StringRef(nmd.modifier.name)};
  const auto build_context = [&]() -> const ModifierComputeContext * {
    return &this->for_any_uncached<ModifierComputeContext>(parent, nmd);
  };
  return *modifier_contexts_cache_.lookup_or_add_cb(key, build_context);
}
/**
 * Return the cached `ModifierComputeContext` for the given modifier name under `parent`,
 * creating and retaining it on first use.
 *
 * NOTE(review): the map key stores `modifier_name` as a non-owning `StringRef`, so the
 * referenced string must outlive this cache — TODO confirm callers never pass a temporary.
 */
const ModifierComputeContext &ComputeContextCache::for_modifier(const ComputeContext *parent,
                                                                StringRef modifier_name)
{
  return *modifier_contexts_cache_.lookup_or_add_cb(std::pair{parent, modifier_name}, [&]() {
    return &this->for_any_uncached<ModifierComputeContext>(parent, modifier_name);
  });
}
/**
 * Return the cached `GroupNodeComputeContext` for the group node with the given identifier
 * under `parent`, creating and retaining it on first use.
 */
const GroupNodeComputeContext &ComputeContextCache::for_group_node(const ComputeContext *parent,
                                                                   const int32_t node_id)
{
  const std::pair<const ComputeContext *, int32_t> key{parent, node_id};
  const auto build_context = [&]() -> const GroupNodeComputeContext * {
    return &this->for_any_uncached<GroupNodeComputeContext>(parent, node_id);
  };
  return *group_node_contexts_cache_.lookup_or_add_cb(key, build_context);
}
/**
 * Return the cached `GroupNodeComputeContext` for `caller_group_node` under `parent`,
 * creating and retaining it on first use.
 *
 * NOTE(review): this shares `group_node_contexts_cache_` with the id-only overload and is
 * keyed only by (parent, node identifier). If the id-only overload populated the entry
 * first, the returned context was constructed without `caller_group_node`/`caller_tree` —
 * presumably equivalent for identity purposes, but verify callers don't rely on those
 * pointers being set.
 */
const GroupNodeComputeContext &ComputeContextCache::for_group_node(const ComputeContext *parent,
                                                                   const bNode &caller_group_node,
                                                                   const bNodeTree &caller_tree)
{
  return *group_node_contexts_cache_.lookup_or_add_cb(
      std::pair{parent, caller_group_node.identifier}, [&]() {
        return &this->for_any_uncached<GroupNodeComputeContext>(
            parent, caller_group_node, caller_tree);
      });
}
} // namespace blender::bke

View File

@@ -7,9 +7,10 @@
#include "NOD_node_in_compute_context.hh"
#include "BLI_function_ref.hh"
#include "BLI_resource_scope.hh"
#include "BLI_set.hh"
#include "BKE_compute_context_cache.hh"
#include "DNA_node_types.h"
/**
@@ -44,7 +45,7 @@ namespace blender::nodes::partial_eval {
*/
void eval_downstream(
const Span<SocketInContext> initial_sockets,
ResourceScope &scope,
bke::ComputeContextCache &compute_context_cache,
FunctionRef<void(const NodeInContext &ctx_node,
Vector<const bNodeSocket *> &r_outputs_to_propagate)> evaluate_node_fn,
FunctionRef<bool(const SocketInContext &ctx_from, const SocketInContext &ctx_to)>
@@ -81,7 +82,7 @@ struct UpstreamEvalTargets {
*/
UpstreamEvalTargets eval_upstream(
const Span<SocketInContext> initial_sockets,
ResourceScope &scope,
bke::ComputeContextCache &compute_context_cache,
FunctionRef<void(const NodeInContext &ctx_node,
Vector<const bNodeSocket *> &r_modified_inputs)> evaluate_node_fn,
FunctionRef<bool(const SocketInContext &ctx_from, const SocketInContext &ctx_to)>

View File

@@ -131,13 +131,13 @@ LocalInverseEvalTargets find_local_inverse_eval_targets(const bNodeTree &tree,
tree.ensure_topology_cache();
ResourceScope scope;
bke::ComputeContextCache compute_context_cache;
Map<SocketInContext, ElemVariant> elem_by_socket;
elem_by_socket.add({nullptr, initial_socket_elem.socket}, initial_socket_elem.elem);
const partial_eval::UpstreamEvalTargets upstream_eval_targets = partial_eval::eval_upstream(
{{nullptr, initial_socket_elem.socket}},
scope,
compute_context_cache,
/* Evaluate node. */
[&](const NodeInContext &ctx_node, Vector<const bNodeSocket *> &r_modified_inputs) {
evaluate_node_elem_upstream(ctx_node, r_modified_inputs, elem_by_socket);
@@ -282,7 +282,7 @@ void foreach_element_on_inverse_eval_path(
if (!initial_socket_elem.elem) {
return;
}
ResourceScope scope;
bke::ComputeContextCache compute_context_cache;
Map<SocketInContext, ElemVariant> upstream_elem_by_socket;
upstream_elem_by_socket.add({&initial_context, initial_socket_elem.socket},
initial_socket_elem.elem);
@@ -290,7 +290,7 @@ void foreach_element_on_inverse_eval_path(
/* In a first pass, propagate upstream to find the upstream targets. */
const partial_eval::UpstreamEvalTargets upstream_eval_targets = partial_eval::eval_upstream(
{{&initial_context, initial_socket_elem.socket}},
scope,
compute_context_cache,
/* Evaluate node. */
[&](const NodeInContext &ctx_node, Vector<const bNodeSocket *> &r_modified_inputs) {
evaluate_node_elem_upstream(ctx_node, r_modified_inputs, upstream_elem_by_socket);
@@ -329,7 +329,7 @@ void foreach_element_on_inverse_eval_path(
partial_eval::eval_downstream(
initial_downstream_evaluation_sockets,
scope,
compute_context_cache,
/* Evaluate node. */
[&](const NodeInContext &ctx_node, Vector<const bNodeSocket *> &r_outputs_to_propagate) {
evaluate_node_elem_downstream_filtered(
@@ -690,7 +690,7 @@ bool backpropagate_socket_values(bContext &C,
{
nmd.node_group->ensure_topology_cache();
ResourceScope scope;
bke::ComputeContextCache compute_context_cache;
Map<SocketInContext, SocketValueVariant> value_by_socket;
Vector<SocketInContext> initial_sockets;
@@ -726,7 +726,7 @@ bool backpropagate_socket_values(bContext &C,
/* Actually backpropagate the socket values as far as possible in the node tree. */
const partial_eval::UpstreamEvalTargets upstream_eval_targets = partial_eval::eval_upstream(
initial_sockets,
scope,
compute_context_cache,
/* Evaluate node. */
[&](const NodeInContext &ctx_node, Vector<const bNodeSocket *> &r_modified_inputs) {
backpropagate_socket_values_through_node(

View File

@@ -105,7 +105,7 @@ struct NodeInContextDownstreamComparator {
void eval_downstream(
const Span<SocketInContext> initial_sockets,
ResourceScope &scope,
bke::ComputeContextCache &compute_context_cache,
FunctionRef<void(const NodeInContext &ctx_node,
Vector<const bNodeSocket *> &r_outputs_to_propagate)> evaluate_node_fn,
FunctionRef<bool(const SocketInContext &ctx_from, const SocketInContext &ctx_to)>
@@ -135,7 +135,7 @@ void eval_downstream(
if (group_tree->has_available_link_cycle()) {
return;
}
const auto &group_context = scope.construct<bke::GroupNodeComputeContext>(
const auto &group_context = compute_context_cache.for_group_node(
ctx_group_node_input.context, node, node.owner_tree());
const int socket_index = ctx_group_node_input.socket->index();
/* Forward the value to every group input node. */
@@ -218,7 +218,7 @@ void eval_downstream(
if (!group_output) {
continue;
}
const ComputeContext &group_context = scope.construct<bke::GroupNodeComputeContext>(
const ComputeContext &group_context = compute_context_cache.for_group_node(
context, node, node.owner_tree());
/* Propagate the values from the group output node to the outputs of the group node and
* continue forwarding them from there. */
@@ -247,7 +247,7 @@ void eval_downstream(
UpstreamEvalTargets eval_upstream(
const Span<SocketInContext> initial_sockets,
ResourceScope &scope,
bke::ComputeContextCache &compute_context_cache,
FunctionRef<void(const NodeInContext &ctx_node,
Vector<const bNodeSocket *> &r_modified_inputs)> evaluate_node_fn,
FunctionRef<bool(const SocketInContext &ctx_from, const SocketInContext &ctx_to)>
@@ -284,7 +284,7 @@ UpstreamEvalTargets eval_upstream(
if (!group_output) {
return;
}
const ComputeContext &group_context = scope.construct<bke::GroupNodeComputeContext>(
const ComputeContext &group_context = compute_context_cache.for_group_node(
context, group_node, group_node.owner_tree());
propagate_value_fn(
ctx_output_socket,

View File

@@ -14,6 +14,7 @@
#include "DNA_material_types.h"
#include "DNA_node_types.h"
#include "BKE_compute_context_cache.hh"
#include "BKE_compute_contexts.hh"
#include "BKE_node_legacy_types.hh"
#include "BKE_node_runtime.hh"
@@ -32,6 +33,7 @@ struct SocketUsageInferencer {
private:
/** Owns e.g. intermediate evaluated values. */
ResourceScope scope_;
bke::ComputeContextCache compute_context_cache_;
/** Root node tree. */
const bNodeTree &root_tree_;
@@ -319,7 +321,7 @@ struct SocketUsageInferencer {
/* The group node input is used if any of the matching group inputs within the group is
* used. */
const ComputeContext &group_context = scope_.construct<bke::GroupNodeComputeContext>(
const ComputeContext &group_context = compute_context_cache_.for_group_node(
socket.context, *node, node->owner_tree());
Vector<const bNodeSocket *> dependent_sockets;
for (const bNode *group_input_node : group->group_input_nodes()) {
@@ -612,7 +614,7 @@ struct SocketUsageInferencer {
all_socket_values_.add_new(socket, nullptr);
return;
}
const ComputeContext &group_context = scope_.construct<bke::GroupNodeComputeContext>(
const ComputeContext &group_context = compute_context_cache_.for_group_node(
socket.context, *node, node->owner_tree());
const SocketInContext socket_in_group{&group_context,
&group_output_node->input_socket(socket->index())};