From a774ebd5af07baa0d5506d2339fe862be2272464 Mon Sep 17 00:00:00 2001 From: Hans Goudey Date: Wed, 1 Oct 2025 18:40:49 +0200 Subject: [PATCH] Geometry Nodes: Field to Grid Node The purpose of this node is to create a grid with new voxel values on the same grid topology as an existing grid. The new values are the result of field evaluation. Multiple grids can be created at the same time, so that intermediate results used for multiple grids can be efficiently reused during evaluation. This is more efficient than the "Volume Cube" node, for instance, because the output grid shares the sparseness of the input topology grid. Pull Request: https://projects.blender.org/blender/blender/pulls/147074 --- .../startup/bl_ui/node_add_menu_geometry.py | 1 + source/blender/blenkernel/BKE_volume_grid.hh | 4 + .../blenkernel/BKE_volume_grid_process.hh | 119 +++++ source/blender/blenkernel/CMakeLists.txt | 1 + .../blender/blenkernel/intern/volume_grid.cc | 253 +++++++++- source/blender/makesdna/DNA_node_types.h | 18 + .../blender/makesrna/intern/rna_nodetree.cc | 90 ++++ source/blender/nodes/geometry/CMakeLists.txt | 2 + .../geometry/include/NOD_geo_field_to_grid.hh | 95 ++++ .../geometry/nodes/node_geo_field_to_grid.cc | 445 ++++++++++++++++++ .../nodes/intern/volume_grid_function_eval.cc | 306 ++---------- 11 files changed, 1053 insertions(+), 281 deletions(-) create mode 100644 source/blender/blenkernel/BKE_volume_grid_process.hh create mode 100644 source/blender/nodes/geometry/include/NOD_geo_field_to_grid.hh create mode 100644 source/blender/nodes/geometry/nodes/node_geo_field_to_grid.cc diff --git a/scripts/startup/bl_ui/node_add_menu_geometry.py b/scripts/startup/bl_ui/node_add_menu_geometry.py index d522f1f3f21..786fc644355 100644 --- a/scripts/startup/bl_ui/node_add_menu_geometry.py +++ b/scripts/startup/bl_ui/node_add_menu_geometry.py @@ -942,6 +942,7 @@ class NODE_MT_gn_volume_operations_base(node_add_menu.NodeMenu): if context.preferences.experimental.use_new_volume_nodes: self.node_operator(layout, "GeometryNodeGridToMesh") self.node_operator(layout, "GeometryNodeSDFGridBoolean") + self.node_operator(layout, "GeometryNodeFieldToGrid") self.draw_assets_for_catalog(layout, self.menu_path) diff --git a/source/blender/blenkernel/BKE_volume_grid.hh b/source/blender/blenkernel/BKE_volume_grid.hh index 2a6bb39981e..6306ea79735 100644 --- a/source/blender/blenkernel/BKE_volume_grid.hh +++ b/source/blender/blenkernel/BKE_volume_grid.hh @@ -368,6 +368,10 @@ template class VolumeGrid : public GVolumeGrid { void assert_correct_type() const; }; +/** + * Get the volume grid type based on the tree's type. + */ +VolumeGridType get_type(const openvdb::tree::TreeBase &tree); /** * Get the volume grid type based on the tree type in the grid. 
 */
diff --git a/source/blender/blenkernel/BKE_volume_grid_process.hh b/source/blender/blenkernel/BKE_volume_grid_process.hh
new file mode 100644
index 00000000000..ba6476f2a8a
--- /dev/null
+++ b/source/blender/blenkernel/BKE_volume_grid_process.hh
@@ -0,0 +1,119 @@
+/* SPDX-FileCopyrightText: 2023 Blender Foundation
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later */
+
+#pragma once
+
+/** \file
+ * \ingroup bke
+ */
+
+#ifdef WITH_OPENVDB
+
+# include <openvdb/openvdb.h>
+
+# include "BKE_volume_grid.hh"
+# include "BKE_volume_openvdb.hh"
+
+# include "BLI_function_ref.hh"
+# include "BLI_generic_pointer.hh"
+# include "BLI_generic_span.hh"
+# include "BLI_index_mask_fwd.hh"
+
+namespace blender::bke::volume_grid {
+
+using LeafNodeMask = openvdb::util::NodeMask<3u>;
+
+using GetVoxelsFn = FunctionRef<void(MutableSpan<openvdb::Coord> r_voxels)>;
+/**
+ * Process voxels within a single leaf node. #get_voxels_fn is a mechanism to lazily create the
+ * actual voxel coordinates (sometimes that isn't necessary).
+ */
+using ProcessLeafFn = FunctionRef<void(const LeafNodeMask &leaf_node_mask,
+                                       const openvdb::CoordBBox &leaf_bbox,
+                                       GetVoxelsFn get_voxels_fn)>;
+/**
+ * Process multiple sparse tiles.
+ */
+using ProcessTilesFn = FunctionRef<void(Span<openvdb::CoordBBox> tiles)>;
+/**
+ * Process voxels from potentially multiple leaf nodes. This is used to efficiently process
+ * voxels gathered from multiple leaf nodes that have only a few active voxels.
+ */
+using ProcessVoxelsFn = FunctionRef<void(Span<openvdb::Coord> voxels)>;
+
+/**
+ * Splits up the work of processing different parts of the topology into multiple tasks, with
+ * callbacks for each type of task called in parallel.
+ */
+void parallel_grid_topology_tasks(const openvdb::MaskTree &mask_tree,
+                                  ProcessLeafFn process_leaf_fn,
+                                  ProcessVoxelsFn process_voxels_fn,
+                                  ProcessTilesFn process_tiles_fn);
+
+template<typename GridT>
+constexpr bool is_supported_grid_type =
+    is_same_any_v<GridT, openvdb::FloatGrid, openvdb::Vec3fGrid, openvdb::BoolGrid, openvdb::Int32Grid>;
+
+template<typename Fn> inline void to_typed_grid(const openvdb::GridBase &grid_base, Fn &&fn)
+{
+  const VolumeGridType grid_type = get_type(grid_base);
+  BKE_volume_grid_type_to_static_type(grid_type, [&](auto type_tag) {
+    using GridT = typename decltype(type_tag)::type;
+    if constexpr (is_supported_grid_type<GridT>) {
+      fn(static_cast<const GridT &>(grid_base));
+    }
+    else {
+      BLI_assert_unreachable();
+    }
+  });
+}
+
+template<typename Fn> inline void to_typed_grid(openvdb::GridBase &grid_base, Fn &&fn)
+{
+  const VolumeGridType grid_type = get_type(grid_base);
+  BKE_volume_grid_type_to_static_type(grid_type, [&](auto type_tag) {
+    using GridT = typename decltype(type_tag)::type;
+    if constexpr (is_supported_grid_type<GridT>) {
+      fn(static_cast<GridT &>(grid_base));
+    }
+    else {
+      BLI_assert_unreachable();
+    }
+  });
+}
+
+/** Create a grid with the same activated voxels and internal nodes as the given grid. */
+openvdb::GridBase::Ptr create_grid_with_topology(const openvdb::MaskTree &topology,
+                                                 const openvdb::math::Transform &transform,
+                                                 const VolumeGridType grid_type);
+
+/** Set values for the given voxels in the grid. They must already be activated. */
+void set_grid_values(openvdb::GridBase &grid_base, GSpan values, Span<openvdb::Coord> voxels);
+
+/**
+ * Set values for the given tiles in the grid. The tiles must be activated, but not to deeper
+ * levels beyond the tile.
+ */
+void set_tile_values(openvdb::GridBase &grid_base, GSpan values, Span<openvdb::CoordBBox> tiles);
+
+/**
+ * Boolean grids are stored as bitmaps, but we often have to process arrays of booleans. This
+ * utility sets the bitmap values based on the boolean array.
+ */ +void set_mask_leaf_buffer_from_bools(openvdb::BoolGrid &grid, + Span values, + const IndexMask &index_mask, + Span voxels); + +void set_grid_background(openvdb::GridBase &grid_base, const GPointer value); + +} // namespace blender::bke::volume_grid + +/** \} */ + +#endif /* WITH_OPENVDB */ diff --git a/source/blender/blenkernel/CMakeLists.txt b/source/blender/blenkernel/CMakeLists.txt index 797cbdbb074..652af171d5e 100644 --- a/source/blender/blenkernel/CMakeLists.txt +++ b/source/blender/blenkernel/CMakeLists.txt @@ -534,6 +534,7 @@ set(SRC BKE_volume_grid_fields.hh BKE_volume_grid_file_cache.hh BKE_volume_grid_fwd.hh + BKE_volume_grid_process.hh BKE_volume_grid_type_traits.hh BKE_volume_openvdb.hh BKE_volume_render.hh diff --git a/source/blender/blenkernel/intern/volume_grid.cc b/source/blender/blenkernel/intern/volume_grid.cc index 7e1d8a08f7a..efb939bb35e 100644 --- a/source/blender/blenkernel/intern/volume_grid.cc +++ b/source/blender/blenkernel/intern/volume_grid.cc @@ -3,8 +3,10 @@ * SPDX-License-Identifier: GPL-2.0-or-later */ #include "BKE_volume_grid.hh" +#include "BKE_volume_grid_process.hh" #include "BKE_volume_openvdb.hh" +#include "BLI_index_mask.hh" #include "BLI_memory_counter.hh" #include "BLI_task.hh" @@ -324,41 +326,46 @@ VolumeGridData &GVolumeGrid::get_for_write() return const_cast(*data_); } -VolumeGridType get_type(const openvdb::GridBase &grid) +VolumeGridType get_type(const openvdb::TreeBase &tree) { - if (grid.isType()) { + if (tree.isType()) { return VOLUME_GRID_FLOAT; } - if (grid.isType()) { + if (tree.isType()) { return VOLUME_GRID_VECTOR_FLOAT; } - if (grid.isType()) { + if (tree.isType()) { return VOLUME_GRID_BOOLEAN; } - if (grid.isType()) { + if (tree.isType()) { return VOLUME_GRID_DOUBLE; } - if (grid.isType()) { + if (tree.isType()) { return VOLUME_GRID_INT; } - if (grid.isType()) { + if (tree.isType()) { return VOLUME_GRID_INT64; } - if (grid.isType()) { + if (tree.isType()) { return VOLUME_GRID_VECTOR_INT; } - if (grid.isType()) { + if (tree.isType()) { return VOLUME_GRID_VECTOR_DOUBLE; } - if (grid.isType()) { + if (tree.isType()) { return VOLUME_GRID_MASK; } - if (grid.isType()) { + if (tree.isType()) { return VOLUME_GRID_POINTS; } return VOLUME_GRID_UNKNOWN; } +VolumeGridType get_type(const openvdb::GridBase &grid) +{ + return get_type(grid.baseTree()); +} + ImplicitSharingPtr<> OpenvdbTreeSharingInfo::make(std::shared_ptr tree) { return ImplicitSharingPtr<>{MEM_new(__func__, std::move(tree))}; @@ -502,4 +509,228 @@ std::string error_message_from_load(const VolumeGridData &grid) #endif } +/** + * Call #process_leaf_fn on the leaf node if it has a certain minimum number of active voxels. If + * there are only a few active voxels, gather those in #r_coords for later batch processing. + */ +template +static void parallel_grid_topology_tasks_leaf_node(const LeafNodeT &node, + const ProcessLeafFn process_leaf_fn, + Vector &r_coords) +{ + using NodeMaskT = typename LeafNodeT::NodeMaskType; + + const int on_count = node.onVoxelCount(); + /* This number is somewhat arbitrary. 64 is a 1/8th of the number of voxels in a standard leaf + * which is 8x8x8. It's a trade-off between benefiting from the better performance of + * leaf-processing vs. processing more voxels in a batch. */ + const int on_count_threshold = 64; + if (on_count <= on_count_threshold) { + /* The leaf contains only a few active voxels. It's beneficial to process them in a batch with + * active voxels from other leafs. So only gather them here for later processing. 
*/ + for (auto value_iter = node.cbeginValueOn(); value_iter.test(); ++value_iter) { + const openvdb::Coord coord = value_iter.getCoord(); + r_coords.append(coord); + } + return; + } + /* Process entire leaf at once. This is especially beneficial when very many of the voxels in + * the leaf are active. In that case, one can work on the openvdb arrays stored in the leafs + * directly. */ + const NodeMaskT &value_mask = node.getValueMask(); + const openvdb::CoordBBox bbox = node.getNodeBoundingBox(); + process_leaf_fn(value_mask, bbox, [&](MutableSpan r_voxels) { + for (auto value_iter = node.cbeginValueOn(); value_iter.test(); ++value_iter) { + r_voxels[value_iter.pos()] = value_iter.getCoord(); + } + }); +} + +/** + * Calls the process functions on all the active tiles and voxels within the given internal node. + */ +template +static void parallel_grid_topology_tasks_internal_node(const InternalNodeT &node, + const ProcessLeafFn process_leaf_fn, + const ProcessVoxelsFn process_voxels_fn, + const ProcessTilesFn process_tiles_fn) +{ + using ChildNodeT = typename InternalNodeT::ChildNodeType; + using LeafNodeT = typename InternalNodeT::LeafNodeType; + using NodeMaskT = typename InternalNodeT::NodeMaskType; + using UnionT = typename InternalNodeT::UnionType; + + /* Gather the active sub-nodes first, to be able to parallelize over them more easily. */ + const NodeMaskT &child_mask = node.getChildMask(); + const UnionT *table = node.getTable(); + Vector child_indices; + for (auto child_mask_iter = child_mask.beginOn(); child_mask_iter.test(); ++child_mask_iter) { + child_indices.append(child_mask_iter.pos()); + } + + threading::parallel_for(child_indices.index_range(), 8, [&](const IndexRange range) { + /* Voxels collected from potentially multiple leaf nodes to be processed in one batch. This + * inline buffer size is sufficient to avoid an allocation in all cases (a single standard leaf + * has 512 voxels). */ + Vector gathered_voxels; + for (const int child_index : child_indices.as_span().slice(range)) { + const ChildNodeT &child = *table[child_index].getChild(); + if constexpr (std::is_same_v) { + parallel_grid_topology_tasks_leaf_node(child, process_leaf_fn, gathered_voxels); + /* If enough voxels have been gathered, process them in one batch. */ + if (gathered_voxels.size() >= 512) { + process_voxels_fn(gathered_voxels); + gathered_voxels.clear(); + } + } + else { + /* Recurse into lower-level internal nodes. */ + parallel_grid_topology_tasks_internal_node( + child, process_leaf_fn, process_voxels_fn, process_tiles_fn); + } + } + /* Process any remaining voxels. */ + if (!gathered_voxels.is_empty()) { + process_voxels_fn(gathered_voxels); + gathered_voxels.clear(); + } + }); + + /* Process the active tiles within the internal node. Note that these are not processed above + * already because there only sub-nodes are handled, but tiles are "inlined" into internal nodes. + * All tiles are first gathered and then processed in one batch. 
*/ + const NodeMaskT &value_mask = node.getValueMask(); + Vector tile_bboxes; + for (auto value_mask_iter = value_mask.beginOn(); value_mask_iter.test(); ++value_mask_iter) { + const openvdb::Index32 index = value_mask_iter.pos(); + const openvdb::Coord tile_origin = node.offsetToGlobalCoord(index); + const openvdb::CoordBBox tile_bbox = openvdb::CoordBBox::createCube(tile_origin, + ChildNodeT::DIM); + tile_bboxes.append(tile_bbox); + } + if (!tile_bboxes.is_empty()) { + process_tiles_fn(tile_bboxes); + } +} + +/* Call the process functions on all active tiles and voxels in the given tree. */ +void parallel_grid_topology_tasks(const openvdb::MaskTree &mask_tree, + const ProcessLeafFn process_leaf_fn, + const ProcessVoxelsFn process_voxels_fn, + const ProcessTilesFn process_tiles_fn) +{ + /* Iterate over the root internal nodes. */ + for (auto root_child_iter = mask_tree.cbeginRootChildren(); root_child_iter.test(); + ++root_child_iter) + { + const auto &internal_node = *root_child_iter; + parallel_grid_topology_tasks_internal_node( + internal_node, process_leaf_fn, process_voxels_fn, process_tiles_fn); + } +} + +openvdb::GridBase::Ptr create_grid_with_topology(const openvdb::MaskTree &topology, + const openvdb::math::Transform &transform, + const VolumeGridType grid_type) +{ + openvdb::GridBase::Ptr grid; + BKE_volume_grid_type_to_static_type(grid_type, [&](auto type_tag) { + using GridT = typename decltype(type_tag)::type; + using TreeT = typename GridT::TreeType; + using ValueType = typename TreeT::ValueType; + const ValueType background{}; + auto tree = std::make_shared(topology, background, openvdb::TopologyCopy()); + grid = openvdb::createGrid(std::move(tree)); + grid->setTransform(transform.copy()); + }); + return grid; +} + +void set_grid_values(openvdb::GridBase &grid_base, + const GSpan values, + const Span voxels) +{ + BLI_assert(values.size() == voxels.size()); + to_typed_grid(grid_base, [&](auto &grid) { + using GridT = std::decay_t; + using ValueType = typename GridT::ValueType; + const ValueType *data = static_cast(values.data()); + + auto accessor = grid.getUnsafeAccessor(); + for (const int64_t i : voxels.index_range()) { + accessor.setValue(voxels[i], data[i]); + } + }); +} + +void set_tile_values(openvdb::GridBase &grid_base, + const GSpan values, + const Span tiles) +{ + BLI_assert(values.size() == tiles.size()); + to_typed_grid(grid_base, [&](auto &grid) { + using GridT = typename std::decay_t; + using TreeT = typename GridT::TreeType; + using ValueType = typename GridT::ValueType; + auto &tree = grid.tree(); + + const ValueType *computed_values = static_cast(values.data()); + + const auto set_tile_value = [&](auto &node, const openvdb::Coord &coord_in_tile, auto value) { + const openvdb::Index n = node.coordToOffset(coord_in_tile); + BLI_assert(node.isChildMaskOff(n)); + /* TODO: Figure out how to do this without const_cast, although the same is done in + * `openvdb_ax/openvdb_ax/compiler/VolumeExecutable.cc` which has a similar purpose. + * It seems like OpenVDB generally allows that, but it does not have a proper public + * API for this yet. 
*/ + using UnionType = typename std::decay_t::UnionType; + auto *table = const_cast(node.getTable()); + table[n].setValue(value); + }; + + for (const int i : tiles.index_range()) { + const openvdb::CoordBBox tile = tiles[i]; + const openvdb::Coord coord_in_tile = tile.min(); + const auto &computed_value = computed_values[i]; + using InternalNode1 = typename TreeT::RootNodeType::ChildNodeType; + using InternalNode2 = typename InternalNode1::ChildNodeType; + /* Find the internal node that contains the tile and update the value in there. */ + if (auto *node = tree.template probeNode(coord_in_tile)) { + set_tile_value(*node, coord_in_tile, computed_value); + } + else if (auto *node = tree.template probeNode(coord_in_tile)) { + set_tile_value(*node, coord_in_tile, computed_value); + } + else { + BLI_assert_unreachable(); + } + } + }); +} + +void set_mask_leaf_buffer_from_bools(openvdb::BoolGrid &grid, + const Span values, + const IndexMask &index_mask, + const Span voxels) +{ + auto accessor = grid.getUnsafeAccessor(); + /* Could probably use int16_t for the iteration index. Double check this. */ + index_mask.foreach_index_optimized([&](const int i) { + const openvdb::Coord &coord = voxels[i]; + accessor.setValue(coord, values[i]); + }); +} + +void set_grid_background(openvdb::GridBase &grid_base, const GPointer value) +{ + to_typed_grid(grid_base, [&](auto &grid) { + using GridT = std::decay_t; + using ValueType = typename GridT::ValueType; + auto &tree = grid.tree(); + + BLI_assert(value.type()->size == sizeof(ValueType)); + tree.root().setBackground(*static_cast(value.get()), true); + }); +} + } // namespace blender::bke::volume_grid diff --git a/source/blender/makesdna/DNA_node_types.h b/source/blender/makesdna/DNA_node_types.h index fb8b3e64079..e5a857694f1 100644 --- a/source/blender/makesdna/DNA_node_types.h +++ b/source/blender/makesdna/DNA_node_types.h @@ -2408,6 +2408,24 @@ typedef struct NodeIndexSwitch { #endif } NodeIndexSwitch; +typedef struct GeometryNodeFieldToGridItem { + /** #eNodeSocketDatatype. */ + int8_t data_type; + char _pad[3]; + int identifier; + char *name; +} GeometryNodeFieldToGridItem; + +typedef struct GeometryNodeFieldToGrid { + /** #eNodeSocketDatatype. */ + int8_t data_type; + char _pad[3]; + int next_identifier; + GeometryNodeFieldToGridItem *items; + int items_num; + int active_index; +} GeometryNodeFieldToGrid; + typedef struct NodeGeometryDistributePointsInVolume { /** #GeometryNodePointDistributeVolumeMode. 
*/ uint8_t mode; diff --git a/source/blender/makesrna/intern/rna_nodetree.cc b/source/blender/makesrna/intern/rna_nodetree.cc index 5bc5a6b3c28..c75e66402ca 100644 --- a/source/blender/makesrna/intern/rna_nodetree.cc +++ b/source/blender/makesrna/intern/rna_nodetree.cc @@ -651,6 +651,7 @@ static const EnumPropertyItem node_cryptomatte_layer_name_items[] = { # include "NOD_geo_bundle.hh" # include "NOD_geo_capture_attribute.hh" # include "NOD_geo_closure.hh" +# include "NOD_geo_field_to_grid.hh" # include "NOD_geo_foreach_geometry_element.hh" # include "NOD_geo_index_switch.hh" # include "NOD_geo_menu_switch.hh" @@ -684,6 +685,7 @@ using blender::nodes::ClosureOutputItemsAccessor; using blender::nodes::CombineBundleItemsAccessor; using blender::nodes::EvaluateClosureInputItemsAccessor; using blender::nodes::EvaluateClosureOutputItemsAccessor; +using blender::nodes::FieldToGridItemsAccessor; using blender::nodes::FileOutputItemsAccessor; using blender::nodes::ForeachGeometryElementGenerationItemsAccessor; using blender::nodes::ForeachGeometryElementInputItemsAccessor; @@ -3845,6 +3847,19 @@ static IndexSwitchItem *rna_NodeIndexSwitchItems_new(ID *id, bNode *node, Main * return new_item; } +/* The same as #grid_socket_type_items_filter_fn. */ +static const EnumPropertyItem *rna_NodeFieldToGridItem_data_type_itemf(bContext * /*C*/, + PointerRNA * /*ptr*/, + PropertyRNA * /*prop*/, + bool *r_free) +{ + *r_free = true; + return itemf_function_check( + rna_enum_node_socket_data_type_items, [](const EnumPropertyItem *item) { + return blender::nodes::socket_type_supports_grids(eNodeSocketDatatype(item->value)); + }); +} + static const EnumPropertyItem *rna_NodeGeometryCaptureAttributeItem_data_type_itemf( bContext * /*C*/, PointerRNA * /*ptr*/, PropertyRNA * /*prop*/, bool *r_free) { @@ -8098,6 +8113,80 @@ static void def_geo_index_switch(BlenderRNA *brna, StructRNA *srna) RNA_def_property_srna(prop, "NodeIndexSwitchItems"); } +static void rna_def_geo_field_to_grid_item(BlenderRNA *brna) +{ + PropertyRNA *prop; + + StructRNA *srna = RNA_def_struct(brna, "GeometryNodeFieldToGridItem", nullptr); + RNA_def_struct_ui_text(srna, "Field to Grid Item", ""); + RNA_def_struct_sdna(srna, "GeometryNodeFieldToGridItem"); + + rna_def_node_item_array_socket_item_common(srna, "FieldToGridItemsAccessor", false); + prop = RNA_def_property(srna, "data_type", PROP_ENUM, PROP_NONE); + RNA_def_property_enum_items(prop, rna_enum_node_socket_data_type_items); + RNA_def_property_enum_funcs(prop, nullptr, nullptr, "rna_NodeFieldToGridItem_data_type_itemf"); + RNA_def_property_ui_text(prop, "Data Type", ""); + RNA_def_property_clear_flag(prop, PROP_ANIMATABLE); + RNA_def_property_update( + prop, NC_NODE | NA_EDITED, "rna_Node_ItemArray_item_update"); + + prop = RNA_def_property(srna, "identifier", PROP_INT, PROP_NONE); + RNA_def_property_clear_flag(prop, PROP_EDITABLE); +} + +static void rna_def_geo_field_to_grid_items(BlenderRNA *brna) +{ + StructRNA *srna = RNA_def_struct(brna, "GeometryNodeFieldToGridItems", nullptr); + RNA_def_struct_ui_text(srna, "Items", "Collection of field to grid items"); + RNA_def_struct_sdna(srna, "bNode"); + + rna_def_node_item_array_new_with_socket_and_name( + srna, "GeometryNodeFieldToGridItem", "FieldToGridItemsAccessor"); + rna_def_node_item_array_common_functions( + srna, "GeometryNodeFieldToGridItem", "FieldToGridItemsAccessor"); +} + +static void def_geo_field_to_grid(BlenderRNA *brna, StructRNA *srna) +{ + PropertyRNA *prop; + + rna_def_geo_field_to_grid_item(brna); + 
rna_def_geo_field_to_grid_items(brna); + + RNA_def_struct_sdna_from(srna, "GeometryNodeFieldToGrid", "storage"); + + prop = RNA_def_property(srna, "grid_items", PROP_COLLECTION, PROP_NONE); + RNA_def_property_collection_sdna(prop, nullptr, "items", "items_num"); + RNA_def_property_struct_type(prop, "GeometryNodeFieldToGridItem"); + RNA_def_property_ui_text(prop, "Items", ""); + RNA_def_property_srna(prop, "GeometryNodeFieldToGridItems"); + + prop = RNA_def_property(srna, "active_index", PROP_INT, PROP_UNSIGNED); + RNA_def_property_int_sdna(prop, nullptr, "active_index"); + RNA_def_property_ui_text(prop, "Active Item Index", "Index of the active item"); + RNA_def_property_clear_flag(prop, PROP_ANIMATABLE); + RNA_def_property_flag(prop, PROP_NO_DEG_UPDATE); + RNA_def_property_update(prop, NC_NODE, nullptr); + + prop = RNA_def_property(srna, "active_item", PROP_POINTER, PROP_NONE); + RNA_def_property_struct_type(prop, "RepeatItem"); + RNA_def_property_pointer_funcs(prop, + "rna_Node_ItemArray_active_get", + "rna_Node_ItemArray_active_set", + nullptr, + nullptr); + RNA_def_property_flag(prop, PROP_EDITABLE | PROP_NO_DEG_UPDATE); + RNA_def_property_ui_text(prop, "Active Item Index", "Index of the active item"); + RNA_def_property_update(prop, NC_NODE, nullptr); + + prop = RNA_def_property(srna, "data_type", PROP_ENUM, PROP_NONE); + RNA_def_property_enum_items(prop, rna_enum_node_socket_data_type_items); + RNA_def_property_enum_funcs(prop, nullptr, nullptr, "rna_NodeFieldToGridItem_data_type_itemf"); + RNA_def_property_ui_text(prop, "Data Type", "Data type for topology grid"); + RNA_def_property_clear_flag(prop, PROP_ANIMATABLE); + RNA_def_property_update(prop, NC_NODE | NA_EDITED, "rna_Node_socket_update"); +} + static void rna_def_fn_format_string_item(BlenderRNA *brna) { StructRNA *srna; @@ -10063,6 +10152,7 @@ static void rna_def_nodes(BlenderRNA *brna) define("GeometryNode", "GeometryNodeFieldAverage"); define("GeometryNode", "GeometryNodeFieldMinAndMax"); define("GeometryNode", "GeometryNodeFieldOnDomain"); + define("GeometryNode", "GeometryNodeFieldToGrid", def_geo_field_to_grid); define("GeometryNode", "GeometryNodeFieldVariance"); define("GeometryNode", "GeometryNodeFillCurve"); define("GeometryNode", "GeometryNodeFilletCurve"); diff --git a/source/blender/nodes/geometry/CMakeLists.txt b/source/blender/nodes/geometry/CMakeLists.txt index b178b6e0f37..af7e8298685 100644 --- a/source/blender/nodes/geometry/CMakeLists.txt +++ b/source/blender/nodes/geometry/CMakeLists.txt @@ -83,6 +83,7 @@ set(SRC nodes/node_geo_evaluate_at_index.cc nodes/node_geo_evaluate_closure.cc nodes/node_geo_evaluate_on_domain.cc + nodes/node_geo_field_to_grid.cc nodes/node_geo_extrude_mesh.cc nodes/node_geo_field_average.cc nodes/node_geo_field_min_and_max.cc @@ -255,6 +256,7 @@ set(SRC include/NOD_geo_bundle.hh include/NOD_geo_capture_attribute.hh include/NOD_geo_closure.hh + include/NOD_geo_field_to_grid.hh include/NOD_geo_foreach_geometry_element.hh include/NOD_geo_index_switch.hh include/NOD_geo_menu_switch.hh diff --git a/source/blender/nodes/geometry/include/NOD_geo_field_to_grid.hh b/source/blender/nodes/geometry/include/NOD_geo_field_to_grid.hh new file mode 100644 index 00000000000..738a1c4bcf6 --- /dev/null +++ b/source/blender/nodes/geometry/include/NOD_geo_field_to_grid.hh @@ -0,0 +1,95 @@ +/* SPDX-FileCopyrightText: 2025 Blender Authors + * + * SPDX-License-Identifier: GPL-2.0-or-later */ + +#pragma once + +#include "DNA_node_types.h" + +#include "NOD_socket_items.hh" + +namespace blender::nodes { + 
+/** + * Makes it possible to use various functions (e.g. the ones in `NOD_socket_items.hh`) for field + * to grid items. + */ +struct FieldToGridItemsAccessor : public socket_items::SocketItemsAccessorDefaults { + using ItemT = GeometryNodeFieldToGridItem; + static StructRNA *item_srna; + static int node_type; + static constexpr StringRefNull node_idname = "GeometryNodeFieldToGrid"; + static constexpr bool has_type = true; + static constexpr bool has_name = true; + static constexpr bool has_single_identifier_str = false; + struct operator_idnames { + static constexpr StringRefNull add_item = "NODE_OT_field_to_grid_item_add"; + static constexpr StringRefNull remove_item = "NODE_OT_field_to_grid_item_remove"; + static constexpr StringRefNull move_item = "NODE_OT_field_to_grid_item_move"; + }; + struct ui_idnames { + static constexpr StringRefNull list = "NODE_UL_field_to_grid_items"; + }; + struct rna_names { + static constexpr StringRefNull items = "grid_items"; + static constexpr StringRefNull active_index = "active_index"; + }; + + static socket_items::SocketItemsRef get_items_from_node(bNode &node) + { + auto &storage = *static_cast(node.storage); + return {&storage.items, &storage.items_num, &storage.active_index}; + } + + static void copy_item(const GeometryNodeFieldToGridItem &src, GeometryNodeFieldToGridItem &dst) + { + dst = src; + dst.name = BLI_strdup_null(dst.name); + } + + static void destruct_item(GeometryNodeFieldToGridItem *item) + { + MEM_SAFE_FREE(item->name); + } + + static void blend_write_item(BlendWriter *writer, const ItemT &item); + static void blend_read_data_item(BlendDataReader *reader, ItemT &item); + + static eNodeSocketDatatype get_socket_type(const ItemT &item) + { + return eNodeSocketDatatype(item.data_type); + } + + static bool supports_socket_type(const eNodeSocketDatatype socket_type, const int /*ntree_type*/) + { + return socket_type_supports_grids(socket_type); + } + + static char **get_name(GeometryNodeFieldToGridItem &item) + { + return &item.name; + } + + static void init_with_socket_type_and_name(bNode &node, + GeometryNodeFieldToGridItem &item, + const eNodeSocketDatatype socket_type, + const char *name) + { + auto *storage = static_cast(node.storage); + item.data_type = socket_type; + item.identifier = storage->next_identifier++; + socket_items::set_item_name_and_make_unique(node, item, name); + } + + static std::string input_socket_identifier_for_item(const GeometryNodeFieldToGridItem &item) + { + return "Field_" + std::to_string(item.identifier); + } + + static std::string output_socket_identifier_for_item(const GeometryNodeFieldToGridItem &item) + { + return "Grid_" + std::to_string(item.identifier); + } +}; + +} // namespace blender::nodes diff --git a/source/blender/nodes/geometry/nodes/node_geo_field_to_grid.cc b/source/blender/nodes/geometry/nodes/node_geo_field_to_grid.cc new file mode 100644 index 00000000000..8a92560714d --- /dev/null +++ b/source/blender/nodes/geometry/nodes/node_geo_field_to_grid.cc @@ -0,0 +1,445 @@ +/* SPDX-FileCopyrightText: 2025 Blender Authors + * + * SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "NOD_socket_search_link.hh" +#include "node_geometry_util.hh" + +#include "NOD_geo_field_to_grid.hh" +#include "NOD_socket_items_blend.hh" +#include "NOD_socket_items_ops.hh" +#include "NOD_socket_items_ui.hh" + +#include "UI_interface_layout.hh" +#include "UI_resources.hh" + +#include "RNA_enum_types.hh" +#include "RNA_prototypes.hh" + +#include "BLO_read_write.hh" + +#ifdef WITH_OPENVDB +# include 
"BKE_volume_grid_fields.hh" +# include "BKE_volume_grid_process.hh" +#endif + +namespace blender::nodes::node_geo_field_to_grid_cc { + +NODE_STORAGE_FUNCS(GeometryNodeFieldToGrid) +using ItemsAccessor = FieldToGridItemsAccessor; + +namespace grid = bke::volume_grid; + +static void node_declare(NodeDeclarationBuilder &b) +{ + b.use_custom_socket_order(); + b.allow_any_socket_order(); + b.add_default_layout(); + + const bNode *node = b.node_or_null(); + if (!node) { + return; + } + const GeometryNodeFieldToGrid &storage = node_storage(*node); + const eNodeSocketDatatype data_type = eNodeSocketDatatype(storage.data_type); + + b.add_input(data_type, "Topology").structure_type(StructureType::Grid); + + const Span items(storage.items, storage.items_num); + for (const int i : items.index_range()) { + const GeometryNodeFieldToGridItem &item = items[i]; + const eNodeSocketDatatype data_type = eNodeSocketDatatype(item.data_type); + const std::string input_identifier = ItemsAccessor::input_socket_identifier_for_item(item); + const std::string output_identifier = ItemsAccessor::output_socket_identifier_for_item(item); + + b.add_input(data_type, item.name, input_identifier).supports_field(); + b.add_output(data_type, item.name, output_identifier) + .structure_type(StructureType::Grid) + .align_with_previous() + .description("Output grid with evaluated field values"); + } + + b.add_input("", "__extend__").structure_type(StructureType::Field); + b.add_output("", "__extend__") + .structure_type(StructureType::Grid) + .align_with_previous(); +} + +static void node_layout(uiLayout *layout, bContext * /*C*/, PointerRNA *ptr) +{ + layout->prop(ptr, "data_type", UI_ITEM_NONE, "", ICON_NONE); +} + +static void node_layout_ex(uiLayout *layout, bContext *C, PointerRNA *ptr) +{ + bNodeTree &tree = *reinterpret_cast(ptr->owner_id); + bNode &node = *static_cast(ptr->data); + if (uiLayout *panel = layout->panel(C, "field_to_grid_items", false, IFACE_("Fields"))) { + socket_items::ui::draw_items_list_with_operators(C, panel, tree, node); + socket_items::ui::draw_active_item_props(tree, node, [&](PointerRNA *item_ptr) { + panel->use_property_split_set(true); + panel->use_property_decorate_set(false); + panel->prop(item_ptr, "data_type", UI_ITEM_NONE, std::nullopt, ICON_NONE); + }); + } +} + +static std::optional node_type_for_socket_type(const bNodeSocket &socket) +{ + switch (socket.type) { + case SOCK_FLOAT: + return SOCK_FLOAT; + case SOCK_BOOLEAN: + return SOCK_BOOLEAN; + case SOCK_INT: + return SOCK_INT; + case SOCK_VECTOR: + case SOCK_RGBA: + return SOCK_VECTOR; + default: + return std::nullopt; + } +} + +static void node_gather_link_search_ops(GatherLinkSearchOpParams ¶ms) +{ + if (!USER_EXPERIMENTAL_TEST(&U, use_new_volume_nodes)) { + return; + } + const std::optional data_type = node_type_for_socket_type( + params.other_socket()); + if (!data_type) { + return; + } + if (params.in_out() == SOCK_IN) { + params.add_item(IFACE_("Topology"), [data_type](LinkSearchOpParams ¶ms) { + bNode &node = params.add_node("GeometryNodeFieldToGrid"); + node_storage(node).data_type = *data_type; + params.update_and_connect_available_socket(node, "Topology"); + }); + params.add_item(IFACE_("Field"), [data_type](LinkSearchOpParams ¶ms) { + bNode &node = params.add_node("GeometryNodeFieldToGrid"); + socket_items::add_item_with_socket_type_and_name( + params.node_tree, node, *data_type, params.socket.name); + params.update_and_connect_available_socket(node, params.socket.name); + }); + } + else { + params.add_item(IFACE_("Grid"), 
[data_type](LinkSearchOpParams ¶ms) { + bNode &node = params.add_node("GeometryNodeFieldToGrid"); + socket_items::add_item_with_socket_type_and_name( + params.node_tree, node, *data_type, params.socket.name); + params.update_and_connect_available_socket(node, params.socket.name); + }); + } +} + +#ifdef WITH_OPENVDB +BLI_NOINLINE static void process_leaf_node(const Span fields, + const openvdb::math::Transform &transform, + const grid::LeafNodeMask &leaf_node_mask, + const openvdb::CoordBBox &leaf_bbox, + const grid::GetVoxelsFn get_voxels_fn, + const Span output_grids) +{ + AlignedBuffer<8192, 8> allocation_buffer; + ResourceScope scope; + scope.allocator().provide_buffer(allocation_buffer); + + IndexMaskMemory memory; + const IndexMask index_mask = IndexMask::from_predicate( + IndexRange(grid::LeafNodeMask::SIZE), + GrainSize(grid::LeafNodeMask::SIZE), + memory, + [&](const int64_t i) { return leaf_node_mask.isOn(i); }); + + const openvdb::Coord any_voxel_in_leaf = leaf_bbox.min(); + MutableSpan voxels = scope.allocator().allocate_array( + index_mask.min_array_size()); + get_voxels_fn(voxels); + + bke::VoxelFieldContext field_context{transform, voxels}; + fn::FieldEvaluator evaluator{field_context, &index_mask}; + + Array> boolean_outputs(fields.size()); + for (const int i : fields.index_range()) { + const CPPType &type = fields[i].cpp_type(); + grid::to_typed_grid(*output_grids[i], [&](auto &grid) { + using GridT = typename std::decay_t; + using ValueT = typename GridT::ValueType; + + auto &tree = grid.tree(); + auto *leaf_node = tree.probeLeaf(any_voxel_in_leaf); + /* Should have been added before. */ + BLI_assert(leaf_node); + + /* Boolean grids are special because they encode the values as bitmask. */ + if constexpr (std::is_same_v) { + boolean_outputs[i] = scope.allocator().allocate_array(index_mask.min_array_size()); + evaluator.add_with_destination(fields[i], boolean_outputs[i]); + } + else { + /* Write directly into the buffer of the output leaf node. 
*/ + ValueT *buffer = leaf_node->buffer().data(); + evaluator.add_with_destination(fields[i], + GMutableSpan(type, buffer, grid::LeafNodeMask::SIZE)); + } + }); + } + + evaluator.evaluate(); + + for (const int i : fields.index_range()) { + if (!boolean_outputs[i].is_empty()) { + grid::set_mask_leaf_buffer_from_bools(static_cast(*output_grids[i]), + boolean_outputs[i], + index_mask, + voxels); + } + } +} + +BLI_NOINLINE static void process_voxels(const Span fields, + const openvdb::math::Transform &transform, + const Span voxels, + const Span output_grids) +{ + const int64_t voxels_num = voxels.size(); + AlignedBuffer<8192, 8> allocation_buffer; + ResourceScope scope; + scope.allocator().provide_buffer(allocation_buffer); + + bke::VoxelFieldContext field_context{transform, voxels}; + fn::FieldEvaluator evaluator{field_context, voxels_num}; + + Array output_values(output_grids.size()); + for (const int i : fields.index_range()) { + const CPPType &type = fields[i].cpp_type(); + output_values[i] = {type, scope.allocator().allocate_array(type, voxels_num), voxels_num}; + evaluator.add_with_destination(fields[i], output_values[i]); + } + evaluator.evaluate(); + + for (const int i : fields.index_range()) { + grid::set_grid_values(*output_grids[i], output_values[i], voxels); + } +} + +BLI_NOINLINE static void process_tiles(const Span fields, + const openvdb::math::Transform &transform, + const Span tiles, + const Span output_grids) +{ + const int64_t tiles_num = tiles.size(); + AlignedBuffer<8192, 8> allocation_buffer; + ResourceScope scope; + scope.allocator().provide_buffer(allocation_buffer); + + bke::TilesFieldContext field_context{transform, tiles}; + fn::FieldEvaluator evaluator{field_context, tiles_num}; + + Array output_values(output_grids.size()); + for (const int i : fields.index_range()) { + const CPPType &type = fields[i].cpp_type(); + output_values[i] = {type, scope.allocator().allocate_array(type, tiles_num), tiles_num}; + evaluator.add_with_destination(fields[i], output_values[i]); + } + evaluator.evaluate(); + + for (const int i : fields.index_range()) { + grid::set_tile_values(*output_grids[i], output_values[i], tiles); + } +} + +BLI_NOINLINE static void process_background(const Span fields, + const openvdb::math::Transform &transform, + const Span output_grids) +{ + AlignedBuffer<256, 8> allocation_buffer; + ResourceScope scope; + scope.allocator().provide_buffer(allocation_buffer); + + static const openvdb::CoordBBox background_space = openvdb::CoordBBox::inf(); + bke::TilesFieldContext field_context(transform, Span(&background_space, 1)); + fn::FieldEvaluator evaluator(field_context, 1); + + Array output_values(output_grids.size()); + for (const int i : fields.index_range()) { + const CPPType &type = fields[i].cpp_type(); + output_values[i] = {type, scope.allocator().allocate(type)}; + evaluator.add_with_destination(fields[i], GMutableSpan{type, output_values[i].get(), 1}); + } + evaluator.evaluate(); + + for (const int i : fields.index_range()) { + grid::set_grid_background(*output_grids[i], output_values[i]); + } +} +#endif + +static void node_geo_exec(GeoNodeExecParams params) +{ +#ifdef WITH_OPENVDB + const GeometryNodeFieldToGrid &storage = node_storage(params.node()); + const Span items(storage.items, storage.items_num); + bke::GVolumeGrid topology_grid = params.extract_input("Topology"); + if (!topology_grid) { + params.error_message_add(NodeWarningType::Error, "The topology grid input is required"); + params.set_default_remaining_outputs(); + return; + } + + 
bke::VolumeTreeAccessToken tree_token; + const openvdb::GridBase &topology_base = topology_grid->grid(tree_token); + const openvdb::math::Transform &transform = topology_base.transform(); + + Vector required_items; + for (const int i : items.index_range()) { + if (params.output_is_required(ItemsAccessor::output_socket_identifier_for_item(items[i]))) { + required_items.append(i); + } + } + + Vector fields(required_items.size()); + for (const int i : required_items.index_range()) { + const int input_i = required_items[i]; + const std::string identifier = ItemsAccessor::input_socket_identifier_for_item(items[input_i]); + fields[i] = params.extract_input(identifier); + } + + openvdb::MaskTree mask_tree; + grid::to_typed_grid(topology_base, + [&](const auto &grid) { mask_tree.topologyUnion(grid.tree()); }); + + Vector output_grids(required_items.size()); + for (const int i : required_items.index_range()) { + const eNodeSocketDatatype socket_type = eNodeSocketDatatype(items[i].data_type); + const VolumeGridType grid_type = *bke::socket_type_to_grid_type(socket_type); + output_grids[i] = grid::create_grid_with_topology(mask_tree, transform, grid_type); + } + + grid::parallel_grid_topology_tasks( + mask_tree, + [&](const grid::LeafNodeMask &leaf_node_mask, + const openvdb::CoordBBox &leaf_bbox, + const grid::GetVoxelsFn get_voxels_fn) { + process_leaf_node( + fields, transform, leaf_node_mask, leaf_bbox, get_voxels_fn, output_grids); + }, + [&](const Span voxels) { + process_voxels(fields, transform, voxels, output_grids); + }, + [&](const Span tiles) { + process_tiles(fields, transform, tiles, output_grids); + }); + + process_background(fields, transform, output_grids); + + for (const int i : required_items.index_range()) { + const int output_i = required_items[i]; + const std::string identifier = ItemsAccessor::output_socket_identifier_for_item( + items[output_i]); + params.set_output(identifier, bke::GVolumeGrid(std::move(output_grids[i]))); + } + +#else + node_geo_exec_with_missing_openvdb(params); +#endif +} + +static void node_init(bNodeTree *tree, bNode *node) +{ + GeometryNodeFieldToGrid *data = MEM_callocN(__func__); + data->data_type = SOCK_FLOAT; + node->storage = data; + socket_items::add_item_with_socket_type_and_name( + *tree, *node, SOCK_FLOAT, "Value"); +} + +static void node_free_storage(bNode *node) +{ + socket_items::destruct_array(*node); + MEM_freeN(node->storage); +} + +static void node_copy_storage(bNodeTree * /*dst_tree*/, bNode *dst_node, const bNode *src_node) +{ + const GeometryNodeFieldToGrid &src_storage = node_storage(*src_node); + auto *dst_storage = MEM_dupallocN(__func__, src_storage); + dst_node->storage = dst_storage; + + socket_items::copy_array(*src_node, *dst_node); +} + +static void node_operators() +{ + socket_items::ops::make_common_operators(); +} + +static bool node_insert_link(bke::NodeInsertLinkParams ¶ms) +{ + return socket_items::try_add_item_via_any_extend_socket( + params.ntree, params.node, params.node, params.link); +} + +static void node_blend_write(const bNodeTree & /*tree*/, const bNode &node, BlendWriter &writer) +{ + socket_items::blend_write(&writer, node); +} + +static void node_blend_read(bNodeTree & /*tree*/, bNode &node, BlendDataReader &reader) +{ + socket_items::blend_read_data(&reader, node); +} + +static const bNodeSocket *node_internally_linked_input(const bNodeTree & /*tree*/, + const bNode &node, + const bNodeSocket &output_socket) +{ + return node.input_by_identifier(output_socket.identifier); +} + +static void node_register() +{ 
+ static blender::bke::bNodeType ntype; + + geo_node_type_base(&ntype, "GeometryNodeFieldToGrid"); + ntype.ui_name = "Field to Grid"; + ntype.ui_description = + "Create new grids by evaluating new values on an existing volume grid topology"; + ntype.nclass = NODE_CLASS_GEOMETRY; + ntype.declare = node_declare; + ntype.initfunc = node_init; + blender::bke::node_type_storage( + ntype, "GeometryNodeFieldToGrid", node_free_storage, node_copy_storage); + ntype.geometry_node_execute = node_geo_exec; + ntype.draw_buttons = node_layout; + ntype.draw_buttons_ex = node_layout_ex; + ntype.register_operators = node_operators; + ntype.insert_link = node_insert_link; + ntype.ignore_inferred_input_socket_visibility = true; + ntype.gather_link_search_ops = node_gather_link_search_ops; + ntype.internally_linked_input = node_internally_linked_input; + ntype.blend_write_storage_content = node_blend_write; + ntype.blend_data_read_storage_content = node_blend_read; + blender::bke::node_register_type(ntype); +} +NOD_REGISTER_NODE(node_register) + +} // namespace blender::nodes::node_geo_field_to_grid_cc + +namespace blender::nodes { + +StructRNA *FieldToGridItemsAccessor::item_srna = &RNA_GeometryNodeFieldToGridItem; + +void FieldToGridItemsAccessor::blend_write_item(BlendWriter *writer, const ItemT &item) +{ + BLO_write_string(writer, item.name); +} + +void FieldToGridItemsAccessor::blend_read_data_item(BlendDataReader *reader, ItemT &item) +{ + BLO_read_string(reader, &item.name); +} + +} // namespace blender::nodes diff --git a/source/blender/nodes/intern/volume_grid_function_eval.cc b/source/blender/nodes/intern/volume_grid_function_eval.cc index 26f2d72d3ef..d77f6a5ea44 100644 --- a/source/blender/nodes/intern/volume_grid_function_eval.cc +++ b/source/blender/nodes/intern/volume_grid_function_eval.cc @@ -11,6 +11,7 @@ #include "BKE_node_socket_value.hh" #include "BKE_volume_grid.hh" #include "BKE_volume_grid_fields.hh" +#include "BKE_volume_grid_process.hh" #include "BKE_volume_openvdb.hh" #include @@ -27,44 +28,10 @@ namespace blender::nodes { +namespace grid = bke::volume_grid; + #ifdef WITH_OPENVDB -template -static constexpr bool is_supported_grid_type = is_same_any_v; - -template static void to_typed_grid(const openvdb::GridBase &grid_base, Fn &&fn) -{ - const VolumeGridType grid_type = bke::volume_grid::get_type(grid_base); - BKE_volume_grid_type_to_static_type(grid_type, [&](auto type_tag) { - using GridT = typename decltype(type_tag)::type; - if constexpr (is_supported_grid_type) { - fn(static_cast(grid_base)); - } - else { - BLI_assert_unreachable(); - } - }); -} - -template static void to_typed_grid(openvdb::GridBase &grid_base, Fn &&fn) -{ - const VolumeGridType grid_type = bke::volume_grid::get_type(grid_base); - BKE_volume_grid_type_to_static_type(grid_type, [&](auto type_tag) { - using GridT = typename decltype(type_tag)::type; - if constexpr (is_supported_grid_type) { - fn(static_cast(grid_base)); - } - else { - BLI_assert_unreachable(); - } - }); -} - static std::optional cpp_type_to_grid_type(const CPPType &cpp_type) { const std::optional cd_type = bke::cpp_type_to_custom_data_type(cpp_type); @@ -74,134 +41,6 @@ static std::optional cpp_type_to_grid_type(const CPPType &cpp_ty return bke::custom_data_type_to_volume_grid_type(*cd_type); } -using LeafNodeMask = openvdb::util::NodeMask<3u>; -using GetVoxelsFn = FunctionRef r_voxels)>; -using ProcessLeafFn = FunctionRef; -using ProcessTilesFn = FunctionRef tiles)>; -using ProcessVoxelsFn = FunctionRef voxels)>; - -/** - * Call #process_leaf_fn 
on the leaf node if it has a certain minimum number of active voxels. If - * there are only a few active voxels, gather those in #r_coords for later batch processing. - */ -template -static void parallel_grid_topology_tasks_leaf_node(const LeafNodeT &node, - const ProcessLeafFn process_leaf_fn, - Vector &r_coords) -{ - using NodeMaskT = typename LeafNodeT::NodeMaskType; - - const int on_count = node.onVoxelCount(); - /* This number is somewhat arbitrary. 64 is a 1/8th of the number of voxels in a standard leaf - * which is 8x8x8. It's a trade-off between benefiting from the better performance of - * leaf-processing vs. processing more voxels in a batch. */ - const int on_count_threshold = 64; - if (on_count <= on_count_threshold) { - /* The leaf contains only a few active voxels. It's beneficial to process them in a batch with - * active voxels from other leafs. So only gather them here for later processing. */ - for (auto value_iter = node.cbeginValueOn(); value_iter.test(); ++value_iter) { - const openvdb::Coord coord = value_iter.getCoord(); - r_coords.append(coord); - } - return; - } - /* Process entire leaf at once. This is especially beneficial when very many of the voxels in - * the leaf are active. In that case, one can work on the openvdb arrays stored in the leafs - * directly. */ - const NodeMaskT &value_mask = node.getValueMask(); - const openvdb::CoordBBox bbox = node.getNodeBoundingBox(); - process_leaf_fn(value_mask, bbox, [&](MutableSpan r_voxels) { - for (auto value_iter = node.cbeginValueOn(); value_iter.test(); ++value_iter) { - r_voxels[value_iter.pos()] = value_iter.getCoord(); - } - }); -} - -/** - * Calls the process functions on all the active tiles and voxels within the given internal node. - */ -template -static void parallel_grid_topology_tasks_internal_node(const InternalNodeT &node, - const ProcessLeafFn process_leaf_fn, - const ProcessVoxelsFn process_voxels_fn, - const ProcessTilesFn process_tiles_fn) -{ - using ChildNodeT = typename InternalNodeT::ChildNodeType; - using LeafNodeT = typename InternalNodeT::LeafNodeType; - using NodeMaskT = typename InternalNodeT::NodeMaskType; - using UnionT = typename InternalNodeT::UnionType; - - /* Gather the active sub-nodes first, to be able to parallelize over them more easily. */ - const NodeMaskT &child_mask = node.getChildMask(); - const UnionT *table = node.getTable(); - Vector child_indices; - for (auto child_mask_iter = child_mask.beginOn(); child_mask_iter.test(); ++child_mask_iter) { - child_indices.append(child_mask_iter.pos()); - } - - threading::parallel_for(child_indices.index_range(), 8, [&](const IndexRange range) { - /* Voxels collected from potentially multiple leaf nodes to be processed in one batch. This - * inline buffer size is sufficient to avoid an allocation in all cases (a single standard leaf - * has 512 voxels). */ - Vector gathered_voxels; - for (const int child_index : child_indices.as_span().slice(range)) { - const ChildNodeT &child = *table[child_index].getChild(); - if constexpr (std::is_same_v) { - parallel_grid_topology_tasks_leaf_node(child, process_leaf_fn, gathered_voxels); - /* If enough voxels have been gathered, process them in one batch. */ - if (gathered_voxels.size() >= 512) { - process_voxels_fn(gathered_voxels); - gathered_voxels.clear(); - } - } - else { - /* Recurse into lower-level internal nodes. */ - parallel_grid_topology_tasks_internal_node( - child, process_leaf_fn, process_voxels_fn, process_tiles_fn); - } - } - /* Process any remaining voxels. 
*/ - if (!gathered_voxels.is_empty()) { - process_voxels_fn(gathered_voxels); - gathered_voxels.clear(); - } - }); - - /* Process the active tiles within the internal node. Note that these are not processed above - * already because there only sub-nodes are handled, but tiles are "inlined" into internal nodes. - * All tiles are first gathered and then processed in one batch. */ - const NodeMaskT &value_mask = node.getValueMask(); - Vector tile_bboxes; - for (auto value_mask_iter = value_mask.beginOn(); value_mask_iter.test(); ++value_mask_iter) { - const openvdb::Index32 index = value_mask_iter.pos(); - const openvdb::Coord tile_origin = node.offsetToGlobalCoord(index); - const openvdb::CoordBBox tile_bbox = openvdb::CoordBBox::createCube(tile_origin, - ChildNodeT::DIM); - tile_bboxes.append(tile_bbox); - } - if (!tile_bboxes.is_empty()) { - process_tiles_fn(tile_bboxes); - } -} - -/* Call the process functions on all active tiles and voxels in the given tree. */ -static void parallel_grid_topology_tasks(const openvdb::MaskTree &mask_tree, - const ProcessLeafFn process_leaf_fn, - const ProcessVoxelsFn process_voxels_fn, - const ProcessTilesFn process_tiles_fn) -{ - /* Iterate over the root internal nodes. */ - for (auto root_child_iter = mask_tree.cbeginRootChildren(); root_child_iter.test(); - ++root_child_iter) - { - const auto &internal_node = *root_child_iter; - parallel_grid_topology_tasks_internal_node( - internal_node, process_leaf_fn, process_voxels_fn, process_tiles_fn); - } -} - /** * Call the multi-function in a batch on all active voxels in a leaf node. * @@ -221,16 +60,17 @@ BLI_NOINLINE static void process_leaf_node(const mf::MultiFunction &fn, const Span input_grids, MutableSpan output_grids, const openvdb::math::Transform &transform, - const LeafNodeMask &leaf_node_mask, + const grid::LeafNodeMask &leaf_node_mask, const openvdb::CoordBBox &leaf_bbox, - const GetVoxelsFn get_voxels_fn) + const grid::GetVoxelsFn get_voxels_fn) { /* Create an index mask for all the active voxels in the leaf. */ IndexMaskMemory memory; const IndexMask index_mask = IndexMask::from_predicate( - IndexRange(LeafNodeMask::SIZE), GrainSize(LeafNodeMask::SIZE), memory, [&](const int64_t i) { - return leaf_node_mask.isOn(i); - }); + IndexRange(grid::LeafNodeMask::SIZE), + GrainSize(grid::LeafNodeMask::SIZE), + memory, + [&](const int64_t i) { return leaf_node_mask.isOn(i); }); AlignedBuffer<8192, 8> allocation_buffer; ResourceScope scope; @@ -259,7 +99,7 @@ BLI_NOINLINE static void process_leaf_node(const mf::MultiFunction &fn, if (const openvdb::GridBase *grid_base = input_grids[input_i]) { /* The input is a grid, so we can attempt to reference the grid values directly. 
*/ - to_typed_grid(*grid_base, [&](const auto &grid) { + grid::to_typed_grid(*grid_base, [&](const auto &grid) { using GridT = typename std::decay_t; using ValueT = typename GridT::ValueType; BLI_assert(param_cpp_type.size == sizeof(ValueT)); @@ -279,9 +119,9 @@ BLI_NOINLINE static void process_leaf_node(const mf::MultiFunction &fn, params.add_readonly_single_input(values); } else { - const Span values(leaf_node->buffer().data(), LeafNodeMask::SIZE); - const LeafNodeMask &input_leaf_mask = leaf_node->valueMask(); - const LeafNodeMask missing_mask = leaf_node_mask & !input_leaf_mask; + const Span values(leaf_node->buffer().data(), grid::LeafNodeMask::SIZE); + const grid::LeafNodeMask &input_leaf_mask = leaf_node->valueMask(); + const grid::LeafNodeMask missing_mask = leaf_node_mask & !input_leaf_mask; if (missing_mask.isOff()) { /* All values available, so reference the data directly. */ params.add_readonly_single_input( @@ -338,7 +178,7 @@ BLI_NOINLINE static void process_leaf_node(const mf::MultiFunction &fn, } openvdb::GridBase &grid_base = *output_grids[output_i]; - to_typed_grid(grid_base, [&](auto &grid) { + grid::to_typed_grid(grid_base, [&](auto &grid) { using GridT = typename std::decay_t; using ValueT = typename GridT::ValueType; @@ -357,7 +197,7 @@ BLI_NOINLINE static void process_leaf_node(const mf::MultiFunction &fn, /* Write directly into the buffer of the output leaf node. */ ValueT *values = leaf_node->buffer().data(); params.add_uninitialized_single_output( - GMutableSpan(param_cpp_type, values, LeafNodeMask::SIZE)); + GMutableSpan(param_cpp_type, values, grid::LeafNodeMask::SIZE)); } }); } @@ -373,14 +213,11 @@ BLI_NOINLINE static void process_leaf_node(const mf::MultiFunction &fn, if (!param_cpp_type.is()) { continue; } - openvdb::BoolGrid &grid = static_cast(*output_grids[output_i]); - const Span values = params.computed_array(param_index).typed(); - auto accessor = grid.getUnsafeAccessor(); - const Span voxels = ensure_voxel_coords(); - index_mask.foreach_index([&](const int64_t i) { - const openvdb::Coord &coord = voxels[i]; - accessor.setValue(coord, values[i]); - }); + grid::set_mask_leaf_buffer_from_bools( + static_cast(*output_grids[output_i]), + params.computed_array(param_index).typed(), + index_mask, + ensure_voxel_coords()); } } @@ -417,7 +254,7 @@ BLI_NOINLINE static void process_voxels(const mf::MultiFunction &fn, if (const openvdb::GridBase *grid_base = input_grids[input_i]) { /* Retrieve all voxel values from the input grid. */ - to_typed_grid(*grid_base, [&](const auto &grid) { + grid::to_typed_grid(*grid_base, [&](const auto &grid) { using ValueType = typename std::decay_t::ValueType; const auto &tree = grid.tree(); /* Could try to cache the accessor across batches, but it's not straight forward since its @@ -435,7 +272,8 @@ BLI_NOINLINE static void process_voxels(const mf::MultiFunction &fn, }); } else if (value_variant.is_context_dependent_field()) { - /* Evaluate the field on all voxels. */ + /* Evaluate the field on all voxels. + * TODO: Collect fields from all inputs to evaluate together. 
*/ const fn::GField field = value_variant.get(); const CPPType &type = field.cpp_type(); bke::VoxelFieldContext field_context{transform, voxels}; @@ -469,21 +307,8 @@ BLI_NOINLINE static void process_voxels(const mf::MultiFunction &fn, if (!output_grids[output_i]) { continue; } - openvdb::GridBase &grid_base = *output_grids[output_i]; - to_typed_grid(grid_base, [&](auto &grid) { - using GridT = std::decay_t; - using ValueType = typename GridT::ValueType; - const int param_index = input_values.size() + output_i; - const ValueType *computed_values = static_cast( - params.computed_array(param_index).data()); - - auto accessor = grid.getUnsafeAccessor(); - for (const int64_t i : IndexRange(voxels_num)) { - const openvdb::Coord &coord = voxels[i]; - const ValueType &value = computed_values[i]; - accessor.setValue(coord, value); - } - }); + const int param_index = input_values.size() + output_i; + grid::set_grid_values(*output_grids[output_i], params.computed_array(param_index), voxels); } } @@ -522,7 +347,7 @@ BLI_NOINLINE static void process_tiles(const mf::MultiFunction &fn, if (const openvdb::GridBase *grid_base = input_grids[input_i]) { /* Sample the tile values from the input grid. */ - to_typed_grid(*grid_base, [&](const auto &grid) { + grid::to_typed_grid(*grid_base, [&](const auto &grid) { using GridT = std::decay_t; using ValueType = typename GridT::ValueType; const auto &tree = grid.tree(); @@ -541,7 +366,8 @@ BLI_NOINLINE static void process_tiles(const mf::MultiFunction &fn, }); } else if (value_variant.is_context_dependent_field()) { - /* Evaluate the field on all tiles. */ + /* Evaluate the field on all tiles. + * TODO: Gather fields from all inputs to evaluate together. */ const fn::GField field = value_variant.get(); const CPPType &type = field.cpp_type(); bke::TilesFieldContext field_context{transform, tiles}; @@ -580,47 +406,7 @@ BLI_NOINLINE static void process_tiles(const mf::MultiFunction &fn, continue; } const int param_index = input_values.size() + output_i; - openvdb::GridBase &grid_base = *output_grids[output_i]; - to_typed_grid(grid_base, [&](auto &grid) { - using GridT = typename std::decay_t; - using TreeT = typename GridT::TreeType; - using ValueType = typename GridT::ValueType; - auto &tree = grid.tree(); - - const ValueType *computed_values = static_cast( - params.computed_array(param_index).data()); - - const auto set_tile_value = - [&](auto &node, const openvdb::Coord &coord_in_tile, auto value) { - const openvdb::Index n = node.coordToOffset(coord_in_tile); - BLI_assert(node.isChildMaskOff(n)); - /* TODO: Figure out how to do this without const_cast, although the same is done in - * `openvdb_ax/openvdb_ax/compiler/VolumeExecutable.cc` which has a similar purpose. - * It seems like OpenVDB generally allows that, but it does not have a proper public - * API for this yet. */ - using UnionType = typename std::decay_t::UnionType; - auto *table = const_cast(node.getTable()); - table[n].setValue(value); - }; - - for (const int i : IndexRange(tiles_num)) { - const openvdb::CoordBBox tile = tiles[i]; - const openvdb::Coord coord_in_tile = tile.min(); - const auto &computed_value = computed_values[i]; - using InternalNode1 = typename TreeT::RootNodeType::ChildNodeType; - using InternalNode2 = typename InternalNode1::ChildNodeType; - /* Find the internal node that contains the tile and update the value in there. 
*/ - if (auto *node = tree.template probeNode(coord_in_tile)) { - set_tile_value(*node, coord_in_tile, computed_value); - } - else if (auto *node = tree.template probeNode(coord_in_tile)) { - set_tile_value(*node, coord_in_tile, computed_value); - } - else { - BLI_assert_unreachable(); - } - } - }); + grid::set_tile_values(*output_grids[output_i], params.computed_array(param_index), tiles); } } @@ -644,7 +430,7 @@ BLI_NOINLINE static void process_background(const mf::MultiFunction &fn, const CPPType ¶m_cpp_type = param_type.data_type().single_type(); if (const openvdb::GridBase *grid_base = input_grids[input_i]) { - to_typed_grid(*grid_base, [&](const auto &grid) { + grid::to_typed_grid(*grid_base, [&](const auto &grid) { # ifndef NDEBUG using GridT = std::decay_t; using ValueType = typename GridT::ValueType; @@ -694,16 +480,7 @@ BLI_NOINLINE static void process_background(const mf::MultiFunction &fn, } const int param_index = input_values.size() + output_i; const GSpan value = params.computed_array(param_index); - - openvdb::GridBase &grid_base = *output_grids[output_i]; - to_typed_grid(grid_base, [&](auto &grid) { - using GridT = std::decay_t; - using ValueType = typename GridT::ValueType; - auto &tree = grid.tree(); - - BLI_assert(value.type().size == sizeof(ValueType)); - tree.root().setBackground(*static_cast(value.data()), true); - }); + grid::set_grid_background(*output_grids[output_i], GPointer(value.type(), value.data())); } } @@ -756,7 +533,7 @@ bool execute_multi_function_on_value_variant__volume_grid( if (!grid) { continue; } - to_typed_grid(*grid, [&](const auto &grid) { mask_tree.topologyUnion(grid.tree()); }); + grid::to_typed_grid(*grid, [&](const auto &grid) { mask_tree.topologyUnion(grid.tree()); }); } Array output_grids(output_values.size()); @@ -773,25 +550,14 @@ bool execute_multi_function_on_value_variant__volume_grid( return false; } - openvdb::GridBase::Ptr grid; - BKE_volume_grid_type_to_static_type(*grid_type, [&](auto type_tag) { - using GridT = typename decltype(type_tag)::type; - using TreeT = typename GridT::TreeType; - using ValueType = typename TreeT::ValueType; - const ValueType background{}; - auto tree = std::make_shared(mask_tree, background, openvdb::TopologyCopy()); - grid = openvdb::createGrid(std::move(tree)); - }); - - grid->setTransform(transform->copy()); - output_grids[i] = std::move(grid); + output_grids[i] = grid::create_grid_with_topology(mask_tree, *transform, *grid_type); } - parallel_grid_topology_tasks( + grid::parallel_grid_topology_tasks( mask_tree, - [&](const LeafNodeMask &leaf_node_mask, + [&](const grid::LeafNodeMask &leaf_node_mask, const openvdb::CoordBBox &leaf_bbox, - const GetVoxelsFn get_voxels_fn) { + const grid::GetVoxelsFn get_voxels_fn) { process_leaf_node(fn, input_values, input_grids,