/* SPDX-FileCopyrightText: 2023 Blender Authors
 *
 * SPDX-License-Identifier: GPL-2.0-or-later */

#include "BLI_array_utils.hh"
#include "BLI_listbase.h"
#include "BLI_rand.hh"
#include "BLI_task.hh"

#include "DNA_collection_types.h"
#include "DNA_object_types.h"

#include "BKE_customdata.hh"
#include "BKE_geometry_set.hh"
#include "BKE_geometry_set_instances.hh"
#include "BKE_instances.hh"

#include "BLT_translation.hh"

namespace blender::bke {

InstanceReference::InstanceReference(GeometrySet geometry_set)
    : type_(Type::GeometrySet),
      geometry_set_(std::make_unique<GeometrySet>(std::move(geometry_set)))
{
}

InstanceReference::InstanceReference(const InstanceReference &other)
    : type_(other.type_), data_(other.data_)
{
  /* The geometry set is stored behind a #unique_ptr, so a deep copy is required. */
  if (other.geometry_set_) {
    geometry_set_ = std::make_unique<GeometrySet>(*other.geometry_set_);
  }
}

void InstanceReference::ensure_owns_direct_data()
{
  if (type_ != Type::GeometrySet) {
    return;
  }
  geometry_set_->ensure_owns_direct_data();
}

bool InstanceReference::owns_direct_data() const
{
  if (type_ != Type::GeometrySet) {
    /* The object and collection instances are not direct data. */
    return true;
  }
  return geometry_set_->owns_direct_data();
}

void InstanceReference::count_memory(MemoryCounter &memory) const
{
  switch (type_) {
    case Type::GeometrySet: {
      geometry_set_->count_memory(memory);
      /* Missing break previously caused a silent fall-through into default. */
      break;
    }
    default: {
      /* Object and collection references do not own the referenced data. */
      break;
    }
  }
}

/** Expand a collection into child-collection and object instances with relative transforms. */
static void convert_collection_to_instances(const Collection &collection,
                                            bke::Instances &instances)
{
  LISTBASE_FOREACH (CollectionChild *, collection_child, &collection.children) {
    float4x4 transform = float4x4::identity();
    transform.location() += float3(collection_child->collection->instance_offset);
    /* Make the child offset relative to this collection's own instance offset. */
    transform.location() -= float3(collection.instance_offset);
    const int handle = instances.add_reference(*collection_child->collection);
    instances.add_instance(handle, transform);
  }
  LISTBASE_FOREACH (CollectionObject *, collection_object, &collection.gobject) {
    float4x4 transform = float4x4::identity();
    transform.location() -= float3(collection.instance_offset);
    transform *= (collection_object->ob)->object_to_world();
    const int handle = instances.add_reference(*collection_object->ob);
    instances.add_instance(handle, transform);
  }
}

void InstanceReference::to_geometry_set(GeometrySet &r_geometry_set) const
{
  r_geometry_set.clear();
  switch (type_) {
    case Type::Object: {
      const Object &object = this->object();
      r_geometry_set = bke::object_get_evaluated_geometry_set(object);
      break;
    }
    case Type::Collection: {
      const Collection &collection = this->collection();
      std::unique_ptr<Instances> instances_ptr = std::make_unique<Instances>();
      convert_collection_to_instances(collection, *instances_ptr);
      r_geometry_set.replace_instances(instances_ptr.release());
      break;
    }
    case Type::GeometrySet: {
      r_geometry_set = this->geometry_set();
      break;
    }
    case Type::None: {
      break;
    }
  }
}

StringRefNull InstanceReference::name() const
{
  switch (type_) {
    case Type::Object:
      /* `+ 2` skips the two-character ID-code prefix of the ID name. */
      return this->object().id.name + 2;
    case Type::Collection:
      return this->collection().id.name + 2;
    case Type::GeometrySet:
      return this->geometry_set().name;
    case Type::None:
      break;
  }
  return "";
}

bool operator==(const InstanceReference &a, const InstanceReference &b)
{
  if (a.geometry_set_ && b.geometry_set_) {
    return *a.geometry_set_ == *b.geometry_set_;
  }
  return a.type_ == b.type_ && a.data_ == b.data_;
}

uint64_t InstanceReference::hash() const
{
  const uint64_t geometry_hash = geometry_set_ ? geometry_set_->hash() : 0;
  return get_default_hash(geometry_hash, type_, data_);
}

Instances::Instances()
{
  CustomData_reset(&attributes_);
}

Instances::Instances(Instances &&other)
    : references_(std::move(other.references_)),
      instances_num_(other.instances_num_),
      attributes_(other.attributes_),
      reference_user_counts_(std::move(other.reference_user_counts_)),
      almost_unique_ids_cache_(std::move(other.almost_unique_ids_cache_))
{
  /* Reset the source so its destructor does not free the stolen layers. */
  CustomData_reset(&other.attributes_);
}

Instances::Instances(const Instances &other)
    : references_(other.references_),
      instances_num_(other.instances_num_),
      reference_user_counts_(other.reference_user_counts_),
      almost_unique_ids_cache_(other.almost_unique_ids_cache_)
{
  CustomData_init_from(&other.attributes_, &attributes_, CD_MASK_ALL, other.instances_num_);
}

Instances::~Instances()
{
  CustomData_free(&attributes_, instances_num_);
}

Instances &Instances::operator=(const Instances &other)
{
  if (this == &other) {
    return *this;
  }
  std::destroy_at(this);
  new (this) Instances(other);
  return *this;
}

Instances &Instances::operator=(Instances &&other)
{
  if (this == &other) {
    return *this;
  }
  std::destroy_at(this);
  new (this) Instances(std::move(other));
  return *this;
}

void Instances::resize(int capacity)
{
  CustomData_realloc(&attributes_, instances_num_, capacity, CD_SET_DEFAULT);
  instances_num_ = capacity;
}

void Instances::add_instance(const int instance_handle, const float4x4 &transform)
{
  BLI_assert(instance_handle >= 0);
  BLI_assert(instance_handle < references_.size());
  const int old_size = instances_num_;
  instances_num_++;
  CustomData_realloc(&attributes_, old_size, instances_num_);
  this->reference_handles_for_write().last() = instance_handle;
  this->transforms_for_write().last() = transform;
  this->tag_reference_handles_changed();
}

Span<int> Instances::reference_handles() const
{
  return {static_cast<const int *>(
              CustomData_get_layer_named(&attributes_, CD_PROP_INT32, ".reference_index")),
          instances_num_};
}

MutableSpan<int> Instances::reference_handles_for_write()
{
  int *data = static_cast<int *>(CustomData_get_layer_named_for_write(
      &attributes_, CD_PROP_INT32, ".reference_index", instances_num_));
  if (!data) {
    /* The layer is created lazily on first write access. */
    data = static_cast<int *>(CustomData_add_layer_named(
        &attributes_, CD_PROP_INT32, CD_SET_DEFAULT, instances_num_, ".reference_index"));
  }
  return {data, instances_num_};
}

Span<float4x4> Instances::transforms() const
{
  return {static_cast<const float4x4 *>(
              CustomData_get_layer_named(&attributes_, CD_PROP_FLOAT4X4, "instance_transform")),
          instances_num_};
}

MutableSpan<float4x4> Instances::transforms_for_write()
{
  float4x4 *data = static_cast<float4x4 *>(CustomData_get_layer_named_for_write(
      &attributes_, CD_PROP_FLOAT4X4, "instance_transform", instances_num_));
  if (!data) {
    data = static_cast<float4x4 *>(CustomData_add_layer_named(
        &attributes_, CD_PROP_FLOAT4X4, CD_SET_DEFAULT, instances_num_, "instance_transform"));
  }
  return {data, instances_num_};
}

GeometrySet &Instances::geometry_set_from_reference(const int reference_index)
{
  /* If this assert fails, it means #ensure_geometry_instances must be called first or that the
   * reference can't be converted to a geometry set. */
  BLI_assert(references_[reference_index].type() == InstanceReference::Type::GeometrySet);
  return references_[reference_index].geometry_set();
}

std::optional<int> Instances::find_reference_handle(const InstanceReference &query)
{
  /* Linear search; the number of distinct references is typically small. */
  for (const int i : references_.index_range()) {
    const InstanceReference &reference = references_[i];
    if (reference == query) {
      return i;
    }
  }
  return std::nullopt;
}

int Instances::add_reference(const InstanceReference &reference)
{
  if (std::optional<int> handle = this->find_reference_handle(reference)) {
    return *handle;
  }
  return this->add_new_reference(reference);
}

int Instances::add_new_reference(const InstanceReference &reference)
{
  this->tag_reference_handles_changed();
  return references_.append_and_get_index(reference);
}

Span<InstanceReference> Instances::references() const
{
  return references_;
}

void Instances::remove(const IndexMask &mask, const AttributeFilter &attribute_filter)
{
  const std::optional<IndexRange> masked_range = mask.to_range();
  if (masked_range.has_value() && masked_range->start() == 0) {
    /* Deleting from the end of the array can be much faster since no data has to be shifted. */
    this->resize(mask.size());
    this->remove_unused_references();
    return;
  }
  Instances new_instances;
  new_instances.references_ = std::move(references_);
  new_instances.instances_num_ = mask.size();
  gather_attributes(this->attributes(),
                    AttrDomain::Instance,
                    AttrDomain::Instance,
                    attribute_filter,
                    mask,
                    new_instances.attributes_for_write());
  *this = std::move(new_instances);
  this->remove_unused_references();
}

void Instances::remove_unused_references()
{
  const int tot_instances = instances_num_;
  const int tot_references_before = references_.size();

  if (tot_instances == 0) {
    /* If there are no instances, no reference is needed. */
    references_.clear();
    return;
  }
  if (tot_references_before == 1) {
    /* There is only one reference and at least one instance. So the only existing reference is
     * used. Nothing to do here. */
    return;
  }

  const Span<int> reference_handles = this->reference_handles();
  Array<bool> usage_by_handle(tot_references_before, false);
  std::mutex mutex;

  /* Loop over all instances to see which references are used. */
  threading::parallel_for(IndexRange(tot_instances), 1000, [&](IndexRange range) {
    /* Use local counter to avoid lock contention. */
    Array<bool> local_usage_by_handle(tot_references_before, false);
    for (const int i : range) {
      const int handle = reference_handles[i];
      BLI_assert(handle >= 0 && handle < tot_references_before);
      local_usage_by_handle[handle] = true;
    }
    std::lock_guard lock{mutex};
    for (const int i : IndexRange(tot_references_before)) {
      usage_by_handle[i] |= local_usage_by_handle[i];
    }
  });

  if (!usage_by_handle.as_span().contains(false)) {
    /* All references are used. */
    return;
  }

  /* Create new references and a mapping for the handles. */
  Vector<int> handle_mapping;
  Vector<InstanceReference> new_references;
  int next_new_handle = 0;
  bool handles_have_to_be_updated = false;
  for (const int old_handle : IndexRange(tot_references_before)) {
    if (!usage_by_handle[old_handle]) {
      /* Add some dummy value. It won't be read again. */
      handle_mapping.append(-1);
    }
    else {
      const InstanceReference &reference = references_[old_handle];
      handle_mapping.append(next_new_handle);
      new_references.append(reference);
      if (old_handle != next_new_handle) {
        handles_have_to_be_updated = true;
      }
      next_new_handle++;
    }
  }
  /* Move instead of copy; `new_references` is not used afterwards. */
  references_ = std::move(new_references);

  if (!handles_have_to_be_updated) {
    /* All remaining handles are the same as before, so they don't have to be updated. This happens
     * when unused handles are only at the end. */
    return;
  }

  /* Update handles of instances. */
  {
    const MutableSpan<int> reference_handles = this->reference_handles_for_write();
    threading::parallel_for(IndexRange(tot_instances), 1000, [&](IndexRange range) {
      for (const int i : range) {
        reference_handles[i] = handle_mapping[reference_handles[i]];
      }
    });
  }
}

int Instances::instances_num() const
{
  return this->instances_num_;
}

int Instances::references_num() const
{
  return references_.size();
}

bool Instances::owns_direct_data() const
{
  for (const InstanceReference &reference : references_) {
    if (!reference.owns_direct_data()) {
      return false;
    }
  }
  return true;
}

void Instances::ensure_owns_direct_data()
{
  for (const InstanceReference &const_reference : references_) {
    /* `const` cast is fine because we are not changing anything that would change the hash of the
     * reference. */
    InstanceReference &reference = const_cast<InstanceReference &>(const_reference);
    reference.ensure_owns_direct_data();
  }
}

void Instances::count_memory(MemoryCounter &memory) const
{
  CustomData_count_memory(attributes_, instances_num_, memory);
  for (const InstanceReference &reference : references_) {
    reference.count_memory(memory);
  }
}

/**
 * Make the given ids unique by replacing colliding ids with random values. Ids that are already
 * unique are kept as-is so that stable ids stay stable.
 */
static Array<int> generate_unique_instance_ids(Span<int> original_ids)
{
  Array<int> unique_ids(original_ids.size());

  Set<int> used_unique_ids;
  used_unique_ids.reserve(original_ids.size());
  Vector<int> instances_with_id_collision;
  for (const int instance_index : original_ids.index_range()) {
    const int original_id = original_ids[instance_index];
    if (used_unique_ids.add(original_id)) {
      /* The original id has not been used by another instance yet. */
      unique_ids[instance_index] = original_id;
    }
    else {
      /* The original id of this instance collided with a previous instance, it needs to be looked
       * at again in a second pass. Don't generate a new random id here, because this might collide
       * with other existing ids. */
      instances_with_id_collision.append(instance_index);
    }
  }

  /* Seed one generator per original id so that results are deterministic. */
  Map<int, RandomNumberGenerator> generator_by_original_id;
  for (const int instance_index : instances_with_id_collision) {
    const int original_id = original_ids[instance_index];
    RandomNumberGenerator &rng = generator_by_original_id.lookup_or_add_cb(original_id, [&]() {
      RandomNumberGenerator rng;
      rng.seed_random(original_id);
      return rng;
    });

    const int max_iteration = 100;
    for (int iteration = 0;; iteration++) {
      /* Try generating random numbers until an unused one has been found. */
      const int random_id = rng.get_int32();
      if (used_unique_ids.add(random_id)) {
        /* This random id is not used by another instance. */
        unique_ids[instance_index] = random_id;
        break;
      }
      if (iteration == max_iteration) {
        /* It seems to be very unlikely that we ever run into this case (assuming there are less
         * than 2^30 instances). However, if that happens, it's better to use an id that is not
         * unique than to be stuck in an infinite loop. */
        unique_ids[instance_index] = original_id;
        break;
      }
    }
  }

  return unique_ids;
}

Span<int> Instances::reference_user_counts() const
{
  reference_user_counts_.ensure([&](Array<int> &r_data) {
    const int references_num = references_.size();
    r_data.reinitialize(references_num);
    r_data.fill(0);
    const Span<int> handles = this->reference_handles();
    for (const int handle : handles) {
      /* Guard against out-of-range handles so a corrupt handle can't crash the count. */
      if (handle >= 0 && handle < references_num) {
        r_data[handle]++;
      }
    }
  });
  return reference_user_counts_.data();
}

Span<int> Instances::almost_unique_ids() const
{
  almost_unique_ids_cache_.ensure([&](Array<int> &r_data) {
    bke::AttributeReader<int> instance_ids_attribute = this->attributes().lookup<int>("id");
    if (instance_ids_attribute) {
      Span<int> instance_ids = instance_ids_attribute.varray.get_internal_span();
      if (r_data.size() != instance_ids.size()) {
        r_data = generate_unique_instance_ids(instance_ids);
      }
    }
    else {
      /* Without an explicit "id" attribute, fall back to the instance indices. */
      r_data.reinitialize(instances_num_);
      array_utils::fill_index_range(r_data.as_mutable_span());
    }
  });
  return almost_unique_ids_cache_.data();
}

static float3 get_transform_position(const float4x4 &transform)
{
  return transform.location();
}

static void set_transform_position(float4x4 &transform, const float3 position)
{
  transform.location() = position;
}

VArray<float3> instance_position_varray(const Instances &instances)
{
  return VArray<float3>::ForDerivedSpan<float4x4, get_transform_position>(instances.transforms());
}

VMutableArray<float3> instance_position_varray_for_write(Instances &instances)
{
  MutableSpan<float4x4> transforms = instances.transforms_for_write();
  return VMutableArray<float3>::
      ForDerivedSpan<float4x4, get_transform_position, set_transform_position>(transforms);
}

}  // namespace blender::bke