/* SPDX-FileCopyrightText: 2023 Blender Authors
 *
 * SPDX-License-Identifier: GPL-2.0-or-later */

#include "BLI_array_utils.hh"
#include "BLI_rand.hh"
#include "BLI_task.hh"

#include "BKE_attribute_math.hh"
#include "BKE_geometry_set.hh"
#include "BKE_instances.hh"

namespace blender::bke {

InstanceReference::InstanceReference(GeometrySet geometry_set)
    : type_(Type::GeometrySet),
      geometry_set_(std::make_unique<GeometrySet>(std::move(geometry_set)))
{
}

void InstanceReference::ensure_owns_direct_data()
{
  if (type_ != Type::GeometrySet) {
    return;
  }
  geometry_set_->ensure_owns_direct_data();
}

bool InstanceReference::owns_direct_data() const
{
  if (type_ != Type::GeometrySet) {
    /* The object and collection instances are not direct data. */
    return true;
  }
  return geometry_set_->owns_direct_data();
}

bool operator==(const InstanceReference &a, const InstanceReference &b)
{
  if (a.geometry_set_ && b.geometry_set_) {
    return *a.geometry_set_ == *b.geometry_set_;
  }
  return a.type_ == b.type_ && a.data_ == b.data_;
}

Instances::Instances()
{
  CustomData_reset(&attributes_);
}

Instances::Instances(Instances &&other)
    : references_(std::move(other.references_)),
      reference_handles_(std::move(other.reference_handles_)),
      transforms_(std::move(other.transforms_)),
      almost_unique_ids_(std::move(other.almost_unique_ids_)),
      attributes_(other.attributes_)
{
  CustomData_reset(&other.attributes_);
}

Instances::Instances(const Instances &other)
    : references_(other.references_),
      reference_handles_(other.reference_handles_),
      transforms_(other.transforms_),
      almost_unique_ids_(other.almost_unique_ids_)
{
  CustomData_copy(&other.attributes_, &attributes_, CD_MASK_ALL, other.instances_num());
}

Instances::~Instances()
{
  CustomData_free(&attributes_, this->instances_num());
}
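
/* Assignment destroys the current value and constructs the new one in place with the copy/move
 * constructor above, so the #CustomData ownership handling stays in one place. */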
Instances &Instances::operator=(const Instances &other)
{
  if (this == &other) {
    return *this;
  }
  std::destroy_at(this);
  new (this) Instances(other);
  return *this;
}

Instances &Instances::operator=(Instances &&other)
{
  if (this == &other) {
    return *this;
  }
  std::destroy_at(this);
  new (this) Instances(std::move(other));
  return *this;
}

void Instances::resize(int capacity)
{
  const int old_size = this->instances_num();
  reference_handles_.resize(capacity);
  transforms_.resize(capacity);
  CustomData_realloc(&attributes_, old_size, capacity, CD_SET_DEFAULT);
}
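
/* The handle is expected to be a valid index into the references of this container, i.e. a value
 * returned by #add_reference or #find_reference_handle (see the asserts below). */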
void Instances::add_instance(const int instance_handle, const float4x4 &transform)
{
  BLI_assert(instance_handle >= 0);
  BLI_assert(instance_handle < references_.size());
  const int old_size = this->instances_num();
  reference_handles_.append(instance_handle);
  transforms_.append(transform);
  CustomData_realloc(&attributes_, old_size, transforms_.size());
}

Span<int> Instances::reference_handles() const
{
  return reference_handles_;
}

MutableSpan<int> Instances::reference_handles()
{
  return reference_handles_;
}

MutableSpan<float4x4> Instances::transforms()
{
  return transforms_;
}

Span<float4x4> Instances::transforms() const
{
  return transforms_;
}

GeometrySet &Instances::geometry_set_from_reference(const int reference_index)
{
  /* If this assert fails, it means #ensure_geometry_instances must be called first or that the
   * reference can't be converted to a geometry set. */
  BLI_assert(references_[reference_index].type() == InstanceReference::Type::GeometrySet);

  return references_[reference_index].geometry_set();
}

std::optional<int> Instances::find_reference_handle(const InstanceReference &query)
{
  for (const int i : references_.index_range()) {
    const InstanceReference &reference = references_[i];
    if (reference == query) {
      return i;
    }
  }
  return std::nullopt;
}

int Instances::add_reference(const InstanceReference &reference)
{
  if (std::optional<int> handle = this->find_reference_handle(reference)) {
    return *handle;
  }
  return references_.append_and_get_index(reference);
}

Span<InstanceReference> Instances::references() const
{
  return references_;
}
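
/* The mask selects the instances that are kept. References that end up unused are removed
 * afterwards. */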
void Instances::remove(const IndexMask &mask,
                       const AnonymousAttributePropagationInfo &propagation_info)
{
  const std::optional<IndexRange> masked_range = mask.to_range();
  if (masked_range.has_value() && masked_range->start() == 0) {
    /* Deleting from the end of the array can be much faster since no data has to be shifted. */
    this->resize(mask.size());
    this->remove_unused_references();
    return;
  }

  const int new_size = mask.size();

  Instances new_instances;
  new_instances.references_ = std::move(references_);
  new_instances.reference_handles_.resize(new_size);
  new_instances.transforms_.resize(new_size);
  array_utils::gather(
      reference_handles_.as_span(), mask, new_instances.reference_handles_.as_mutable_span());
  array_utils::gather(transforms_.as_span(), mask, new_instances.transforms_.as_mutable_span());

  gather_attributes(this->attributes(),
                    ATTR_DOMAIN_INSTANCE,
                    propagation_info,
                    {"position"},
                    mask,
                    new_instances.attributes_for_write());

  *this = std::move(new_instances);

  this->remove_unused_references();
}
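
/* Remove references that are no longer used by any instance and remap the handles stored on the
 * instances accordingly. Usage is gathered in parallel chunks to keep this cheap for large
 * instance counts. */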
void Instances::remove_unused_references()
{
  const int tot_instances = this->instances_num();
  const int tot_references_before = references_.size();

  if (tot_instances == 0) {
    /* If there are no instances, no reference is needed. */
    references_.clear();
    return;
  }
  if (tot_references_before == 1) {
    /* There is only one reference and at least one instance. So the only existing reference is
     * used. Nothing to do here. */
    return;
  }

  Array<bool> usage_by_handle(tot_references_before, false);
  std::mutex mutex;

  /* Loop over all instances to see which references are used. */
  threading::parallel_for(IndexRange(tot_instances), 1000, [&](IndexRange range) {
    /* Use local counter to avoid lock contention. */
    Array<bool> local_usage_by_handle(tot_references_before, false);

    for (const int i : range) {
      const int handle = reference_handles_[i];
      BLI_assert(handle >= 0 && handle < tot_references_before);
      local_usage_by_handle[handle] = true;
    }

    std::lock_guard lock{mutex};
    for (const int i : IndexRange(tot_references_before)) {
      usage_by_handle[i] |= local_usage_by_handle[i];
    }
  });

  if (!usage_by_handle.as_span().contains(false)) {
    /* All references are used. */
    return;
  }

  /* Create new references and a mapping for the handles. */
  Vector<int> handle_mapping;
  Vector<InstanceReference> new_references;
  int next_new_handle = 0;
  bool handles_have_to_be_updated = false;
  for (const int old_handle : IndexRange(tot_references_before)) {
    if (!usage_by_handle[old_handle]) {
      /* Add some dummy value. It won't be read again. */
      handle_mapping.append(-1);
    }
    else {
      const InstanceReference &reference = references_[old_handle];
      handle_mapping.append(next_new_handle);
      new_references.append(reference);
      if (old_handle != next_new_handle) {
        handles_have_to_be_updated = true;
      }
      next_new_handle++;
    }
  }
  references_ = new_references;

  if (!handles_have_to_be_updated) {
    /* All remaining handles are the same as before, so they don't have to be updated. This
     * happens when unused handles are only at the end. */
    return;
  }

  /* Update handles of instances. */
  threading::parallel_for(IndexRange(tot_instances), 1000, [&](IndexRange range) {
    for (const int i : range) {
      reference_handles_[i] = handle_mapping[reference_handles_[i]];
    }
  });
}

int Instances::instances_num() const
{
  return transforms_.size();
}

int Instances::references_num() const
{
  return references_.size();
}

bool Instances::owns_direct_data() const
{
  for (const InstanceReference &reference : references_) {
    if (!reference.owns_direct_data()) {
      return false;
    }
  }
  return true;
}

void Instances::ensure_owns_direct_data()
{
  for (const InstanceReference &const_reference : references_) {
    /* `const` cast is fine because we are not changing anything that would change the hash of the
     * reference. */
    InstanceReference &reference = const_cast<InstanceReference &>(const_reference);
    reference.ensure_owns_direct_data();
  }
}
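
/* Resolve duplicate ids in two passes: the first pass keeps the id of the first instance that
 * uses it, the second pass generates replacement ids for the collisions with a random number
 * generator seeded by the original id, which keeps the result deterministic. */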
static Array<int> generate_unique_instance_ids(Span<int> original_ids)
{
  Array<int> unique_ids(original_ids.size());

  Set<int> used_unique_ids;
  used_unique_ids.reserve(original_ids.size());
  Vector<int> instances_with_id_collision;
  for (const int instance_index : original_ids.index_range()) {
    const int original_id = original_ids[instance_index];
    if (used_unique_ids.add(original_id)) {
      /* The original id has not been used by another instance yet. */
      unique_ids[instance_index] = original_id;
    }
    else {
      /* The original id of this instance collided with a previous instance, it needs to be looked
       * at again in a second pass. Don't generate a new random id here, because this might
       * collide with other existing ids. */
      instances_with_id_collision.append(instance_index);
    }
  }

  Map<int, RandomNumberGenerator> generator_by_original_id;
  for (const int instance_index : instances_with_id_collision) {
    const int original_id = original_ids[instance_index];
    RandomNumberGenerator &rng = generator_by_original_id.lookup_or_add_cb(original_id, [&]() {
      RandomNumberGenerator rng;
      rng.seed_random(original_id);
      return rng;
    });

    const int max_iteration = 100;
    for (int iteration = 0;; iteration++) {
      /* Try generating random numbers until an unused one has been found. */
      const int random_id = rng.get_int32();
      if (used_unique_ids.add(random_id)) {
        /* This random id is not used by another instance. */
        unique_ids[instance_index] = random_id;
        break;
      }
      if (iteration == max_iteration) {
        /* It seems to be very unlikely that we ever run into this case (assuming there are fewer
         * than 2^30 instances). However, if that happens, it's better to use an id that is not
         * unique than to be stuck in an infinite loop. */
        unique_ids[instance_index] = original_id;
        break;
      }
    }
  }

  return unique_ids;
}
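
/* The ids are cached and only recomputed when the number of instances changes (not when the "id"
 * attribute values themselves change). They are only "almost" unique because the collision
 * handling in #generate_unique_instance_ids gives up after a fixed number of attempts. */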
Span<int> Instances::almost_unique_ids() const
{
  std::lock_guard lock(almost_unique_ids_mutex_);
  bke::AttributeReader<int> instance_ids_attribute = this->attributes().lookup<int>("id");
  if (instance_ids_attribute) {
    Span<int> instance_ids = instance_ids_attribute.varray.get_internal_span();
    if (almost_unique_ids_.size() != instance_ids.size()) {
      almost_unique_ids_ = generate_unique_instance_ids(instance_ids);
    }
  }
  else {
    almost_unique_ids_.reinitialize(this->instances_num());
    for (const int i : almost_unique_ids_.index_range()) {
      almost_unique_ids_[i] = i;
    }
  }
  return almost_unique_ids_;
}

}  // namespace blender::bke