/* SPDX-FileCopyrightText: 2023 Blender Authors
*
 * SPDX-License-Identifier: GPL-2.0-or-later */

/** \file
 * \ingroup bli
 */

#include <algorithm>
#include <array>
#include <functional>
#include <optional>

#include "BLI_array_utils.hh"
#include "BLI_threads.h"

#include "atomic_ops.h"

namespace blender::array_utils {

void copy(const GVArray &src, GMutableSpan dst, const int64_t grain_size)
{
BLI_assert(src.type() == dst.type());
BLI_assert(src.size() == dst.size());
threading::parallel_for(src.index_range(), grain_size, [&](const IndexRange range) {
src.materialize_to_uninitialized(range, dst.data());
});
}
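
/* Example: a minimal sketch of `copy()`, using hypothetical `src_span` and `dst`
 * buffers of the same type and size:
 *
 *   const GVArray src_varray = GVArray::from_span(src_span);
 *   copy(src_varray, dst, 4096);
 */
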
void copy(const GVArray &src,
const IndexMask &selection,
GMutableSpan dst,
const int64_t grain_size)
{
BLI_assert(src.type() == dst.type());
BLI_assert(src.size() >= selection.min_array_size());
BLI_assert(dst.size() >= selection.min_array_size());
threading::parallel_for(selection.index_range(), grain_size, [&](const IndexRange range) {
src.materialize_to_uninitialized(selection.slice(range), dst.data());
});
}

void gather(const GVArray &src,
const IndexMask &indices,
GMutableSpan dst,
const int64_t grain_size)
{
BLI_assert(src.type() == dst.type());
BLI_assert(indices.size() == dst.size());
threading::parallel_for(indices.index_range(), grain_size, [&](const IndexRange range) {
src.materialize_compressed_to_uninitialized(indices.slice(range), dst.slice(range).data());
});
}
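
/* Example: a sketch of `gather()` with hypothetical names. The destination is
 * dense: it must have exactly `mask.size()` elements, not `src.size()`.
 *
 *   IndexMaskMemory memory;
 *   const IndexMask mask = IndexMask::from_bools(selection_bools, memory);
 *   Array<float> gathered(mask.size());
 *   gather(src_values.as_span(), mask, gathered.as_mutable_span(), 4096);
 */
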
void gather(const GSpan src, const IndexMask &indices, GMutableSpan dst, const int64_t grain_size)
{
gather(GVArray::from_span(src), indices, dst, grain_size);
}

void copy_group_to_group(const OffsetIndices<int> src_offsets,
const OffsetIndices<int> dst_offsets,
const IndexMask &selection,
const GSpan src,
GMutableSpan dst)
{
/* Each group might be large, so a threaded copy might make sense here too. */
selection.foreach_index(GrainSize(512), [&](const int i) {
dst.slice(dst_offsets[i]).copy_from(src.slice(src_offsets[i]));
});
}
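
/* Example: a sketch of `copy_group_to_group()` with hypothetical offset data.
 * `OffsetIndices` wraps an array of N + 1 monotonically increasing offsets
 * describing N groups; group `i` must have the same size on both sides.
 *
 *   const OffsetIndices<int> src_offsets(src_offset_data);
 *   const OffsetIndices<int> dst_offsets(dst_offset_data);
 *   copy_group_to_group(src_offsets, dst_offsets, selection, src_values, dst_values);
 */
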
void count_indices(const Span<int> indices, MutableSpan<int> counts)
{
if (indices.size() < 8192 || BLI_system_thread_count() < 4) {
for (const int i : indices) {
counts[i]++;
}
}
else {
threading::parallel_for(indices.index_range(), 4096, [&](const IndexRange range) {
for (const int i : indices.slice(range)) {
atomic_add_and_fetch_int32(&counts[i], 1);
}
});
}
}
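
/* Example: a sketch of `count_indices()`, e.g. counting how many face corners
 * reference each vertex (`corner_verts` and `verts_num` are hypothetical).
 * `counts` is typically zero-initialized first and must be indexable by every
 * value in `indices`.
 *
 *   Array<int> counts(verts_num, 0);
 *   count_indices(corner_verts, counts);
 */
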
void invert_booleans(MutableSpan<bool> span)
{
threading::parallel_for(span.index_range(), 4096, [&](IndexRange range) {
for (const int i : range) {
span[i] = !span[i];
}
});
}

void invert_booleans(MutableSpan<bool> span, const IndexMask &mask)
{
mask.foreach_index_optimized<int64_t>([&](const int64_t i) { span[i] = !span[i]; });
}
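
/* Example: a sketch of the two `invert_booleans()` overloads, flipping a
 * hypothetical selection either everywhere or only at masked indices:
 *
 *   invert_booleans(selection);        // Parallel flip of every value.
 *   invert_booleans(selection, mask);  // Flip only the masked indices.
 */
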
static bool all_equal(const Span<bool> span, const bool test)
{
return std::all_of(span.begin(), span.end(), [&](const bool value) { return value == test; });
}

static bool all_equal(const VArray<bool> &varray, const IndexRange range, const bool test)
{
return std::all_of(
range.begin(), range.end(), [&](const int64_t i) { return varray[i] == test; });
}

BooleanMix booleans_mix_calc(const VArray<bool> &varray, const IndexRange range_to_check)
{
if (varray.is_empty()) {
return BooleanMix::None;
}
const CommonVArrayInfo info = varray.common_info();
if (info.type == CommonVArrayInfo::Type::Single) {
return *static_cast<const bool *>(info.data) ? BooleanMix::AllTrue : BooleanMix::AllFalse;
}
if (info.type == CommonVArrayInfo::Type::Span) {
const Span<bool> span(static_cast<const bool *>(info.data), varray.size());
return threading::parallel_reduce(
range_to_check,
4096,
BooleanMix::None,
[&](const IndexRange range, const BooleanMix init) {
if (init == BooleanMix::Mixed) {
return init;
}
const Span<bool> slice = span.slice(range);
const bool compare = (init == BooleanMix::None) ? slice.first() :
(init == BooleanMix::AllTrue);
if (all_equal(slice, compare)) {
return compare ? BooleanMix::AllTrue : BooleanMix::AllFalse;
}
return BooleanMix::Mixed;
},
[&](BooleanMix a, BooleanMix b) { return (a == b) ? a : BooleanMix::Mixed; });
}
return threading::parallel_reduce(
range_to_check,
2048,
BooleanMix::None,
[&](const IndexRange range, const BooleanMix init) {
if (init == BooleanMix::Mixed) {
return init;
}
/* Alternatively, this could use #materialize to retrieve many values at once. */
const bool compare = (init == BooleanMix::None) ? varray[range.first()] :
(init == BooleanMix::AllTrue);
if (all_equal(varray, range, compare)) {
return compare ? BooleanMix::AllTrue : BooleanMix::AllFalse;
}
return BooleanMix::Mixed;
},
[&](BooleanMix a, BooleanMix b) { return (a == b) ? a : BooleanMix::Mixed; });
}
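
/* Example: a sketch of `booleans_mix_calc()`, e.g. deciding whether any value
 * in a hypothetical "cyclic" attribute is set before doing per-curve work:
 *
 *   const BooleanMix mix = booleans_mix_calc(cyclic, cyclic.index_range());
 *   if (mix == BooleanMix::AllFalse) {
 *     return;  // No curve is cyclic, so there is nothing to do.
 *   }
 */
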
int64_t count_booleans(const VArray<bool> &varray, const IndexMask &mask)
{
if (varray.is_empty() || mask.is_empty()) {
return 0;
}
/* Check if mask is full. */
if (varray.size() == mask.size()) {
const CommonVArrayInfo info = varray.common_info();
if (info.type == CommonVArrayInfo::Type::Single) {
return *static_cast<const bool *>(info.data) ? varray.size() : 0;
}
if (info.type == CommonVArrayInfo::Type::Span) {
const Span<bool> span(static_cast<const bool *>(info.data), varray.size());
return threading::parallel_reduce(
varray.index_range(),
4096,
0,
[&](const IndexRange range, const int64_t init) {
const Span<bool> slice = span.slice(range);
return init + std::count(slice.begin(), slice.end(), true);
},
std::plus<>());
}
return threading::parallel_reduce(
varray.index_range(),
2048,
0,
[&](const IndexRange range, const int64_t init) {
int64_t value = init;
/* Alternatively, this could use #materialize to retrieve many values at once. */
for (const int64_t i : range) {
value += int64_t(varray[i]);
}
return value;
},
std::plus<>());
}
const CommonVArrayInfo info = varray.common_info();
if (info.type == CommonVArrayInfo::Type::Single) {
return *static_cast<const bool *>(info.data) ? mask.size() : 0;
}
int64_t value = 0;
mask.foreach_segment([&](const IndexMaskSegment segment) {
for (const int64_t i : segment) {
value += int64_t(varray[i]);
}
});
return value;
}
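
/* Example: a sketch of `count_booleans()`, e.g. counting the selected points
 * in a hypothetical boolean selection attribute, restricted to a mask:
 *
 *   const int64_t selected_num = count_booleans(selection, selection_mask);
 */
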
bool contains(const VArray<bool> &varray, const IndexMask &indices_to_check, const bool value)
{
const CommonVArrayInfo info = varray.common_info();
if (info.type == CommonVArrayInfo::Type::Single) {
return *static_cast<const bool *>(info.data) == value;
}
if (info.type == CommonVArrayInfo::Type::Span) {
const Span<bool> span(static_cast<const bool *>(info.data), varray.size());
return threading::parallel_reduce(
indices_to_check.index_range(),
4096,
false,
[&](const IndexRange range, const bool init) {
if (init) {
return init;
}
const IndexMask sliced_mask = indices_to_check.slice(range);
if (std::optional<IndexRange> range = sliced_mask.to_range()) {
return span.slice(*range).contains(value);
}
for (const int64_t segment_i : IndexRange(sliced_mask.segments_num())) {
const IndexMaskSegment segment = sliced_mask.segment(segment_i);
for (const int i : segment) {
if (span[i] == value) {
return true;
}
}
}
return false;
},
std::logical_or());
}
return threading::parallel_reduce(
indices_to_check.index_range(),
2048,
false,
[&](const IndexRange range, const bool init) {
if (init) {
return init;
}
constexpr int64_t MaxChunkSize = 512;
const int64_t slice_end = range.one_after_last();
for (int64_t start = range.start(); start < slice_end; start += MaxChunkSize) {
const int64_t end = std::min<int64_t>(start + MaxChunkSize, slice_end);
const int64_t size = end - start;
const IndexMask sliced_mask = indices_to_check.slice(start, size);
std::array<bool, MaxChunkSize> values;
auto values_end = values.begin() + size;
varray.materialize_compressed(sliced_mask, values);
if (std::find(values.begin(), values_end, value) != values_end) {
return true;
}
}
return false;
},
std::logical_or());
}
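
/* Example: a sketch of `contains()`, e.g. checking whether any value in a
 * hypothetical "cyclic" virtual array is true without materializing it:
 *
 *   const bool any_cyclic = contains(cyclic, IndexMask(cyclic.size()), true);
 */
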
int64_t count_booleans(const VArray<bool> &varray)
{
return count_booleans(varray, IndexMask(varray.size()));
}

bool indices_are_range(Span<int> indices, IndexRange range)
{
if (indices.size() != range.size()) {
return false;
}
return threading::parallel_reduce(
range.index_range(),
4096,
true,
[&](const IndexRange part, const bool is_range) {
const Span<int> local_indices = indices.slice(part);
const IndexRange local_range = range.slice(part);
return is_range &&
std::equal(local_indices.begin(), local_indices.end(), local_range.begin());
},
std::logical_and<>());
}
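
/* Example: a sketch of `indices_are_range()`, e.g. detecting that a
 * hypothetical index map is the identity so a gather can become a plain copy:
 *
 *   if (indices_are_range(index_map, index_map.index_range())) {
 *     // `index_map[i] == i` for every index, so copy directly.
 *   }
 */
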
}  // namespace blender::array_utils