Refactor: BLI: reduce code size for virtual arrays

Previously, `VArrayImpl` had separate `materialize` and
`materialize_to_uninitialized` methods. Now both are merged into one with an
additional `bool dst_is_uninitialized` parameter. The same is done for the
`materialize_compressed` method, as well as for `GVArrayImpl`.
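
In essence, the virtual interface changes as follows (a minimal sketch; the
concrete signatures are in the diff below):

```cpp
/* Before: two virtual methods per materialize variant. */
virtual void materialize(const IndexMask &mask, T *dst) const;
virtual void materialize_to_uninitialized(const IndexMask &mask, T *dst) const;

/* After: one method; the caller states whether `dst` holds constructed values. */
virtual void materialize(const IndexMask &mask, T *dst, bool dst_is_uninitialized) const;
```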

While this kind of merging is typically not ideal, it reduces the binary size
by ~200 KB while being essentially free performance-wise. The cost of this
predictable boolean check is expected to be negligible even if only very few
indices are materialized. Additionally, in most cases the parameter does not
even have to be checked, because for trivial types it does not matter whether
the destination array is already initialized when it is overwritten.
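
For illustration, this is the dispatch pattern the implementations below use;
for trivially copyable types the flag is ignored and the branch is compiled
out entirely (a sketch mirroring the fallback in `VArrayImpl<T>::materialize`):

```cpp
if constexpr (std::is_trivially_copyable_v<T>) {
  /* Initialized or not makes no difference for trivial types; just assign. */
  mask.foreach_index([&](const int64_t i) { dst[i] = this->get(i); });
}
else if (dst_is_uninitialized) {
  /* Construct into raw memory with placement-new. */
  mask.foreach_index([&](const int64_t i) { new (dst + i) T(this->get(i)); });
}
else {
  /* Destination already holds live objects; assign over them. */
  mask.foreach_index([&](const int64_t i) { dst[i] = this->get(i); });
}
```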

The savings are this large because quite a few implementations are generated
with e.g. `VArray::from_func`, and a lot of code was duplicated for each
instantiation.
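
For example, every distinct callable passed to `VArray::from_func` instantiates
its own implementation class, and each instantiation previously generated four
separate materialize bodies (hypothetical usage; the exact parameters are an
assumption here):

```cpp
/* Each lambda has a unique type, so this creates a unique VArrayImpl_For_Func
 * instantiation with its own set of materialize methods. */
const VArray<float> squared = VArray<float>::from_func(
    size, [](const int64_t i) { return float(i) * float(i); });
```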

This changes only the actual `(G)VArrayImpl` classes, but not the `VArray` and
`GVArray` API which is typically used to work with virtual arrays.
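
Callers keep using the unchanged high-level methods; the flag is supplied
internally by the wrappers (see the `VArrayCommon` and `GVArrayCommon` hunks
below):

```cpp
/* Unchanged public API; the bool parameter stays an implementation detail. */
varray.materialize(mask, r_span);                  /* forwards dst_is_uninitialized = false */
varray.materialize_to_uninitialized(mask, r_span); /* forwards dst_is_uninitialized = true */
```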

Pull Request: https://projects.blender.org/blender/blender/pulls/145144
Jacques Lucke
2025-08-28 14:24:25 +02:00
parent 0056784996
commit 2e4d2ad5ab
6 changed files with 288 additions and 269 deletions

View File

@@ -1700,7 +1700,9 @@ class VArrayImpl_For_VertexWeights final : public VMutableArrayImpl<float> {
     });
   }
-  void materialize(const IndexMask &mask, float *dst) const override
+  void materialize(const IndexMask &mask,
+                   float *dst,
+                   const bool /*dst_is_uninitialized*/) const override
   {
     if (dverts_ == nullptr) {
       mask.foreach_index([&](const int i) { dst[i] = 0.0f; });
@@ -1717,11 +1719,6 @@ class VArrayImpl_For_VertexWeights final : public VMutableArrayImpl<float> {
     });
   }
-  void materialize_to_uninitialized(const IndexMask &mask, float *dst) const override
-  {
-    this->materialize(mask, dst);
-  }
  private:
   MDeformWeight *find_weight_at_index(const int64_t index)
   {

View File

@@ -751,14 +751,13 @@ class GVArray_For_ConvertedGVArray : public GVArrayImpl {
     from_type_.destruct(buffer);
   }
-  void materialize(const IndexMask &mask, void *dst) const override
-  {
-    type_->destruct_n(dst, mask.min_array_size());
-    this->materialize_to_uninitialized(mask, dst);
-  }
-  void materialize_to_uninitialized(const IndexMask &mask, void *dst) const override
+  void materialize(const IndexMask &mask,
+                   void *dst,
+                   const bool dst_is_uninitialized) const override
   {
+    if (!dst_is_uninitialized) {
+      type_->destruct_n(dst, mask.min_array_size());
+    }
     call_convert_to_uninitialized_fn(varray_,
                                      *old_to_new_conversions_.multi_function,
                                      mask,
@@ -809,14 +808,13 @@ class GVMutableArray_For_ConvertedGVMutableArray : public GVMutableArrayImpl {
     varray_.set_by_relocate(index, buffer);
   }
-  void materialize(const IndexMask &mask, void *dst) const override
-  {
-    type_->destruct_n(dst, mask.min_array_size());
-    this->materialize_to_uninitialized(mask, dst);
-  }
-  void materialize_to_uninitialized(const IndexMask &mask, void *dst) const override
+  void materialize(const IndexMask &mask,
+                   void *dst,
+                   const bool dst_is_uninitialized) const override
   {
+    if (!dst_is_uninitialized) {
+      type_->destruct_n(dst, mask.min_array_size());
+    }
     call_convert_to_uninitialized_fn(varray_,
                                      *old_to_new_conversions_.multi_function,
                                      mask,

View File

@@ -45,11 +45,10 @@ class GVArrayImpl {
   virtual CommonVArrayInfo common_info() const;
-  virtual void materialize(const IndexMask &mask, void *dst) const;
-  virtual void materialize_to_uninitialized(const IndexMask &mask, void *dst) const;
-  virtual void materialize_compressed(const IndexMask &mask, void *dst) const;
-  virtual void materialize_compressed_to_uninitialized(const IndexMask &mask, void *dst) const;
+  virtual void materialize(const IndexMask &mask, void *dst, bool dst_is_uninitialized) const;
+  virtual void materialize_compressed(const IndexMask &mask,
+                                      void *dst,
+                                      bool dst_is_uninitialized) const;
   virtual bool try_assign_VArray(void *varray) const;
 };
@@ -323,26 +322,19 @@ template<typename T> class GVArrayImpl_For_VArray : public GVArrayImpl {
     new (r_value) T(varray_[index]);
   }
-  void materialize(const IndexMask &mask, void *dst) const override
+  void materialize(const IndexMask &mask,
+                   void *dst,
+                   const bool dst_is_uninitialized) const override
   {
-    varray_.materialize(mask, MutableSpan(static_cast<T *>(dst), mask.min_array_size()));
+    varray_.get_implementation()->materialize(mask, static_cast<T *>(dst), dst_is_uninitialized);
   }
-  void materialize_to_uninitialized(const IndexMask &mask, void *dst) const override
+  void materialize_compressed(const IndexMask &mask,
+                              void *dst,
+                              const bool dst_is_uninitialized) const override
   {
-    varray_.materialize_to_uninitialized(
-        mask, MutableSpan(static_cast<T *>(dst), mask.min_array_size()));
-  }
-  void materialize_compressed(const IndexMask &mask, void *dst) const override
-  {
-    varray_.materialize_compressed(mask, MutableSpan(static_cast<T *>(dst), mask.size()));
-  }
-  void materialize_compressed_to_uninitialized(const IndexMask &mask, void *dst) const override
-  {
-    varray_.materialize_compressed_to_uninitialized(
-        mask, MutableSpan(static_cast<T *>(dst), mask.size()));
+    varray_.get_implementation()->materialize_compressed(
+        mask, static_cast<T *>(dst), dst_is_uninitialized);
   }
   bool try_assign_VArray(void *varray) const override
@@ -388,24 +380,16 @@ template<typename T> class VArrayImpl_For_GVArray : public VArrayImpl<T> {
     return true;
   }
-  void materialize(const IndexMask &mask, T *dst) const override
+  void materialize(const IndexMask &mask, T *dst, const bool dst_is_uninitialized) const override
   {
-    varray_.materialize(mask, dst);
+    varray_.get_implementation()->materialize(mask, dst, dst_is_uninitialized);
   }
-  void materialize_to_uninitialized(const IndexMask &mask, T *dst) const override
+  void materialize_compressed(const IndexMask &mask,
+                              T *dst,
+                              const bool dst_is_uninitialized) const override
   {
-    varray_.materialize_to_uninitialized(mask, dst);
-  }
-  void materialize_compressed(const IndexMask &mask, T *dst) const override
-  {
-    varray_.materialize_compressed(mask, dst);
-  }
-  void materialize_compressed_to_uninitialized(const IndexMask &mask, T *dst) const override
-  {
-    varray_.materialize_compressed_to_uninitialized(mask, dst);
+    varray_.get_implementation()->materialize_compressed(mask, dst, dst_is_uninitialized);
   }
 };
@@ -460,26 +444,19 @@ template<typename T> class GVMutableArrayImpl_For_VMutableArray : public GVMutab
     varray_.set_all(Span(static_cast<const T *>(src), size_));
   }
-  void materialize(const IndexMask &mask, void *dst) const override
+  void materialize(const IndexMask &mask,
+                   void *dst,
+                   const bool dst_is_uninitialized) const override
   {
-    varray_.materialize(mask, MutableSpan(static_cast<T *>(dst), mask.min_array_size()));
+    varray_.get_implementation()->materialize(mask, static_cast<T *>(dst), dst_is_uninitialized);
   }
-  void materialize_to_uninitialized(const IndexMask &mask, void *dst) const override
+  void materialize_compressed(const IndexMask &mask,
+                              void *dst,
+                              const bool dst_is_uninitialized) const override
   {
-    varray_.materialize_to_uninitialized(
-        mask, MutableSpan(static_cast<T *>(dst), mask.min_array_size()));
-  }
-  void materialize_compressed(const IndexMask &mask, void *dst) const override
-  {
-    varray_.materialize_compressed(mask, MutableSpan(static_cast<T *>(dst), mask.size()));
-  }
-  void materialize_compressed_to_uninitialized(const IndexMask &mask, void *dst) const override
-  {
-    varray_.materialize_compressed_to_uninitialized(
-        mask, MutableSpan(static_cast<T *>(dst), mask.size()));
+    varray_.get_implementation()->materialize_compressed(
+        mask, static_cast<T *>(dst), dst_is_uninitialized);
   }
   bool try_assign_VArray(void *varray) const override
@@ -538,24 +515,16 @@ template<typename T> class VMutableArrayImpl_For_GVMutableArray : public VMutabl
     return true;
   }
-  void materialize(const IndexMask &mask, T *dst) const override
+  void materialize(const IndexMask &mask, T *dst, const bool dst_is_uninitialized) const override
   {
-    varray_.materialize(mask, dst);
+    varray_.get_implementation()->materialize(mask, dst, dst_is_uninitialized);
   }
-  void materialize_to_uninitialized(const IndexMask &mask, T *dst) const override
+  void materialize_compressed(const IndexMask &mask,
+                              T *dst,
+                              const bool dst_is_uninitialized) const override
   {
-    varray_.materialize_to_uninitialized(mask, dst);
-  }
-  void materialize_compressed(const IndexMask &mask, T *dst) const override
-  {
-    varray_.materialize_compressed(mask, dst);
-  }
-  void materialize_compressed_to_uninitialized(const IndexMask &mask, T *dst) const override
-  {
-    varray_.materialize_compressed_to_uninitialized(mask, dst);
+    varray_.get_implementation()->materialize_compressed(mask, dst, dst_is_uninitialized);
  }
 };
@@ -594,11 +563,11 @@ class GVArrayImpl_For_GSpan : public GVMutableArrayImpl {
   CommonVArrayInfo common_info() const override;
-  void materialize(const IndexMask &mask, void *dst) const override;
-  void materialize_to_uninitialized(const IndexMask &mask, void *dst) const override;
+  void materialize(const IndexMask &mask, void *dst, bool dst_is_uninitialized) const override;
-  void materialize_compressed(const IndexMask &mask, void *dst) const override;
-  void materialize_compressed_to_uninitialized(const IndexMask &mask, void *dst) const override;
+  void materialize_compressed(const IndexMask &mask,
+                              void *dst,
+                              bool dst_is_uninitialized) const override;
 };
 class GVArrayImpl_For_GSpan_final final : public GVArrayImpl_For_GSpan {
@@ -635,10 +604,10 @@ class GVArrayImpl_For_SingleValueRef : public GVArrayImpl {
   void get(const int64_t index, void *r_value) const override;
   void get_to_uninitialized(const int64_t index, void *r_value) const override;
   CommonVArrayInfo common_info() const override;
-  void materialize(const IndexMask &mask, void *dst) const override;
-  void materialize_to_uninitialized(const IndexMask &mask, void *dst) const override;
-  void materialize_compressed(const IndexMask &mask, void *dst) const override;
-  void materialize_compressed_to_uninitialized(const IndexMask &mask, void *dst) const override;
+  void materialize(const IndexMask &mask, void *dst, bool dst_is_uninitialized) const override;
+  void materialize_compressed(const IndexMask &mask,
+                              void *dst,
+                              bool dst_is_uninitialized) const override;
 };
 class GVArrayImpl_For_SingleValueRef_final final : public GVArrayImpl_For_SingleValueRef {
class GVArrayImpl_For_SingleValueRef_final final : public GVArrayImpl_For_SingleValueRef {

View File

@@ -109,17 +109,19 @@ template<typename T> class VArrayImpl {
    * Copy values from the virtual array into the provided span. The index of the value in the
    * virtual array is the same as the index in the span.
    */
-  virtual void materialize(const IndexMask &mask, T *dst) const
+  virtual void materialize(const IndexMask &mask, T *dst, const bool dst_is_uninitialized) const
   {
-    mask.foreach_index([&](const int64_t i) { dst[i] = this->get(i); });
-  }
-  /**
-   * Same as #materialize but #r_span is expected to be uninitialized.
-   */
-  virtual void materialize_to_uninitialized(const IndexMask &mask, T *dst) const
-  {
-    mask.foreach_index([&](const int64_t i) { new (dst + i) T(this->get(i)); });
+    if constexpr (std::is_trivially_copyable_v<T>) {
+      mask.foreach_index([&](const int64_t i) { dst[i] = this->get(i); });
+    }
+    else {
+      if (dst_is_uninitialized) {
+        mask.foreach_index([&](const int64_t i) { new (dst + i) T(this->get(i)); });
+      }
+      else {
+        mask.foreach_index([&](const int64_t i) { dst[i] = this->get(i); });
+      }
+    }
   }
   /**
@@ -127,18 +129,22 @@
    * in virtual array is not the same as the index in the output span. Instead, the span is filled
    * without gaps.
    */
-  virtual void materialize_compressed(const IndexMask &mask, T *dst) const
+  virtual void materialize_compressed(const IndexMask &mask,
+                                      T *dst,
+                                      const bool dst_is_uninitialized) const
   {
-    mask.foreach_index([&](const int64_t i, const int64_t pos) { dst[pos] = this->get(i); });
-  }
-  /**
-   * Same as #materialize_compressed but #r_span is expected to be uninitialized.
-   */
-  virtual void materialize_compressed_to_uninitialized(const IndexMask &mask, T *dst) const
-  {
-    mask.foreach_index(
-        [&](const int64_t i, const int64_t pos) { new (dst + pos) T(this->get(i)); });
+    if constexpr (std::is_trivially_copyable_v<T>) {
+      mask.foreach_index([&](const int64_t i, const int64_t pos) { dst[pos] = this->get(i); });
+    }
+    else {
+      if (dst_is_uninitialized) {
+        mask.foreach_index(
+            [&](const int64_t i, const int64_t pos) { new (dst + pos) T(this->get(i)); });
+      }
+      else {
+        mask.foreach_index([&](const int64_t i, const int64_t pos) { dst[pos] = this->get(i); });
+      }
+    }
   }
   /**
@@ -222,26 +228,39 @@ template<typename T> class VArrayImpl_For_Span : public VMutableArrayImpl<T> {
     return CommonVArrayInfo(CommonVArrayInfo::Type::Span, true, data_);
   }
-  void materialize(const IndexMask &mask, T *dst) const override
+  void materialize(const IndexMask &mask, T *dst, const bool dst_is_uninitialized) const override
   {
-    mask.foreach_index_optimized<int64_t>([&](const int64_t i) { dst[i] = data_[i]; });
+    if constexpr (std::is_trivially_copyable_v<T>) {
+      mask.foreach_index_optimized<int64_t>([&](const int64_t i) { dst[i] = data_[i]; });
+    }
+    else {
+      if (dst_is_uninitialized) {
+        mask.foreach_index_optimized<int64_t>([&](const int64_t i) { new (dst + i) T(data_[i]); });
+      }
+      else {
+        mask.foreach_index_optimized<int64_t>([&](const int64_t i) { dst[i] = data_[i]; });
+      }
+    }
   }
-  void materialize_to_uninitialized(const IndexMask &mask, T *dst) const override
+  void materialize_compressed(const IndexMask &mask,
+                              T *dst,
+                              const bool dst_is_uninitialized) const override
   {
-    mask.foreach_index_optimized<int64_t>([&](const int64_t i) { new (dst + i) T(data_[i]); });
-  }
-  void materialize_compressed(const IndexMask &mask, T *dst) const override
-  {
-    mask.foreach_index_optimized<int64_t>(
-        [&](const int64_t i, const int64_t pos) { dst[pos] = data_[i]; });
-  }
-  void materialize_compressed_to_uninitialized(const IndexMask &mask, T *dst) const override
-  {
-    mask.foreach_index_optimized<int64_t>(
-        [&](const int64_t i, const int64_t pos) { new (dst + pos) T(data_[i]); });
+    if constexpr (std::is_trivially_copyable_v<T>) {
+      mask.foreach_index_optimized<int64_t>(
+          [&](const int64_t i, const int64_t pos) { dst[pos] = data_[i]; });
+    }
+    else {
+      if (dst_is_uninitialized) {
+        mask.foreach_index_optimized<int64_t>(
+            [&](const int64_t i, const int64_t pos) { new (dst + pos) T(data_[i]); });
+      }
+      else {
+        mask.foreach_index_optimized<int64_t>(
+            [&](const int64_t i, const int64_t pos) { dst[pos] = data_[i]; });
+      }
+    }
   }
 };
@@ -314,24 +333,36 @@ template<typename T> class VArrayImpl_For_Single final : public VArrayImpl<T> {
     return CommonVArrayInfo(CommonVArrayInfo::Type::Single, true, &value_);
   }
-  void materialize(const IndexMask &mask, T *dst) const override
+  void materialize(const IndexMask &mask, T *dst, const bool dst_is_uninitialized) const override
   {
-    mask.foreach_index([&](const int64_t i) { dst[i] = value_; });
+    if constexpr (std::is_trivially_copyable_v<T>) {
+      mask.foreach_index([&](const int64_t i) { dst[i] = value_; });
+    }
+    else {
+      if (dst_is_uninitialized) {
+        mask.foreach_index([&](const int64_t i) { new (dst + i) T(value_); });
+      }
+      else {
+        mask.foreach_index([&](const int64_t i) { dst[i] = value_; });
+      }
+    }
   }
-  void materialize_to_uninitialized(const IndexMask &mask, T *dst) const override
+  void materialize_compressed(const IndexMask &mask,
+                              T *dst,
+                              const bool dst_is_uninitialized) const override
   {
-    mask.foreach_index([&](const int64_t i) { new (dst + i) T(value_); });
-  }
-  void materialize_compressed(const IndexMask &mask, T *dst) const override
-  {
-    initialized_fill_n(dst, mask.size(), value_);
-  }
-  void materialize_compressed_to_uninitialized(const IndexMask &mask, T *dst) const override
-  {
-    uninitialized_fill_n(dst, mask.size(), value_);
+    if constexpr (std::is_trivially_copyable_v<T>) {
+      initialized_fill_n(dst, mask.size(), value_);
+    }
+    else {
+      if (dst_is_uninitialized) {
+        uninitialized_fill_n(dst, mask.size(), value_);
+      }
+      else {
+        initialized_fill_n(dst, mask.size(), value_);
+      }
+    }
   }
 };
@@ -358,25 +389,37 @@ template<typename T, typename GetFunc> class VArrayImpl_For_Func final : public
     return get_func_(index);
   }
-  void materialize(const IndexMask &mask, T *dst) const override
+  void materialize(const IndexMask &mask, T *dst, const bool dst_is_uninitialized) const override
   {
-    mask.foreach_index([&](const int64_t i) { dst[i] = get_func_(i); });
+    if constexpr (std::is_trivially_copyable_v<T>) {
+      mask.foreach_index([&](const int64_t i) { dst[i] = get_func_(i); });
+    }
+    else {
+      if (dst_is_uninitialized) {
+        mask.foreach_index([&](const int64_t i) { new (dst + i) T(get_func_(i)); });
+      }
+      else {
+        mask.foreach_index([&](const int64_t i) { dst[i] = get_func_(i); });
+      }
+    }
   }
-  void materialize_to_uninitialized(const IndexMask &mask, T *dst) const override
+  void materialize_compressed(const IndexMask &mask,
+                              T *dst,
+                              const bool dst_is_uninitialized) const override
   {
-    mask.foreach_index([&](const int64_t i) { new (dst + i) T(get_func_(i)); });
-  }
-  void materialize_compressed(const IndexMask &mask, T *dst) const override
-  {
-    mask.foreach_index([&](const int64_t i, const int64_t pos) { dst[pos] = get_func_(i); });
-  }
-  void materialize_compressed_to_uninitialized(const IndexMask &mask, T *dst) const override
-  {
-    mask.foreach_index(
-        [&](const int64_t i, const int64_t pos) { new (dst + pos) T(get_func_(i)); });
+    if constexpr (std::is_trivially_copyable_v<T>) {
+      mask.foreach_index([&](const int64_t i, const int64_t pos) { dst[pos] = get_func_(i); });
+    }
+    else {
+      if (dst_is_uninitialized) {
+        mask.foreach_index(
+            [&](const int64_t i, const int64_t pos) { new (dst + pos) T(get_func_(i)); });
+      }
+      else {
+        mask.foreach_index([&](const int64_t i, const int64_t pos) { dst[pos] = get_func_(i); });
+      }
+    }
  }
 };
@@ -414,27 +457,43 @@ class VArrayImpl_For_DerivedSpan final : public VMutableArrayImpl<ElemT> {
     SetFunc(data_[index], std::move(value));
   }
-  void materialize(const IndexMask &mask, ElemT *dst) const override
+  void materialize(const IndexMask &mask,
+                   ElemT *dst,
+                   const bool dst_is_uninitialized) const override
   {
-    mask.foreach_index_optimized<int64_t>([&](const int64_t i) { dst[i] = GetFunc(data_[i]); });
+    if constexpr (std::is_trivially_copyable_v<ElemT>) {
+      mask.foreach_index_optimized<int64_t>([&](const int64_t i) { dst[i] = GetFunc(data_[i]); });
+    }
+    else {
+      if (dst_is_uninitialized) {
+        mask.foreach_index_optimized<int64_t>(
+            [&](const int64_t i) { new (dst + i) ElemT(GetFunc(data_[i])); });
+      }
+      else {
+        mask.foreach_index_optimized<int64_t>(
+            [&](const int64_t i) { dst[i] = GetFunc(data_[i]); });
+      }
+    }
   }
-  void materialize_to_uninitialized(const IndexMask &mask, ElemT *dst) const override
+  void materialize_compressed(const IndexMask &mask,
+                              ElemT *dst,
+                              const bool dst_is_uninitialized) const override
   {
-    mask.foreach_index_optimized<int64_t>(
-        [&](const int64_t i) { new (dst + i) ElemT(GetFunc(data_[i])); });
-  }
-  void materialize_compressed(const IndexMask &mask, ElemT *dst) const override
-  {
-    mask.foreach_index_optimized<int64_t>(
-        [&](const int64_t i, const int64_t pos) { dst[pos] = GetFunc(data_[i]); });
-  }
-  void materialize_compressed_to_uninitialized(const IndexMask &mask, ElemT *dst) const override
-  {
-    mask.foreach_index_optimized<int64_t>(
-        [&](const int64_t i, const int64_t pos) { new (dst + pos) ElemT(GetFunc(data_[i])); });
+    if constexpr (std::is_trivially_copyable_v<ElemT>) {
+      mask.foreach_index_optimized<int64_t>(
+          [&](const int64_t i, const int64_t pos) { dst[pos] = GetFunc(data_[i]); });
+    }
+    else {
+      if (dst_is_uninitialized) {
+        mask.foreach_index_optimized<int64_t>(
+            [&](const int64_t i, const int64_t pos) { new (dst + pos) ElemT(GetFunc(data_[i])); });
+      }
+      else {
+        mask.foreach_index_optimized<int64_t>(
+            [&](const int64_t i, const int64_t pos) { dst[pos] = GetFunc(data_[i]); });
+      }
+    }
  }
 };
@@ -738,7 +797,7 @@ template<typename T> class VArrayCommon {
   void materialize(const IndexMask &mask, MutableSpan<T> r_span) const
   {
     BLI_assert(mask.min_array_size() <= this->size());
-    impl_->materialize(mask, r_span.data());
+    impl_->materialize(mask, r_span.data(), false);
   }
   void materialize_to_uninitialized(MutableSpan<T> r_span) const
@@ -749,18 +808,18 @@
   void materialize_to_uninitialized(const IndexMask &mask, MutableSpan<T> r_span) const
   {
     BLI_assert(mask.min_array_size() <= this->size());
-    impl_->materialize_to_uninitialized(mask, r_span.data());
+    impl_->materialize(mask, r_span.data(), true);
   }
   /** Copy some elements of the virtual array into a span. */
   void materialize_compressed(const IndexMask &mask, MutableSpan<T> r_span) const
   {
-    impl_->materialize_compressed(mask, r_span.data());
+    impl_->materialize_compressed(mask, r_span.data(), false);
   }
   void materialize_compressed_to_uninitialized(const IndexMask &mask, MutableSpan<T> r_span) const
   {
-    impl_->materialize_compressed_to_uninitialized(mask, r_span.data());
+    impl_->materialize_compressed(mask, r_span.data(), true);
   }
   /** See #GVArrayImpl::try_assign_GVArray. */

View File

@@ -16,36 +16,40 @@ namespace blender {
 /** \name #GVArrayImpl
  * \{ */
-void GVArrayImpl::materialize(const IndexMask &mask, void *dst) const
+void GVArrayImpl::materialize(const IndexMask &mask,
+                              void *dst,
+                              const bool dst_is_uninitialized) const
 {
-  mask.foreach_index_optimized<int64_t>([&](const int64_t i) {
-    void *elem_dst = POINTER_OFFSET(dst, type_->size * i);
-    this->get(i, elem_dst);
-  });
+  if (dst_is_uninitialized) {
+    mask.foreach_index_optimized<int64_t>([&](const int64_t i) {
+      void *elem_dst = POINTER_OFFSET(dst, type_->size * i);
+      this->get_to_uninitialized(i, elem_dst);
+    });
+  }
+  else {
+    mask.foreach_index_optimized<int64_t>([&](const int64_t i) {
+      void *elem_dst = POINTER_OFFSET(dst, type_->size * i);
+      this->get(i, elem_dst);
+    });
+  }
 }
-void GVArrayImpl::materialize_to_uninitialized(const IndexMask &mask, void *dst) const
+void GVArrayImpl::materialize_compressed(const IndexMask &mask,
+                                         void *dst,
+                                         const bool dst_is_uninitialized) const
 {
-  mask.foreach_index_optimized<int64_t>([&](const int64_t i) {
-    void *elem_dst = POINTER_OFFSET(dst, type_->size * i);
-    this->get_to_uninitialized(i, elem_dst);
-  });
-}
-void GVArrayImpl::materialize_compressed(const IndexMask &mask, void *dst) const
-{
-  mask.foreach_index_optimized<int64_t>([&](const int64_t i, const int64_t pos) {
-    void *elem_dst = POINTER_OFFSET(dst, type_->size * pos);
-    this->get(i, elem_dst);
-  });
-}
-void GVArrayImpl::materialize_compressed_to_uninitialized(const IndexMask &mask, void *dst) const
-{
-  mask.foreach_index_optimized<int64_t>([&](const int64_t i, const int64_t pos) {
-    void *elem_dst = POINTER_OFFSET(dst, type_->size * pos);
-    this->get_to_uninitialized(i, elem_dst);
-  });
+  if (dst_is_uninitialized) {
+    mask.foreach_index_optimized<int64_t>([&](const int64_t i, const int64_t pos) {
+      void *elem_dst = POINTER_OFFSET(dst, type_->size * pos);
+      this->get_to_uninitialized(i, elem_dst);
+    });
+  }
+  else {
+    mask.foreach_index_optimized<int64_t>([&](const int64_t i, const int64_t pos) {
+      void *elem_dst = POINTER_OFFSET(dst, type_->size * pos);
+      this->get(i, elem_dst);
+    });
+  }
 }
 void GVArrayImpl::get(const int64_t index, void *r_value) const
@@ -151,25 +155,28 @@ CommonVArrayInfo GVArrayImpl_For_GSpan::common_info() const
   return CommonVArrayInfo{CommonVArrayInfo::Type::Span, true, data_};
 }
-void GVArrayImpl_For_GSpan::materialize(const IndexMask &mask, void *dst) const
+void GVArrayImpl_For_GSpan::materialize(const IndexMask &mask,
+                                        void *dst,
+                                        const bool dst_is_uninitialized) const
 {
-  type_->copy_assign_indices(data_, dst, mask);
+  if (dst_is_uninitialized) {
+    type_->copy_construct_indices(data_, dst, mask);
+  }
+  else {
+    type_->copy_assign_indices(data_, dst, mask);
+  }
 }
-void GVArrayImpl_For_GSpan::materialize_to_uninitialized(const IndexMask &mask, void *dst) const
+void GVArrayImpl_For_GSpan::materialize_compressed(const IndexMask &mask,
+                                                   void *dst,
+                                                   const bool dst_is_uninitialized) const
 {
-  type_->copy_construct_indices(data_, dst, mask);
-}
-void GVArrayImpl_For_GSpan::materialize_compressed(const IndexMask &mask, void *dst) const
-{
-  type_->copy_assign_compressed(data_, dst, mask);
-}
-void GVArrayImpl_For_GSpan::materialize_compressed_to_uninitialized(const IndexMask &mask,
-                                                                    void *dst) const
-{
-  type_->copy_construct_compressed(data_, dst, mask);
+  if (dst_is_uninitialized) {
+    type_->copy_construct_compressed(data_, dst, mask);
+  }
+  else {
+    type_->copy_assign_compressed(data_, dst, mask);
+  }
 }
 /** \} */
@@ -195,26 +202,28 @@ CommonVArrayInfo GVArrayImpl_For_SingleValueRef::common_info() const
   return CommonVArrayInfo{CommonVArrayInfo::Type::Single, true, value_};
 }
-void GVArrayImpl_For_SingleValueRef::materialize(const IndexMask &mask, void *dst) const
+void GVArrayImpl_For_SingleValueRef::materialize(const IndexMask &mask,
+                                                 void *dst,
+                                                 const bool dst_is_uninitialized) const
 {
-  type_->fill_assign_indices(value_, dst, mask);
+  if (dst_is_uninitialized) {
+    type_->fill_construct_indices(value_, dst, mask);
+  }
+  else {
+    type_->fill_assign_indices(value_, dst, mask);
+  }
 }
-void GVArrayImpl_For_SingleValueRef::materialize_to_uninitialized(const IndexMask &mask,
-                                                                  void *dst) const
+void GVArrayImpl_For_SingleValueRef::materialize_compressed(const IndexMask &mask,
+                                                            void *dst,
+                                                            const bool dst_is_uninitialized) const
 {
-  type_->fill_construct_indices(value_, dst, mask);
-}
-void GVArrayImpl_For_SingleValueRef::materialize_compressed(const IndexMask &mask, void *dst) const
-{
-  type_->fill_assign_n(value_, dst, mask.size());
-}
-void GVArrayImpl_For_SingleValueRef::materialize_compressed_to_uninitialized(const IndexMask &mask,
-                                                                              void *dst) const
-{
-  type_->fill_construct_n(value_, dst, mask.size());
+  if (dst_is_uninitialized) {
+    type_->fill_construct_n(value_, dst, mask.size());
+  }
+  else {
+    type_->fill_assign_n(value_, dst, mask.size());
+  }
 }
 /** \} */
@@ -278,19 +287,16 @@ template<int BufferSize> class GVArrayImpl_For_SmallTrivialSingleValue : public
     memcpy(r_value, &buffer_, type_->size);
   }
-  void materialize(const IndexMask &mask, void *dst) const final
-  {
-    this->materialize_to_uninitialized(mask, dst);
-  }
-  void materialize_to_uninitialized(const IndexMask &mask, void *dst) const final
+  void materialize(const IndexMask &mask,
+                   void *dst,
+                   const bool /*dst_is_uninitialized*/) const final
   {
     type_->fill_construct_indices(buffer_, dst, mask);
   }
-  void materialize_compressed(const IndexMask &mask, void *dst) const final
-  {
-    this->materialize_compressed_to_uninitialized(mask, dst);
-  }
-  void materialize_compressed_to_uninitialized(const IndexMask &mask, void *dst) const final
+  void materialize_compressed(const IndexMask &mask,
+                              void *dst,
+                              const bool /*dst_is_uninitialized*/) const final
   {
     type_->fill_construct_n(buffer_, dst, mask.size());
   }
@@ -515,31 +521,21 @@ class GVArrayImpl_For_SlicedGVArray : public GVArrayImpl {
     return {};
   }
-  void materialize(const IndexMask &mask, void *dst) const final
+  void materialize(const IndexMask &mask, void *dst, const bool dst_is_uninitialized) const final
   {
     IndexMaskMemory memory;
     const IndexMask shifted_mask = mask.shift(offset_, memory);
     void *shifted_dst = POINTER_OFFSET(dst, -offset_ * type_->size);
-    varray_.materialize(shifted_mask, shifted_dst);
+    varray_.get_implementation()->materialize(shifted_mask, shifted_dst, dst_is_uninitialized);
   }
-  void materialize_to_uninitialized(const IndexMask &mask, void *dst) const final
+  void materialize_compressed(const IndexMask &mask,
+                              void *dst,
+                              const bool dst_is_uninitialized) const final
   {
     IndexMaskMemory memory;
     const IndexMask shifted_mask = mask.shift(offset_, memory);
-    void *shifted_dst = POINTER_OFFSET(dst, -offset_ * type_->size);
-    varray_.materialize_to_uninitialized(shifted_mask, shifted_dst);
-  }
-  void materialize_compressed(const IndexMask &mask, void *dst) const final
-  {
-    IndexMaskMemory memory;
-    const IndexMask shifted_mask = mask.shift(offset_, memory);
-    varray_.materialize_compressed(shifted_mask, dst);
-  }
-  void materialize_compressed_to_uninitialized(const IndexMask &mask, void *dst) const override
-  {
-    IndexMaskMemory memory;
-    const IndexMask shifted_mask = mask.shift(offset_, memory);
-    varray_.materialize_compressed_to_uninitialized(shifted_mask, dst);
+    varray_.get_implementation()->materialize_compressed(shifted_mask, dst, dst_is_uninitialized);
   }
 };
@@ -582,7 +578,7 @@ void GVArrayCommon::materialize(void *dst) const
 void GVArrayCommon::materialize(const IndexMask &mask, void *dst) const
 {
-  impl_->materialize(mask, dst);
+  impl_->materialize(mask, dst, false);
 }
 void GVArrayCommon::materialize_to_uninitialized(void *dst) const
@@ -593,17 +589,17 @@
 void GVArrayCommon::materialize_to_uninitialized(const IndexMask &mask, void *dst) const
 {
   BLI_assert(mask.min_array_size() <= impl_->size());
-  impl_->materialize_to_uninitialized(mask, dst);
+  impl_->materialize(mask, dst, true);
 }
 void GVArrayCommon::materialize_compressed(const IndexMask &mask, void *dst) const
 {
-  impl_->materialize_compressed(mask, dst);
+  impl_->materialize_compressed(mask, dst, false);
 }
 void GVArrayCommon::materialize_compressed_to_uninitialized(const IndexMask &mask, void *dst) const
 {
-  impl_->materialize_compressed_to_uninitialized(mask, dst);
+  impl_->materialize_compressed(mask, dst, true);
 }
 void GVArrayCommon::copy_from(const GVArrayCommon &other)

View File

@@ -301,7 +301,7 @@ inline void execute_materialized(TypeSequence<ParamTags...> /*param_tags*/,
       }
       /* As a fallback, do a virtual function call to retrieve all elements in the current
        * chunk. The elements are stored in a temporary buffer reused for every chunk. */
-      varray_impl.materialize_compressed_to_uninitialized(*current_segment_mask, tmp_buffer);
+      varray_impl.materialize_compressed(*current_segment_mask, tmp_buffer, true);
      /* Remember that this parameter has been materialized, so that the values are
       * destructed properly when the chunk is done. */
      arg_info.mode = MaterializeArgMode::Materialized;