Refactor: BLI: Make some CPPType properties public instead of using methods
This makes accessing these properties more convenient. Since we only ever have const references to `CPPType`, there isn't really a benefit to using methods to avoid mutation.

Pull Request: https://projects.blender.org/blender/blender/pulls/137482
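For illustration, a minimal sketch of the usage change (not part of this commit; the function name is hypothetical and it assumes `float3` is registered as a `CPPType`, as common math types are in BLI):

  #include "BLI_cpp_type.hh"
  #include "BLI_math_vector_types.hh"
  #include "MEM_guardedalloc.h"

  static void example_cpp_type_usage()
  {
    /* Look up the runtime type info for a static type. */
    const blender::CPPType &type = blender::CPPType::get<blender::float3>();
    /* Previously spelled type.size() and type.alignment(); now plain data members. */
    void *buffer = MEM_mallocN_aligned(type.size, type.alignment, __func__);
    type.default_construct(buffer);
    if (!type.is_trivially_destructible) {
      type.destruct(buffer);
    }
    MEM_freeN(buffer);
  }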
@@ -178,7 +178,7 @@ void VolumeGridBakeItem::count_memory(MemoryCounter &memory) const
 
 PrimitiveBakeItem::PrimitiveBakeItem(const CPPType &type, const void *value) : type_(type)
 {
-  value_ = MEM_mallocN_aligned(type.size(), type.alignment(), __func__);
+  value_ = MEM_mallocN_aligned(type.size, type.alignment, __func__);
   type.copy_construct(value, value_);
 }
@@ -416,8 +416,8 @@ static std::shared_ptr<DictionaryValue> write_blob_simple_gspan(BlobWriter &blob
                                                                 const GSpan data)
 {
   const CPPType &type = data.type();
-  BLI_assert(type.is_trivial());
-  if (type.size() == 1 || type.is<ColorGeometry4b>()) {
+  BLI_assert(type.is_trivial);
+  if (type.size == 1 || type.is<ColorGeometry4b>()) {
     return write_blob_raw_bytes(blob_writer, blob_sharing, data.data(), data.size_in_bytes());
   }
   return write_blob_raw_data_with_endian(
@@ -429,13 +429,13 @@ static std::shared_ptr<DictionaryValue> write_blob_simple_gspan(BlobWriter &blob
                                       GMutableSpan r_data)
 {
   const CPPType &type = r_data.type();
-  BLI_assert(type.is_trivial());
-  if (type.size() == 1 || type.is<ColorGeometry4b>()) {
+  BLI_assert(type.is_trivial);
+  if (type.size == 1 || type.is<ColorGeometry4b>()) {
     return read_blob_raw_bytes(blob_reader, io_data, r_data.size_in_bytes(), r_data.data());
   }
   if (type.is_any<int16_t, uint16_t, int32_t, uint32_t, int64_t, uint64_t, float>()) {
     return read_blob_raw_data_with_endian(
-        blob_reader, io_data, type.size(), r_data.size(), r_data.data());
+        blob_reader, io_data, type.size, r_data.size(), r_data.data());
   }
   if (type.is_any<float2, int2>()) {
     return read_blob_raw_data_with_endian(
@@ -485,7 +485,7 @@ static std::shared_ptr<DictionaryValue> write_blob_shared_simple_gspan(
   const char *func = __func__;
   const std::optional<ImplicitSharingInfoAndData> sharing_info_and_data = blob_sharing.read_shared(
       io_data, [&]() -> std::optional<ImplicitSharingInfoAndData> {
-        void *data_mem = MEM_mallocN_aligned(size * cpp_type.size(), cpp_type.alignment(), func);
+        void *data_mem = MEM_mallocN_aligned(size * cpp_type.size, cpp_type.alignment, func);
         if (!read_blob_simple_gspan(blob_reader, io_data, {cpp_type, data_mem, size})) {
           MEM_freeN(data_mem);
           return std::nullopt;
@@ -402,20 +402,20 @@ static GSpan evaluate_attribute(const GVArray &src,
     if (src.is_span()) {
       return src.get_internal_span();
     }
-    buffer.reinitialize(curves.points_num() * src.type().size());
+    buffer.reinitialize(curves.points_num() * src.type().size);
     src.materialize(buffer.data());
     GMutableSpan eval{src.type(), buffer.data(), curves.points_num()};
     return eval;
   }
 
   if (src.is_span()) {
-    buffer.reinitialize(curves.evaluated_points_num() * src.type().size());
+    buffer.reinitialize(curves.evaluated_points_num() * src.type().size);
     GMutableSpan eval{src.type(), buffer.data(), curves.evaluated_points_num()};
     curves.interpolate_to_evaluated(src.get_internal_span(), eval);
     return eval;
   }
   GVArraySpan src_buffer(src);
-  buffer.reinitialize(curves.evaluated_points_num() * src.type().size());
+  buffer.reinitialize(curves.evaluated_points_num() * src.type().size);
   GMutableSpan eval{src.type(), buffer.data(), curves.evaluated_points_num()};
   curves.interpolate_to_evaluated(src_buffer, eval);
   return eval;
@@ -866,7 +866,7 @@ bool try_capture_fields_on_geometry(MutableAttributeAccessor attributes,
 
   /* Could avoid allocating a new buffer if:
    * - The field does not depend on that attribute (we can't easily check for that yet). */
-  void *buffer = MEM_mallocN_aligned(type.size() * domain_size, type.alignment(), __func__);
+  void *buffer = MEM_mallocN_aligned(type.size * domain_size, type.alignment, __func__);
   if (!selection_is_full) {
     const GAttributeReader old_attribute = attributes.lookup_or_default(id, domain, data_type);
     old_attribute.varray.materialize(buffer);
@@ -82,7 +82,7 @@ void Bundle::add_new(SocketInterfaceKey key, const bNodeSocketType &type, const
   BLI_assert(!this->contains(key));
   BLI_assert(type.geometry_nodes_cpp_type);
   const CPPType &cpp_type = *type.geometry_nodes_cpp_type;
-  void *buffer = MEM_mallocN_aligned(cpp_type.size(), cpp_type.alignment(), __func__);
+  void *buffer = MEM_mallocN_aligned(cpp_type.size, cpp_type.alignment, __func__);
   cpp_type.copy_construct(value, buffer);
   items_.append(StoredItem{std::move(key), &type, buffer});
   buffers_.append(buffer);
@@ -98,13 +98,61 @@ ENUM_OPERATORS(CPPTypeFlags, CPPTypeFlags::EqualityComparable)
 namespace blender {
 
 class CPPType : NonCopyable, NonMovable {
+ public:
+  /**
+   * Required memory in bytes for an instance of this type.
+   *
+   * C++ equivalent:
+   * `sizeof(T);`
+   */
+  int64_t size = 0;
+
+  /**
+   * Required memory alignment for an instance of this type.
+   *
+   * C++ equivalent:
+   * alignof(T);
+   */
+  int64_t alignment = 0;
+
+  /**
+   * When true, the value is like a normal C type, it can be copied around with #memcpy and does
+   * not have to be destructed.
+   *
+   * C++ equivalent:
+   * std::is_trivial_v<T>;
+   */
+  bool is_trivial = false;
+
+  /**
+   * When true, the destructor does not have to be called on this type. This can sometimes be used
+   * for optimization purposes.
+   *
+   * C++ equivalent:
+   * std::is_trivially_destructible_v<T>;
+   */
+  bool is_trivially_destructible = false;
+
+  /**
+   * Returns true, when the type has the following functions:
+   * - Default constructor.
+   * - Copy constructor.
+   * - Move constructor.
+   * - Copy assignment operator.
+   * - Move assignment operator.
+   * - Destructor.
+   */
+  bool has_special_member_functions = false;
+
+  bool is_default_constructible = false;
+  bool is_copy_constructible = false;
+  bool is_move_constructible = false;
+  bool is_destructible = false;
+  bool is_copy_assignable = false;
+  bool is_move_assignable = false;
+
  private:
-  int64_t size_ = 0;
-  int64_t alignment_ = 0;
   uintptr_t alignment_mask_ = 0;
-  bool is_trivial_ = false;
-  bool is_trivially_destructible_ = false;
-  bool has_special_member_functions_ = false;
 
   void (*default_construct_)(void *ptr) = nullptr;
   void (*default_construct_indices_)(void *ptr, const IndexMask &mask) = nullptr;
@@ -165,60 +213,10 @@ class CPPType : NonCopyable, NonMovable {
    */
   StringRefNull name() const;
 
-  /**
-   * Required memory in bytes for an instance of this type.
-   *
-   * C++ equivalent:
-   * `sizeof(T);`
-   */
-  int64_t size() const;
-
-  /**
-   * Required memory alignment for an instance of this type.
-   *
-   * C++ equivalent:
-   * alignof(T);
-   */
-  int64_t alignment() const;
-
-  /**
-   * When true, the destructor does not have to be called on this type. This can sometimes be used
-   * for optimization purposes.
-   *
-   * C++ equivalent:
-   * std::is_trivially_destructible_v<T>;
-   */
-  bool is_trivially_destructible() const;
-
-  /**
-   * When true, the value is like a normal C type, it can be copied around with #memcpy and does
-   * not have to be destructed.
-   *
-   * C++ equivalent:
-   * std::is_trivial_v<T>;
-   */
-  bool is_trivial() const;
-  bool is_default_constructible() const;
-  bool is_copy_constructible() const;
-  bool is_move_constructible() const;
-  bool is_destructible() const;
-  bool is_copy_assignable() const;
-  bool is_move_assignable() const;
   bool is_printable() const;
   bool is_equality_comparable() const;
   bool is_hashable() const;
 
-  /**
-   * Returns true, when the type has the following functions:
-   * - Default constructor.
-   * - Copy constructor.
-   * - Move constructor.
-   * - Copy assignment operator.
-   * - Move assignment operator.
-   * - Destructor.
-   */
-  bool has_special_member_functions() const;
-
   /**
    * Returns true, when the given pointer fulfills the alignment requirement of this type.
    */
@@ -432,8 +430,8 @@ void register_cpp_types();
 
 /* Utility for allocating an uninitialized buffer for a single value of the given #CPPType. */
 #define BUFFER_FOR_CPP_TYPE_VALUE(type, variable_name) \
-  blender::DynamicStackBuffer<64, 64> stack_buffer_for_##variable_name((type).size(), \
-                                                                       (type).alignment()); \
+  blender::DynamicStackBuffer<64, 64> stack_buffer_for_##variable_name((type).size, \
+                                                                       (type).alignment); \
   void *variable_name = stack_buffer_for_##variable_name.buffer();
 
 namespace blender {
@@ -464,56 +462,6 @@ inline StringRefNull CPPType::name() const
   return debug_name_;
 }
 
-inline int64_t CPPType::size() const
-{
-  return size_;
-}
-
-inline int64_t CPPType::alignment() const
-{
-  return alignment_;
-}
-
-inline bool CPPType::is_trivially_destructible() const
-{
-  return is_trivially_destructible_;
-}
-
-inline bool CPPType::is_trivial() const
-{
-  return is_trivial_;
-}
-
-inline bool CPPType::is_default_constructible() const
-{
-  return default_construct_ != nullptr;
-}
-
-inline bool CPPType::is_copy_constructible() const
-{
-  return copy_assign_ != nullptr;
-}
-
-inline bool CPPType::is_move_constructible() const
-{
-  return move_assign_ != nullptr;
-}
-
-inline bool CPPType::is_destructible() const
-{
-  return destruct_ != nullptr;
-}
-
-inline bool CPPType::is_copy_assignable() const
-{
-  return copy_assign_ != nullptr;
-}
-
-inline bool CPPType::is_move_assignable() const
-{
-  return copy_construct_ != nullptr;
-}
-
 inline bool CPPType::is_printable() const
 {
   return print_ != nullptr;
@@ -529,11 +477,6 @@ inline bool CPPType::is_hashable() const
   return hash_ != nullptr;
 }
 
-inline bool CPPType::has_special_member_functions() const
-{
-  return has_special_member_functions_;
-}
-
 inline bool CPPType::pointer_has_valid_alignment(const void *ptr) const
 {
   return (uintptr_t(ptr) & alignment_mask_) == 0;
@@ -636,7 +579,7 @@ inline void CPPType::copy_assign_compressed(const void *src,
 
 inline void CPPType::copy_construct(const void *src, void *dst) const
 {
-  BLI_assert(src != dst || is_trivial_);
+  BLI_assert(src != dst || this->is_trivial);
   BLI_assert(this->pointer_can_point_to_instance(src));
   BLI_assert(this->pointer_can_point_to_instance(dst));
@@ -694,7 +637,7 @@ inline void CPPType::move_assign_indices(void *src, void *dst, const IndexMask &
 
 inline void CPPType::move_construct(void *src, void *dst) const
 {
-  BLI_assert(src != dst || is_trivial_);
+  BLI_assert(src != dst || this->is_trivial);
   BLI_assert(this->pointer_can_point_to_instance(src));
   BLI_assert(this->pointer_can_point_to_instance(dst));
@@ -717,7 +660,7 @@ inline void CPPType::move_construct_indices(void *src, void *dst, const IndexMas
 
 inline void CPPType::relocate_assign(void *src, void *dst) const
 {
-  BLI_assert(src != dst || is_trivial_);
+  BLI_assert(src != dst || this->is_trivial);
   BLI_assert(this->pointer_can_point_to_instance(src));
   BLI_assert(this->pointer_can_point_to_instance(dst));
@@ -740,7 +683,7 @@ inline void CPPType::relocate_assign_indices(void *src, void *dst, const IndexMa
 
 inline void CPPType::relocate_construct(void *src, void *dst) const
 {
-  BLI_assert(src != dst || is_trivial_);
+  BLI_assert(src != dst || this->is_trivial);
   BLI_assert(this->pointer_can_point_to_instance(src));
   BLI_assert(this->pointer_can_point_to_instance(dst));
@@ -792,7 +735,7 @@ inline void CPPType::fill_construct_indices(const void *value,
 inline bool CPPType::can_exist_in_buffer(const int64_t buffer_size,
                                          const int64_t buffer_alignment) const
 {
-  return size_ <= buffer_size && alignment_ <= buffer_alignment;
+  return this->size <= buffer_size && this->alignment <= buffer_alignment;
 }
 
 inline void CPPType::print(const void *value, std::stringstream &ss) const
@@ -222,10 +222,10 @@ CPPType::CPPType(TypeTag<T> /*type*/,
   using namespace cpp_type_util;
 
   debug_name_ = debug_name;
-  size_ = int64_t(sizeof(T));
-  alignment_ = int64_t(alignof(T));
-  is_trivial_ = std::is_trivial_v<T>;
-  is_trivially_destructible_ = std::is_trivially_destructible_v<T>;
+  this->size = int64_t(sizeof(T));
+  this->alignment = int64_t(alignof(T));
+  this->is_trivial = std::is_trivial_v<T>;
+  this->is_trivially_destructible = std::is_trivially_destructible_v<T>;
   if constexpr (std::is_default_constructible_v<T>) {
     default_construct_ = default_construct_cb<T>;
     default_construct_indices_ = default_construct_indices_cb<T>;
@@ -324,9 +324,15 @@ CPPType::CPPType(TypeTag<T> /*type*/,
     is_equal_ = is_equal_cb<T>;
   }
 
-  alignment_mask_ = uintptr_t(alignment_) - uintptr_t(1);
-  has_special_member_functions_ = (default_construct_ && copy_construct_ && copy_assign_ &&
-                                   move_construct_ && move_assign_ && destruct_);
+  alignment_mask_ = uintptr_t(this->alignment) - uintptr_t(1);
+  this->has_special_member_functions = (default_construct_ && copy_construct_ && copy_assign_ &&
+                                        move_construct_ && move_assign_ && destruct_);
+  this->is_default_constructible = default_construct_ != nullptr;
+  this->is_copy_constructible = copy_construct_ != nullptr;
+  this->is_move_constructible = move_construct_ != nullptr;
+  this->is_destructible = destruct_ != nullptr;
+  this->is_copy_assignable = copy_assign_ != nullptr;
+  this->is_move_assignable = move_assign_ != nullptr;
 }
 
 }  // namespace blender
@@ -158,13 +158,13 @@ class GArray {
   const void *operator[](int64_t index) const
   {
     BLI_assert(index < size_);
-    return POINTER_OFFSET(data_, type_->size() * index);
+    return POINTER_OFFSET(data_, type_->size * index);
   }
 
   void *operator[](int64_t index)
   {
     BLI_assert(index < size_);
-    return POINTER_OFFSET(data_, type_->size() * index);
+    return POINTER_OFFSET(data_, type_->size * index);
   }
 
   operator GSpan() const
@@ -237,8 +237,8 @@ class GArray {
  private:
   void *allocate(int64_t size)
   {
-    const int64_t item_size = type_->size();
-    const int64_t alignment = type_->alignment();
+    const int64_t item_size = type_->size;
+    const int64_t alignment = type_->alignment;
     return allocator_.allocate(size_t(size) * item_size, alignment, AT);
   }
 
@@ -69,7 +69,7 @@ class GSpan {
 
   int64_t size_in_bytes() const
   {
-    return type_->size() * size_;
+    return type_->size * size_;
   }
 
   const void *data() const
@@ -80,7 +80,7 @@ class GSpan {
   const void *operator[](int64_t index) const
   {
     BLI_assert(index < size_);
-    return POINTER_OFFSET(data_, type_->size() * index);
+    return POINTER_OFFSET(data_, type_->size * index);
   }
 
   template<typename T> Span<T> typed() const
@@ -95,7 +95,7 @@ class GSpan {
     BLI_assert(start >= 0);
     BLI_assert(size >= 0);
     BLI_assert(start + size <= size_ || size == 0);
-    return GSpan(type_, POINTER_OFFSET(data_, type_->size() * start), size);
+    return GSpan(type_, POINTER_OFFSET(data_, type_->size * start), size);
   }
 
   GSpan slice(const IndexRange range) const
@@ -107,7 +107,7 @@ class GSpan {
   {
     BLI_assert(n >= 0);
     const int64_t new_size = std::max<int64_t>(0, size_ - n);
-    return GSpan(*type_, POINTER_OFFSET(data_, type_->size() * n), new_size);
+    return GSpan(*type_, POINTER_OFFSET(data_, type_->size * n), new_size);
   }
 
   GSpan drop_back(const int64_t n) const
@@ -128,7 +128,7 @@ class GSpan {
   {
     BLI_assert(n >= 0);
     const int64_t new_size = std::min<int64_t>(size_, n);
-    return GSpan(*type_, POINTER_OFFSET(data_, type_->size() * (size_ - new_size)), new_size);
+    return GSpan(*type_, POINTER_OFFSET(data_, type_->size * (size_ - new_size)), new_size);
   }
 };
@@ -196,7 +196,7 @@ class GMutableSpan {
 
   int64_t size_in_bytes() const
   {
-    return type_->size() * size_;
+    return type_->size * size_;
   }
 
   void *data() const
@@ -208,7 +208,7 @@ class GMutableSpan {
   {
     BLI_assert(index >= 0);
     BLI_assert(index < size_);
-    return POINTER_OFFSET(data_, type_->size() * index);
+    return POINTER_OFFSET(data_, type_->size * index);
   }
 
   template<typename T> MutableSpan<T> typed() const
@@ -223,7 +223,7 @@ class GMutableSpan {
     BLI_assert(start >= 0);
     BLI_assert(size >= 0);
     BLI_assert(start + size <= size_ || size == 0);
-    return GMutableSpan(type_, POINTER_OFFSET(data_, type_->size() * start), size);
+    return GMutableSpan(type_, POINTER_OFFSET(data_, type_->size * start), size);
   }
 
   GMutableSpan slice(IndexRange range) const
@@ -235,7 +235,7 @@ class GMutableSpan {
   {
     BLI_assert(n >= 0);
     const int64_t new_size = std::max<int64_t>(0, size_ - n);
-    return GMutableSpan(*type_, POINTER_OFFSET(data_, type_->size() * n), new_size);
+    return GMutableSpan(*type_, POINTER_OFFSET(data_, type_->size * n), new_size);
   }
 
   GMutableSpan drop_back(const int64_t n) const
@@ -256,8 +256,7 @@ class GMutableSpan {
   {
     BLI_assert(n >= 0);
     const int64_t new_size = std::min<int64_t>(size_, n);
-    return GMutableSpan(
-        *type_, POINTER_OFFSET(data_, type_->size() * (size_ - new_size)), new_size);
+    return GMutableSpan(*type_, POINTER_OFFSET(data_, type_->size * (size_ - new_size)), new_size);
   }
 
   /**
@@ -48,7 +48,7 @@ template<typename Key> class GValueMap {
   template<typename ForwardKey> void add_new_by_move(ForwardKey &&key, GMutablePointer value)
   {
     const CPPType &type = *value.type();
-    void *buffer = allocator_.allocate(type.size(), type.alignment());
+    void *buffer = allocator_.allocate(type.size, type.alignment);
     type.move_construct(value.get(), buffer);
     values_.add_new_as(std::forward<ForwardKey>(key), GMutablePointer{type, buffer});
   }
@@ -58,7 +58,7 @@ template<typename Key> class GValueMap {
   template<typename ForwardKey> void add_new_by_copy(ForwardKey &&key, GPointer value)
   {
     const CPPType &type = *value.type();
-    void *buffer = allocator_.allocate(type.size(), type.alignment());
+    void *buffer = allocator_.allocate(type.size, type.alignment);
     type.copy_construct(value.get(), buffer);
     values_.add_new_as(std::forward<ForwardKey>(key), GMutablePointer{type, buffer});
   }
@@ -574,13 +574,13 @@ class GVArrayImpl_For_GSpan : public GVMutableArrayImpl {
   GVArrayImpl_For_GSpan(const GMutableSpan span)
       : GVMutableArrayImpl(span.type(), span.size()),
         data_(span.data()),
-        element_size_(span.type().size())
+        element_size_(span.type().size)
   {
   }
 
  protected:
   GVArrayImpl_For_GSpan(const CPPType &type, int64_t size)
-      : GVMutableArrayImpl(type, size), element_size_(type.size())
+      : GVMutableArrayImpl(type, size), element_size_(type.size)
   {
   }
@@ -11,13 +11,13 @@
 namespace blender {
 
 GVectorArray::GVectorArray(const CPPType &type, const int64_t array_size)
-    : type_(type), element_size_(type.size()), items_(array_size)
+    : type_(type), element_size_(type.size), items_(array_size)
 {
 }
 
 GVectorArray::~GVectorArray()
 {
-  if (type_.is_trivially_destructible()) {
+  if (type_.is_trivially_destructible) {
     return;
   }
   for (Item &item : items_) {
@@ -92,7 +92,7 @@ void GVectorArray::realloc_to_at_least(Item &item, int64_t min_capacity)
 {
   const int64_t new_capacity = std::max(min_capacity, item.length * 2);
 
-  void *new_buffer = allocator_.allocate(element_size_ * new_capacity, type_.alignment());
+  void *new_buffer = allocator_.allocate(element_size_ * new_capacity, type_.alignment);
   type_.relocate_assign_n(item.start, new_buffer, item.length);
 
   item.start = new_buffer;
@@ -19,7 +19,7 @@ namespace blender {
 void GVArrayImpl::materialize(const IndexMask &mask, void *dst) const
 {
   mask.foreach_index_optimized<int64_t>([&](const int64_t i) {
-    void *elem_dst = POINTER_OFFSET(dst, type_->size() * i);
+    void *elem_dst = POINTER_OFFSET(dst, type_->size * i);
     this->get(i, elem_dst);
   });
 }
@@ -27,7 +27,7 @@ void GVArrayImpl::materialize(const IndexMask &mask, void *dst) const
 void GVArrayImpl::materialize_to_uninitialized(const IndexMask &mask, void *dst) const
 {
   mask.foreach_index_optimized<int64_t>([&](const int64_t i) {
-    void *elem_dst = POINTER_OFFSET(dst, type_->size() * i);
+    void *elem_dst = POINTER_OFFSET(dst, type_->size * i);
     this->get_to_uninitialized(i, elem_dst);
   });
 }
@@ -35,7 +35,7 @@ void GVArrayImpl::materialize_to_uninitialized(const IndexMask &mask, void *dst)
 void GVArrayImpl::materialize_compressed(const IndexMask &mask, void *dst) const
 {
   mask.foreach_index_optimized<int64_t>([&](const int64_t i, const int64_t pos) {
-    void *elem_dst = POINTER_OFFSET(dst, type_->size() * pos);
+    void *elem_dst = POINTER_OFFSET(dst, type_->size * pos);
     this->get(i, elem_dst);
   });
 }
@@ -43,7 +43,7 @@ void GVArrayImpl::materialize_compressed(const IndexMask &mask, void *dst) const
 void GVArrayImpl::materialize_compressed_to_uninitialized(const IndexMask &mask, void *dst) const
 {
   mask.foreach_index_optimized<int64_t>([&](const int64_t i, const int64_t pos) {
-    void *elem_dst = POINTER_OFFSET(dst, type_->size() * pos);
+    void *elem_dst = POINTER_OFFSET(dst, type_->size * pos);
     this->get_to_uninitialized(i, elem_dst);
   });
 }
@@ -92,7 +92,7 @@ void GVMutableArrayImpl::set_all(const void *src)
   }
   else {
     for (int64_t i : IndexRange(size_)) {
-      this->set_by_copy(i, POINTER_OFFSET(src, type_->size() * i));
+      this->set_by_copy(i, POINTER_OFFSET(src, type_->size * i));
     }
   }
 }
@@ -231,7 +231,7 @@ class GVArrayImpl_For_SingleValue : public GVArrayImpl_For_SingleValueRef,
   GVArrayImpl_For_SingleValue(const CPPType &type, const int64_t size, const void *value)
       : GVArrayImpl_For_SingleValueRef(type, size)
   {
-    value_ = MEM_mallocN_aligned(type.size(), type.alignment(), __func__);
+    value_ = MEM_mallocN_aligned(type.size, type.alignment, __func__);
     type.copy_construct(value, (void *)value_);
   }
@@ -262,9 +262,9 @@ template<int BufferSize> class GVArrayImpl_For_SmallTrivialSingleValue : public
                                         const void *value)
       : GVArrayImpl(type, size)
   {
-    BLI_assert(type.is_trivial());
-    BLI_assert(type.alignment() <= 8);
-    BLI_assert(type.size() <= BufferSize);
+    BLI_assert(type.is_trivial);
+    BLI_assert(type.alignment <= 8);
+    BLI_assert(type.size <= BufferSize);
     type.copy_construct(value, &buffer_);
   }
@@ -275,7 +275,7 @@ template<int BufferSize> class GVArrayImpl_For_SmallTrivialSingleValue : public
   }
   void get_to_uninitialized(const int64_t /*index*/, void *r_value) const final
   {
-    memcpy(r_value, &buffer_, type_->size());
+    memcpy(r_value, &buffer_, type_->size);
   }
 
   void materialize(const IndexMask &mask, void *dst) const final
@@ -322,7 +322,7 @@ GVArraySpan::GVArraySpan(GVArray varray)
     data_ = info.data;
   }
   else {
-    owned_data_ = MEM_mallocN_aligned(type_->size() * size_, type_->alignment(), __func__);
+    owned_data_ = MEM_mallocN_aligned(type_->size * size_, type_->alignment, __func__);
     varray_.materialize_to_uninitialized(IndexRange(size_), owned_data_);
     data_ = owned_data_;
   }
@@ -386,7 +386,7 @@ GMutableVArraySpan::GMutableVArraySpan(GVMutableArray varray, const bool copy_va
     data_ = const_cast<void *>(info.data);
   }
   else {
-    owned_data_ = MEM_mallocN_aligned(type_->size() * size_, type_->alignment(), __func__);
+    owned_data_ = MEM_mallocN_aligned(type_->size * size_, type_->alignment, __func__);
     if (copy_values_to_span) {
       varray_.materialize_to_uninitialized(IndexRange(size_), owned_data_);
     }
@@ -505,7 +505,7 @@ class GVArrayImpl_For_SlicedGVArray : public GVArrayImpl {
       case CommonVArrayInfo::Type::Span: {
         return CommonVArrayInfo(CommonVArrayInfo::Type::Span,
                                 internal_info.may_have_ownership,
-                                POINTER_OFFSET(internal_info.data, type_->size() * offset_));
+                                POINTER_OFFSET(internal_info.data, type_->size * offset_));
       }
       case CommonVArrayInfo::Type::Single: {
         return internal_info;
@@ -519,14 +519,14 @@ class GVArrayImpl_For_SlicedGVArray : public GVArrayImpl {
   {
     IndexMaskMemory memory;
     const IndexMask shifted_mask = mask.shift(offset_, memory);
-    void *shifted_dst = POINTER_OFFSET(dst, -offset_ * type_->size());
+    void *shifted_dst = POINTER_OFFSET(dst, -offset_ * type_->size);
     varray_.materialize(shifted_mask, shifted_dst);
   }
   void materialize_to_uninitialized(const IndexMask &mask, void *dst) const final
   {
     IndexMaskMemory memory;
     const IndexMask shifted_mask = mask.shift(offset_, memory);
-    void *shifted_dst = POINTER_OFFSET(dst, -offset_ * type_->size());
+    void *shifted_dst = POINTER_OFFSET(dst, -offset_ * type_->size);
     varray_.materialize_to_uninitialized(shifted_mask, shifted_dst);
   }
   void materialize_compressed(const IndexMask &mask, void *dst) const final
@@ -687,7 +687,7 @@ GVArray::GVArray(std::shared_ptr<const GVArrayImpl> impl) : GVArrayCommon(std::m
 
 GVArray::GVArray(varray_tag::single /*tag*/, const CPPType &type, int64_t size, const void *value)
 {
-  if (type.is_trivial() && type.size() <= 16 && type.alignment() <= 8) {
+  if (type.is_trivial && type.size <= 16 && type.alignment <= 8) {
     this->emplace<GVArrayImpl_For_SmallTrivialSingleValue<16>>(type, size, value);
   }
   else {
@@ -86,12 +86,12 @@ static const CPPType &CPPType_TestType = CPPType::get<TestType>();
 
 TEST(cpp_type, Size)
 {
-  EXPECT_EQ(CPPType_TestType.size(), sizeof(TestType));
+  EXPECT_EQ(CPPType_TestType.size, sizeof(TestType));
 }
 
 TEST(cpp_type, Alignment)
 {
-  EXPECT_EQ(CPPType_TestType.alignment(), alignof(TestType));
+  EXPECT_EQ(CPPType_TestType.alignment, alignof(TestType));
 }
 
 TEST(cpp_type, Is)
@@ -706,8 +706,8 @@ void Result::allocate_data(int2 size, bool from_pool)
   storage_type_ = ResultStorageType::CPU;
 
   const CPPType &cpp_type = this->get_cpp_type();
-  const int64_t item_size = cpp_type.size();
-  const int64_t alignment = cpp_type.alignment();
+  const int64_t item_size = cpp_type.size;
+  const int64_t alignment = cpp_type.alignment;
   const int64_t array_size = int64_t(size.x) * int64_t(size.y);
   const int64_t memory_size = array_size * item_size;
 
@@ -809,7 +809,7 @@ static wmOperatorStatus curves_set_selection_domain_exec(bContext *C, wmOperator
   for (const StringRef selection_name : get_curves_selection_attribute_names(curves)) {
     if (const GVArray src = *attributes.lookup(selection_name, domain)) {
       const CPPType &type = src.type();
-      void *dst = MEM_malloc_arrayN(attributes.domain_size(domain), type.size(), __func__);
+      void *dst = MEM_malloc_arrayN(attributes.domain_size(domain), type.size, __func__);
       src.materialize(dst);
 
       attributes.remove(selection_name);
@@ -494,7 +494,7 @@ bool convert_attribute(bke::MutableAttributeAccessor attributes,
 
   const CPPType &cpp_type = varray.type();
   void *new_data = MEM_mallocN_aligned(
-      varray.size() * cpp_type.size(), cpp_type.alignment(), __func__);
+      varray.size() * cpp_type.size, cpp_type.alignment, __func__);
   varray.materialize_to_uninitialized(new_data);
   attributes.remove(name_copy);
   if (!attributes.add(name_copy, dst_domain, dst_type, bke::AttributeInitMoveArray(new_data))) {
@@ -889,7 +889,7 @@ bool ensure_selection_domain(ToolSettings *ts, Object *object)
       const GVArray src = *attributes.lookup(".selection", domain);
       if (src) {
         const CPPType &type = src.type();
-        void *dst = MEM_malloc_arrayN(attributes.domain_size(domain), type.size(), __func__);
+        void *dst = MEM_malloc_arrayN(attributes.domain_size(domain), type.size, __func__);
         src.materialize(dst);
 
         attributes.remove(".selection");
@@ -391,9 +391,9 @@ Vector<GVArray> evaluate_fields(ResourceScope &scope,
       void *buffer;
       if (!dst_varray || !dst_varray.is_span()) {
         /* Allocate a new buffer for the computed result. */
-        buffer = scope.allocator().allocate(type.size() * array_size, type.alignment());
+        buffer = scope.allocator().allocate(type.size * array_size, type.alignment);
 
-        if (!type.is_trivially_destructible()) {
+        if (!type.is_trivially_destructible) {
           /* Destruct values in the end. */
           scope.add_destruct_call(
               [buffer, mask, &type]() { type.destruct_indices(buffer, mask); });
@@ -437,9 +437,9 @@ Vector<GVArray> evaluate_fields(ResourceScope &scope,
     const GFieldRef &field = constant_fields_to_evaluate[i];
     const CPPType &type = field.cpp_type();
     /* Allocate memory where the computed value will be stored in. */
-    void *buffer = scope.allocator().allocate(type.size(), type.alignment());
+    void *buffer = scope.allocator().allocate(type.size, type.alignment);
 
-    if (!type.is_trivially_destructible()) {
+    if (!type.is_trivially_destructible) {
       /* Destruct value in the end. */
       scope.add_destruct_call([buffer, &type]() { type.destruct(buffer); });
     }
@@ -700,7 +700,7 @@ FieldInput::~FieldInput() = default;
 
 FieldConstant::FieldConstant(const CPPType &type, const void *value)
     : FieldNode(FieldNodeType::Constant), type_(type)
 {
-  value_ = MEM_mallocN_aligned(type.size(), type.alignment(), __func__);
+  value_ = MEM_mallocN_aligned(type.size, type.alignment, __func__);
   type.copy_construct(value, value_);
 }
@@ -658,7 +658,7 @@ class Executor {
   {
     const OutputSocket &socket = *self_.graph_inputs_[graph_input_index];
     const CPPType &type = socket.type();
-    void *buffer = local_data.allocator->allocate(type.size(), type.alignment());
+    void *buffer = local_data.allocator->allocate(type.size, type.alignment);
     type.move_construct(input_data, buffer);
     this->forward_value_to_linked_inputs(socket, {type, buffer}, current_task, local_data);
   }
@@ -910,7 +910,7 @@ class Executor {
       self_.logger_->log_socket_value(input_socket, {type, default_value}, local_context);
     }
     BLI_assert(input_state.value == nullptr);
-    input_state.value = allocator.allocate(type.size(), type.alignment());
+    input_state.value = allocator.allocate(type.size, type.alignment);
     type.copy_construct(default_value, input_state.value);
     input_state.was_ready_for_execution = true;
   }
@@ -1172,7 +1172,7 @@ class Executor {
         value_to_forward = {};
       }
       else {
-        void *buffer = local_data.allocator->allocate(type.size(), type.alignment());
+        void *buffer = local_data.allocator->allocate(type.size, type.alignment);
         type.copy_construct(value_to_forward.get(), buffer);
         this->forward_value_to_input(locked_node, input_state, {type, buffer}, current_task);
       }
@@ -1367,7 +1367,7 @@ class GraphExecutorLFParams final : public Params {
     if (output_state.value == nullptr) {
       LinearAllocator<> &allocator = *this->get_local_data().allocator;
       const CPPType &type = node_.output(index).type();
-      output_state.value = allocator.allocate(type.size(), type.alignment());
+      output_state.value = allocator.allocate(type.size, type.alignment);
     }
     return output_state.value;
   }
@@ -12,7 +12,7 @@ CustomMF_GenericConstant::CustomMF_GenericConstant(const CPPType &type,
     : type_(type), owns_value_(make_value_copy)
 {
   if (make_value_copy) {
-    void *copied_value = MEM_mallocN_aligned(type.size(), type.alignment(), __func__);
+    void *copied_value = MEM_mallocN_aligned(type.size, type.alignment, __func__);
     type.copy_construct(value, copied_value);
     value = copied_value;
   }
@@ -9,10 +9,10 @@ namespace blender::fn::multi_function {
 void ParamsBuilder::add_unused_output_for_unsupporting_function(const CPPType &type)
 {
   ResourceScope &scope = this->resource_scope();
-  void *buffer = scope.allocator().allocate(type.size() * min_array_size_, type.alignment());
+  void *buffer = scope.allocator().allocate(type.size * min_array_size_, type.alignment);
   const GMutableSpan span{type, buffer, min_array_size_};
   actual_params_.append_unchecked_as(std::in_place_type<GMutableSpan>, span);
-  if (!type.is_trivially_destructible()) {
+  if (!type.is_trivially_destructible) {
     scope.add_destruct_call(
         [&type, buffer, mask = mask_]() { type.destruct_indices(buffer, mask); });
   }
@@ -170,8 +170,8 @@ class ValueAllocator : NonCopyable, NonMovable {
   {
     void *buffer = nullptr;
 
-    const int64_t element_size = type.size();
-    const int64_t alignment = type.alignment();
+    const int64_t element_size = type.size;
+    const int64_t alignment = type.alignment;
 
     if (alignment > min_alignment) {
       /* In this rare case we fallback to not reusing existing buffers. */
@@ -215,8 +215,8 @@ class ValueAllocator : NonCopyable, NonMovable {
     void *buffer;
     if (stack.is_empty()) {
       buffer = linear_allocator_.allocate(
-          std::max<int>(small_value_max_size, type.size()),
-          std::max<int>(small_value_max_alignment, type.alignment()));
+          std::max<int>(small_value_max_size, type.size),
+          std::max<int>(small_value_max_alignment, type.alignment));
     }
     else {
       buffer = stack.pop();
@@ -244,7 +244,7 @@ class ValueAllocator : NonCopyable, NonMovable {
         Stack<void *> &buffers = type.can_exist_in_buffer(small_value_max_size,
                                                           small_value_max_alignment) ?
                                      small_span_buffers_free_list_ :
-                                     span_buffers_free_lists_.lookup_or_add_default(type.size());
+                                     span_buffers_free_lists_.lookup_or_add_default(type.size);
         buffers.push(value_typed->data);
       }
       break;
@@ -375,7 +375,7 @@ class LazyFunctionForBakeNode final : public LazyFunction {
     LinearAllocator<> allocator;
     for (const int i : bake_items_.index_range()) {
       const CPPType &type = *outputs_[i].type;
-      next_values[i] = allocator.allocate(type.size(), type.alignment());
+      next_values[i] = allocator.allocate(type.size, type.alignment);
     }
     this->copy_bake_state_to_values(
         next_state, data_block_map, self_object, compute_context, next_values);
@@ -657,7 +657,7 @@ class LazyFunctionForSimulationOutputNode final : public LazyFunction {
     LinearAllocator<> allocator;
     for (const int i : simulation_items_.index_range()) {
       const CPPType &type = *outputs_[i].type;
-      next_values[i] = allocator.allocate(type.size(), type.alignment());
+      next_values[i] = allocator.allocate(type.size, type.alignment);
     }
     copy_simulation_state_to_values(simulation_items_,
                                     next_state,
@@ -83,7 +83,7 @@ void Bundle::add_new(SocketInterfaceKey key, const bke::bNodeSocketType &type, c
   BLI_assert(!this->contains(key));
   BLI_assert(type.geometry_nodes_cpp_type);
   const CPPType &cpp_type = *type.geometry_nodes_cpp_type;
-  void *buffer = MEM_mallocN_aligned(cpp_type.size(), cpp_type.alignment(), __func__);
+  void *buffer = MEM_mallocN_aligned(cpp_type.size, cpp_type.alignment, __func__);
   cpp_type.copy_construct(value, buffer);
   items_.append(StoredItem{std::move(key), &type, buffer});
   buffers_.append(buffer);
@@ -126,10 +126,10 @@ class LazyFunctionForClosureZone : public LazyFunction {
       lf_graph.add_link(lf_body_node.output(body_fn_.indices.outputs.input_usages[i]),
                         lf_graph_input_usage);
 
-      void *default_value = closure_allocator.allocate(cpp_type.size(), cpp_type.alignment());
+      void *default_value = closure_allocator.allocate(cpp_type.size, cpp_type.alignment);
       construct_socket_default_value(*bsocket.typeinfo, default_value);
       default_input_values.append(default_value);
-      if (!cpp_type.is_trivially_destructible()) {
+      if (!cpp_type.is_trivially_destructible) {
         closure_scope->add_destruct_call(
             [&cpp_type, default_value]() { cpp_type.destruct(default_value); });
       }
@@ -160,9 +160,9 @@ class LazyFunctionForClosureZone : public LazyFunction {
     for (const int i : zone_.border_links.index_range()) {
       const CPPType &cpp_type = *zone_.border_links[i]->tosock->typeinfo->geometry_nodes_cpp_type;
       void *input_ptr = params.try_get_input_data_ptr(zone_info_.indices.inputs.border_links[i]);
-      void *stored_ptr = closure_allocator.allocate(cpp_type.size(), cpp_type.alignment());
+      void *stored_ptr = closure_allocator.allocate(cpp_type.size, cpp_type.alignment);
       cpp_type.move_construct(input_ptr, stored_ptr);
-      if (!cpp_type.is_trivially_destructible()) {
+      if (!cpp_type.is_trivially_destructible) {
         closure_scope->add_destruct_call(
             [&cpp_type, stored_ptr]() { cpp_type.destruct(stored_ptr); });
       }
@@ -521,10 +521,10 @@ class LazyFunctionForEvaluateClosureNode : public LazyFunction {
 
     auto get_output_default_value = [&](const bke::bNodeSocketType &type) {
       const CPPType &cpp_type = *type.geometry_nodes_cpp_type;
-      void *fallback_value = eval_storage.scope.allocator().allocate(cpp_type.size(),
-                                                                     cpp_type.alignment());
+      void *fallback_value = eval_storage.scope.allocator().allocate(cpp_type.size,
+                                                                     cpp_type.alignment);
       construct_socket_default_value(type, fallback_value);
-      if (!cpp_type.is_trivially_destructible()) {
+      if (!cpp_type.is_trivially_destructible) {
         eval_storage.scope.add_destruct_call(
             [fallback_value, type = &cpp_type]() { type->destruct(fallback_value); });
       }
@@ -730,7 +730,7 @@ void evaluate_closure_eagerly(const Closure &closure, ClosureEagerEvalParams &pa
     const bke::bNodeSocketType &from_type = *item.type;
     const bke::bNodeSocketType &to_type = *signature.inputs[*mapped_i].type;
     const CPPType &to_cpp_type = *to_type.geometry_nodes_cpp_type;
-    void *value = allocator.allocate(to_cpp_type.size(), to_cpp_type.alignment());
+    void *value = allocator.allocate(to_cpp_type.size, to_cpp_type.alignment);
     if (&from_type == &to_type) {
       to_cpp_type.copy_construct(item.value, value);
     }
@@ -762,7 +762,7 @@ void evaluate_closure_eagerly(const Closure &closure, ClosureEagerEvalParams &pa
     const bke::bNodeSocketType &type = *signature.inputs[main_input_i].type;
     const CPPType &cpp_type = *type.geometry_nodes_cpp_type;
     const void *default_value = closure.default_input_value(main_input_i);
-    void *value = allocator.allocate(cpp_type.size(), cpp_type.alignment());
+    void *value = allocator.allocate(cpp_type.size, cpp_type.alignment);
     cpp_type.copy_construct(default_value, value);
     lf_input_values[lf_input_i] = {cpp_type, value};
   }
@@ -787,7 +787,7 @@ void evaluate_closure_eagerly(const Closure &closure, ClosureEagerEvalParams &pa
     const bke::bNodeSocketType &type = *signature.outputs[main_output_i].type;
     const CPPType &cpp_type = *type.geometry_nodes_cpp_type;
     lf_output_values[indices.outputs.main[main_output_i]] = {
-        cpp_type, allocator.allocate(cpp_type.size(), cpp_type.alignment())};
+        cpp_type, allocator.allocate(cpp_type.size, cpp_type.alignment)};
   }
 
   lf::BasicParams lf_params{
@@ -730,10 +730,9 @@ static Vector<OutputAttributeToStore> compute_attributes_to_store(
         component_type,
         domain,
         output_info.name,
-        GMutableSpan{
-            type,
-            MEM_mallocN_aligned(type.size() * domain_size, type.alignment(), __func__),
-            domain_size}};
+        GMutableSpan{type,
+                     MEM_mallocN_aligned(type.size * domain_size, type.alignment, __func__),
+                     domain_size}};
     fn::GField field = validator.validate_field_if_necessary(output_info.field);
     field_evaluator.add_with_destination(std::move(field), store.data);
     attributes_to_store.append(store);
@@ -869,7 +868,7 @@ bke::GeometrySet execute_geometry_nodes_on_geometry(const bNodeTree &btree,
 
     const CPPType *type = typeinfo->geometry_nodes_cpp_type;
     BLI_assert(type != nullptr);
-    void *value = allocator.allocate(type->size(), type->alignment());
+    void *value = allocator.allocate(type->size, type->alignment);
     initialize_group_input(btree, properties_set, i, value);
     param_inputs[function.inputs.main[i]] = {type, value};
     inputs_to_destruct.append({type, value});
@@ -892,7 +891,7 @@ bke::GeometrySet execute_geometry_nodes_on_geometry(const bNodeTree &btree,
   for (const int i : IndexRange(num_outputs)) {
     const lf::Output &lf_output = lazy_function.outputs()[i];
     const CPPType &type = *lf_output.type;
-    void *buffer = allocator.allocate(type.size(), type.alignment());
+    void *buffer = allocator.allocate(type.size, type.alignment);
     param_outputs[i] = {type, buffer};
   }
 
@@ -1081,10 +1080,10 @@ void get_geometry_nodes_input_base_values(const bNodeTree &btree,
       continue;
     }
 
-    void *value_buffer = scope.allocator().allocate(stype->geometry_nodes_cpp_type->size(),
-                                                    stype->geometry_nodes_cpp_type->alignment());
+    void *value_buffer = scope.allocator().allocate(stype->geometry_nodes_cpp_type->size,
+                                                    stype->geometry_nodes_cpp_type->alignment);
     init_socket_cpp_value_from_property(*property, socket_type, value_buffer);
-    if (!stype->geometry_nodes_cpp_type->is_trivially_destructible()) {
+    if (!stype->geometry_nodes_cpp_type->is_trivially_destructible) {
       scope.add_destruct_call([type = stype->geometry_nodes_cpp_type, value_buffer]() {
         type->destruct(value_buffer);
       });
@@ -70,7 +70,7 @@ static const CPPType *get_socket_cpp_type(const bke::bNodeSocketType &typeinfo)
   if (type == nullptr) {
     return nullptr;
   }
-  BLI_assert(type->has_special_member_functions());
+  BLI_assert(type->has_special_member_functions);
   return type;
 }
 
@@ -1232,7 +1232,7 @@ static GMutablePointer get_socket_default_value(LinearAllocator<> &allocator,
   if (type == nullptr) {
     return {};
   }
-  void *buffer = allocator.allocate(type->size(), type->alignment());
+  void *buffer = allocator.allocate(type->size, type->alignment);
   typeinfo.get_geometry_nodes_cpp_value(bsocket.default_value, buffer);
   return {type, buffer};
 }
@@ -3840,7 +3840,7 @@ struct GeometryNodesLazyFunctionBuilder {
       return;
     }
     input_lf_socket.set_default_value(value.get());
-    if (!value.type()->is_trivially_destructible()) {
+    if (!value.type()->is_trivially_destructible) {
       scope_.add_destruct_call([value]() mutable { value.destruct(); });
     }
   }
@@ -267,7 +267,7 @@ void GeoTreeLogger::log_value(const bNode &node, const bNodeSocket &socket, cons
   };
 
   auto log_generic_value = [&](const CPPType &type, const void *value) {
-    void *buffer = this->allocator->allocate(type.size(), type.alignment());
+    void *buffer = this->allocator->allocate(type.size, type.alignment);
     type.copy_construct(value, buffer);
     store_logged_value(this->allocator->construct<GenericValueLog>(GMutablePointer{type, buffer}));
   };
@@ -746,10 +746,10 @@ struct SocketUsageInferencer {
     }
     /* Allocate memory for the output value. */
     const CPPType &base_type = *output_socket->typeinfo->base_cpp_type;
-    void *value = scope_.allocator().allocate(base_type.size(), base_type.alignment());
+    void *value = scope_.allocator().allocate(base_type.size, base_type.alignment);
     params.add_uninitialized_single_output(GMutableSpan(base_type, value, 1));
     all_socket_values_.add_new(output_socket, value);
-    if (!base_type.is_trivially_destructible()) {
+    if (!base_type.is_trivially_destructible) {
       scope_.add_destruct_call(
           [type = &base_type, value]() { type->destruct(const_cast<void *>(value)); });
     }
@@ -828,10 +828,10 @@ struct SocketUsageInferencer {
     }
 
     const CPPType &base_type = *socket->typeinfo->base_cpp_type;
-    void *value_buffer = scope_.allocator().allocate(base_type.size(), base_type.alignment());
+    void *value_buffer = scope_.allocator().allocate(base_type.size, base_type.alignment);
     socket->typeinfo->get_base_cpp_value(socket->default_value, value_buffer);
     all_socket_values_.add_new(socket, value_buffer);
-    if (!base_type.is_trivially_destructible()) {
+    if (!base_type.is_trivially_destructible) {
      scope_.add_destruct_call(
           [type = &base_type, value_buffer]() { type->destruct(value_buffer); });
     }
@@ -869,9 +869,9 @@ struct SocketUsageInferencer {
     if (!conversions.is_convertible(*from_type, *to_type)) {
       return nullptr;
     }
-    void *dst = scope_.allocator().allocate(to_type->size(), to_type->alignment());
+    void *dst = scope_.allocator().allocate(to_type->size, to_type->alignment);
     conversions.convert_to_uninitialized(*from_type, *to_type, src, dst);
-    if (!to_type->is_trivially_destructible()) {
+    if (!to_type->is_trivially_destructible) {
       scope_.add_destruct_call([to_type, dst]() { to_type->destruct(dst); });
     }
     return dst;
@@ -1079,7 +1079,7 @@ void infer_group_interface_inputs_usage(const bNodeTree &group,
     if (base_type == nullptr) {
       continue;
     }
-    void *value = allocator.allocate(base_type->size(), base_type->alignment());
+    void *value = allocator.allocate(base_type->size, base_type->alignment);
     stype.get_base_cpp_value(socket.default_value, value);
     input_values[i] = GPointer(base_type, value);
   }