This patch implements basic support for evaluating function nodes on volume grids. Conceptually, a function node always creates a new grid for the output, though the output is often a modified version of the input. The topology of the output grid is the union of the topologies of all input grids. All input grids have to have the same transform; otherwise, resampling has to be used to make them compatible. Non-grid inputs are allowed to be single values or fields. Fields are evaluated in a voxel/tile context, so they compute one value per voxel or per tile.

One optimization is still missing that will likely be key in the future: the ability to merge multiple function nodes and execute them together. Currently, the entire function evaluation is started and finished for every function node that outputs a grid, which adds significant overhead in some situations. Implementing this optimization requires further changes that are outside the scope of this patch; it's good to have something that works first.

Note: not all function nodes are supported yet, because we don't have grid types for all of them. Most notably, there are no color/float4 grids yet. Implementing those properly is not entirely straightforward and may require more changes, because there isn't a 1-to-1 mapping between grid types and socket types (a float4 grid may correspond to a color or vector socket later on).

Using grids with function nodes and fields can currently result in false-positive warnings in the UI. That's a limitation of our current socket type inferencing and can be improved once we have better socket shape inferencing.

Pull Request: https://projects.blender.org/blender/blender/pulls/125110
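As a rough illustration of the evaluation model described above, the sketch below shows the same steps using plain OpenVDB types rather than Blender's internal volume grid API: the output grid's topology is the union of the input topologies, the inputs must share a transform, and the function is then evaluated once per active voxel/tile. The evaluate_add_node helper is hypothetical and only stands in for a simple two-input function node; it is not code from this patch.

/* Illustrative sketch only, written against plain OpenVDB. */
#include <openvdb/openvdb.h>

/* Hypothetical helper: evaluate a two-input "add" function node on float grids. */
static openvdb::FloatGrid::Ptr evaluate_add_node(const openvdb::FloatGrid &a,
                                                 const openvdb::FloatGrid &b)
{
  /* All input grids must use the same transform; otherwise they would have to
   * be resampled first to make them compatible. */
  if (a.transform() != b.transform()) {
    return nullptr;
  }

  /* The output is a new grid whose topology is the union of the input topologies. */
  openvdb::FloatGrid::Ptr out = openvdb::FloatGrid::create(0.0f);
  out->setTransform(a.transform().copy());
  out->topologyUnion(a);
  out->topologyUnion(b);

  /* The function is evaluated in a voxel/tile context: one value per active element. */
  openvdb::FloatGrid::ConstAccessor acc_a = a.getConstAccessor();
  openvdb::FloatGrid::ConstAccessor acc_b = b.getConstAccessor();
  for (auto iter = out->beginValueOn(); iter; ++iter) {
    const openvdb::Coord coord = iter.getCoord();
    iter.setValue(acc_a.getValue(coord) + acc_b.getValue(coord));
  }
  return out;
}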
282 lines · 6.5 KiB · C++
/* SPDX-FileCopyrightText: 2023 Blender Authors
 *
 * SPDX-License-Identifier: GPL-2.0-or-later */

#pragma once

/** \file
 * \ingroup bli
 */

#include "BLI_cpp_type.hh"
#include "BLI_span.hh"

namespace blender {

/**
 * A generic span. It behaves just like a blender::Span<T>, but the type is only known at
 * run-time.
 */
class GSpan {
 protected:
  const CPPType *type_ = nullptr;
  const void *data_ = nullptr;
  int64_t size_ = 0;

 public:
  GSpan() = default;

  GSpan(const CPPType *type, const void *buffer, int64_t size)
      : type_(type), data_(buffer), size_(size)
  {
    BLI_assert(size >= 0);
    BLI_assert(buffer != nullptr || size == 0);
    BLI_assert(size == 0 || type != nullptr);
    BLI_assert(type == nullptr || type->pointer_has_valid_alignment(buffer));
  }

  GSpan(const CPPType &type, const void *buffer, int64_t size) : GSpan(&type, buffer, size) {}

  GSpan(const CPPType &type) : type_(&type) {}

  GSpan(const CPPType *type) : type_(type) {}

  template<typename T>
  GSpan(Span<T> array)
      : GSpan(CPPType::get<T>(), static_cast<const void *>(array.data()), array.size())
  {
  }

  template<typename T>
  GSpan(MutableSpan<T> array)
      : GSpan(CPPType::get<T>(), static_cast<const void *>(array.data()), array.size())
  {
  }

  const CPPType &type() const
  {
    BLI_assert(type_ != nullptr);
    return *type_;
  }

  const CPPType *type_ptr() const
  {
    return type_;
  }

  bool is_empty() const
  {
    return size_ == 0;
  }

  int64_t size() const
  {
    return size_;
  }

  int64_t size_in_bytes() const
  {
    return type_->size * size_;
  }

  const void *data() const
  {
    return data_;
  }

  const void *operator[](int64_t index) const
  {
    BLI_assert(index < size_);
    return POINTER_OFFSET(data_, type_->size * index);
  }

  template<typename T> Span<T> typed() const
  {
    BLI_assert(size_ == 0 || type_ != nullptr);
    BLI_assert(type_ == nullptr || type_->is<T>());
    return Span<T>(static_cast<const T *>(data_), size_);
  }

  GSpan slice(const int64_t start, int64_t size) const
  {
    BLI_assert(start >= 0);
    BLI_assert(size >= 0);
    BLI_assert(start + size <= size_ || size == 0);
    return GSpan(type_, POINTER_OFFSET(data_, type_->size * start), size);
  }

  GSpan slice(const IndexRange range) const
  {
    return this->slice(range.start(), range.size());
  }

  GSpan drop_front(const int64_t n) const
  {
    BLI_assert(n >= 0);
    const int64_t new_size = std::max<int64_t>(0, size_ - n);
    return GSpan(*type_, POINTER_OFFSET(data_, type_->size * n), new_size);
  }

  GSpan drop_back(const int64_t n) const
  {
    BLI_assert(n >= 0);
    const int64_t new_size = std::max<int64_t>(0, size_ - n);
    return GSpan(*type_, data_, new_size);
  }

  GSpan take_front(const int64_t n) const
  {
    BLI_assert(n >= 0);
    const int64_t new_size = std::min<int64_t>(size_, n);
    return GSpan(*type_, data_, new_size);
  }

  GSpan take_back(const int64_t n) const
  {
    BLI_assert(n >= 0);
    const int64_t new_size = std::min<int64_t>(size_, n);
    return GSpan(*type_, POINTER_OFFSET(data_, type_->size * (size_ - new_size)), new_size);
  }
};

/**
 * A generic mutable span. It behaves just like a blender::MutableSpan<T>, but the type is only
 * known at run-time.
 */
class GMutableSpan {
 protected:
  const CPPType *type_ = nullptr;
  void *data_ = nullptr;
  int64_t size_ = 0;

 public:
  GMutableSpan() = default;

  GMutableSpan(const CPPType *type, void *buffer, int64_t size)
      : type_(type), data_(buffer), size_(size)
  {
    BLI_assert(size >= 0);
    BLI_assert(buffer != nullptr || size == 0);
    BLI_assert(size == 0 || type != nullptr);
    BLI_assert(type == nullptr || type->pointer_has_valid_alignment(buffer));
  }

  GMutableSpan(const CPPType &type, void *buffer, int64_t size) : GMutableSpan(&type, buffer, size)
  {
  }

  GMutableSpan(const CPPType &type) : type_(&type) {}

  GMutableSpan(const CPPType *type) : type_(type) {}

  template<typename T>
  GMutableSpan(MutableSpan<T> array)
      : GMutableSpan(CPPType::get<T>(), static_cast<void *>(array.begin()), array.size())
  {
  }

  operator GSpan() const
  {
    return GSpan(type_, data_, size_);
  }

  const CPPType &type() const
  {
    BLI_assert(type_ != nullptr);
    return *type_;
  }

  const CPPType *type_ptr() const
  {
    return type_;
  }

  bool is_empty() const
  {
    return size_ == 0;
  }

  int64_t size() const
  {
    return size_;
  }

  int64_t size_in_bytes() const
  {
    return type_->size * size_;
  }

  void *data() const
  {
    return data_;
  }

  void *operator[](int64_t index) const
  {
    BLI_assert(index >= 0);
    BLI_assert(index < size_);
    return POINTER_OFFSET(data_, type_->size * index);
  }

  template<typename T> MutableSpan<T> typed() const
  {
    BLI_assert(size_ == 0 || type_ != nullptr);
    BLI_assert(type_ == nullptr || type_->is<T>());
    return MutableSpan<T>(static_cast<T *>(data_), size_);
  }

  GMutableSpan slice(const int64_t start, int64_t size) const
  {
    BLI_assert(start >= 0);
    BLI_assert(size >= 0);
    BLI_assert(start + size <= size_ || size == 0);
    return GMutableSpan(type_, POINTER_OFFSET(data_, type_->size * start), size);
  }

  GMutableSpan slice(IndexRange range) const
  {
    return this->slice(range.start(), range.size());
  }

  GMutableSpan drop_front(const int64_t n) const
  {
    BLI_assert(n >= 0);
    const int64_t new_size = std::max<int64_t>(0, size_ - n);
    return GMutableSpan(*type_, POINTER_OFFSET(data_, type_->size * n), new_size);
  }

  GMutableSpan drop_back(const int64_t n) const
  {
    BLI_assert(n >= 0);
    const int64_t new_size = std::max<int64_t>(0, size_ - n);
    return GMutableSpan(*type_, data_, new_size);
  }

  GMutableSpan take_front(const int64_t n) const
  {
    BLI_assert(n >= 0);
    const int64_t new_size = std::min<int64_t>(size_, n);
    return GMutableSpan(*type_, data_, new_size);
  }

  GMutableSpan take_back(const int64_t n) const
  {
    BLI_assert(n >= 0);
    const int64_t new_size = std::min<int64_t>(size_, n);
    return GMutableSpan(*type_, POINTER_OFFSET(data_, type_->size * (size_ - new_size)), new_size);
  }

  /**
   * Copy all values from another span into this span. This invokes undefined behavior when the
   * destination contains uninitialized data and T is not trivially copy constructible.
   * The size of both spans is expected to be the same.
   */
  void copy_from(GSpan values)
  {
    BLI_assert(type_ == &values.type());
    BLI_assert(size_ == values.size());
    type_->copy_assign_n(values.data(), data_, size_);
  }
};

}  // namespace blender
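For reference, here is a minimal usage sketch for the two classes above (not part of the header): a typed span is erased into a GSpan or GMutableSpan, handed to type-agnostic code, and recovered with typed<T>(). It assumes the CPPType registration that Blender already provides for int; the copy_generically and example functions are made up for illustration.

/* Minimal usage sketch for GSpan/GMutableSpan. Helper names are illustrative only. */
#include "BLI_generic_span.hh"

#include <array>

using namespace blender;

/* Type-agnostic code only needs the CPPType and the raw buffers. */
static void copy_generically(GMutableSpan dst, GSpan src)
{
  /* copy_from() asserts internally that type and size match. */
  dst.copy_from(src);
}

void example()
{
  std::array<int, 4> src_values = {1, 2, 3, 4};
  std::array<int, 4> dst_values = {0, 0, 0, 0};

  /* Typed spans convert implicitly to their generic counterparts
   * (this relies on CPPType::get<int>() being available). */
  const GSpan src = Span<int>(src_values.data(), int64_t(src_values.size()));
  GMutableSpan dst = MutableSpan<int>(dst_values.data(), int64_t(dst_values.size()));

  copy_generically(dst, src);

  /* Recover a typed view once the static type is known again. */
  const MutableSpan<int> typed = dst.typed<int>();
  BLI_assert(typed[2] == 3);
  (void)typed;
}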