Cycles: Remove Alembic procedural
This was added for a fairly specialized use case and, as far as we know, is no longer being used. A future replacement would be to add a USD/Hydra procedural, for which most of the groundwork already exists.

Pull Request: https://projects.blender.org/blender/blender/pulls/146021
Committed by: Brecht Van Lommel
Parent: 6148d50105
Commit: 6d25aad41f
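For users who relied on this path: the procedural was only used when a Mesh Sequence Cache modifier's cache file had its render procedural option enabled (see the `use_render_procedural()` checks removed below); after this change such objects should go through regular modifier evaluation instead. Below is a minimal, hypothetical sketch for locating affected cache files, assuming Blender's Python API, where `CacheFile.use_render_procedural` is the RNA property behind the calls removed in this commit; the reporting text is illustrative only.

```python
# Minimal sketch: report which cache files in the current .blend still request
# the (now removed) Cycles Alembic render procedural, so they can be reviewed.
# Assumes `bpy.data.cache_files` and the `use_render_procedural` RNA property
# that the removed C++ code queried via cache_file.use_render_procedural().
import bpy

for cache_file in bpy.data.cache_files:
    if cache_file.use_render_procedural:
        print(f"{cache_file.name}: requested the Alembic render procedural; "
              f"it will now be evaluated through the Mesh Sequence Cache modifier")
```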
@@ -325,14 +325,6 @@ if(WITH_OPENIMAGEDENOISE)
)
endif()

if(WITH_ALEMBIC)
add_definitions(-DWITH_ALEMBIC)
include_directories(
SYSTEM
${ALEMBIC_INCLUDE_DIRS}
)
endif()

# Includes that might be overrides by USD last, to avoid compiling
# against the wrong versions of other libraries.
include_directories(
@@ -23,16 +23,6 @@ set(LIB
cycles_util
)

if(WITH_ALEMBIC)
add_definitions(-DWITH_ALEMBIC)
list(APPEND INC_SYS
${ALEMBIC_INCLUDE_DIRS}
)
list(APPEND LIB
${ALEMBIC_LIBRARIES}
)
endif()

if(WITH_CYCLES_OSL)
list(APPEND LIB cycles_kernel_osl)
endif()
@@ -8,7 +8,6 @@

#include "graph/node_xml.h"

#include "scene/alembic.h"
#include "scene/background.h"
#include "scene/camera.h"
#include "scene/film.h"
@@ -196,30 +195,6 @@ static void xml_read_camera(XMLReadState &state, const xml_node node)
cam->update(state.scene);
}

/* Alembic */

#ifdef WITH_ALEMBIC
static void xml_read_alembic(XMLReadState &state, const xml_node graph_node)
{
AlembicProcedural *proc = state.scene->create_node<AlembicProcedural>();
xml_read_node(state, proc, graph_node);

for (xml_node node = graph_node.first_child(); node; node = node.next_sibling()) {
if (string_iequals(node.name(), "object")) {
string path;
if (xml_read_string(&path, node, "path")) {
const ustring object_path(path, 0);
AlembicObject *object = proc->get_or_create_object(object_path);

array<Node *> used_shaders = object->get_used_shaders();
used_shaders.push_back_slow(state.shader);
object->set_used_shaders(used_shaders);
}
}
}
}
#endif

/* Shader */

static void xml_read_shader_graph(XMLReadState &state, Shader *shader, const xml_node graph_node)
@@ -801,11 +776,6 @@ static void xml_read_scene(XMLReadState &state, const xml_node scene_node)
xml_read_object(substate, node);
xml_read_scene(substate, node);
}
#ifdef WITH_ALEMBIC
else if (string_iequals(node.name(), "alembic")) {
xml_read_alembic(state, node);
}
#endif
else {
LOG_ERROR << "Unknown node \"" << node.name() << "\"";
}
@@ -112,16 +112,6 @@ if(WITH_OPENVDB)
)
endif()

if(WITH_ALEMBIC)
add_definitions(-DWITH_ALEMBIC)
list(APPEND INC_SYS
${ALEMBIC_INCLUDE_DIRS}
)
list(APPEND LIB
${ALEMBIC_LIBRARIES}
)
endif()

if(WITH_OPENIMAGEDENOISE)
add_definitions(-DWITH_OPENIMAGEDENOISE)
list(APPEND INC_SYS
@@ -47,7 +47,6 @@ class CyclesRender(bpy.types.RenderEngine):
bl_use_exclude_layers = True
bl_use_spherical_stereo = True
bl_use_custom_freestyle = True
bl_use_alembic_procedural = True

def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@@ -7,7 +7,6 @@
#include "blender/sync.h"
#include "blender/util.h"

#include "scene/alembic.h"
#include "scene/camera.h"
#include "scene/integrator.h"
#include "scene/light.h"
@@ -439,83 +438,6 @@ bool BlenderSync::sync_object_attributes(BL::DepsgraphObjectInstance &b_instance

/* Object Loop */

void BlenderSync::sync_procedural(BL::Object &b_ob,
BL::MeshSequenceCacheModifier &b_mesh_cache,
bool has_subdivision_modifier)
{
#ifdef WITH_ALEMBIC
BL::CacheFile cache_file = b_mesh_cache.cache_file();
void *cache_file_key = cache_file.ptr.data;

AlembicProcedural *procedural = static_cast<AlembicProcedural *>(
procedural_map.find(cache_file_key));

if (procedural == nullptr) {
procedural = scene->create_node<AlembicProcedural>();
procedural_map.add(cache_file_key, procedural);
}
else {
procedural_map.used(procedural);
}

float current_frame = static_cast<float>(b_scene.frame_current());
if (cache_file.override_frame()) {
current_frame = cache_file.frame();
}

if (!cache_file.override_frame()) {
procedural->set_start_frame(static_cast<float>(b_scene.frame_start()));
procedural->set_end_frame(static_cast<float>(b_scene.frame_end()));
}

procedural->set_frame(current_frame);
procedural->set_frame_rate(b_scene.render().fps() / b_scene.render().fps_base());
procedural->set_frame_offset(cache_file.frame_offset());

string absolute_path = blender_absolute_path(b_data, b_ob, b_mesh_cache.cache_file().filepath());
procedural->set_filepath(ustring(absolute_path));

array<ustring> layers;
for (BL::CacheFileLayer &layer : cache_file.layers) {
if (layer.hide_layer()) {
continue;
}

absolute_path = blender_absolute_path(b_data, b_ob, layer.filepath());
layers.push_back_slow(ustring(absolute_path));
}
procedural->set_layers(layers);

procedural->set_scale(cache_file.scale());

procedural->set_use_prefetch(cache_file.use_prefetch());
procedural->set_prefetch_cache_size(cache_file.prefetch_cache_size());

/* create or update existing AlembicObjects */
const ustring object_path = ustring(b_mesh_cache.object_path());

AlembicObject *abc_object = procedural->get_or_create_object(object_path);

array<Node *> used_shaders = find_used_shaders(b_ob);
abc_object->set_used_shaders(used_shaders);

PointerRNA cobj = RNA_pointer_get(&b_ob.ptr, "cycles");
const float subd_dicing_rate = max(0.1f, RNA_float_get(&cobj, "dicing_rate") * dicing_rate);
abc_object->set_subd_dicing_rate(subd_dicing_rate);
abc_object->set_subd_max_level(max_subdivisions);

abc_object->set_ignore_subdivision(!has_subdivision_modifier);

if (abc_object->is_modified() || procedural->is_modified()) {
procedural->tag_update(scene);
}
#else
(void)b_ob;
(void)b_mesh_cache;
(void)has_subdivision_modifier;
#endif
}
void BlenderSync::sync_objects(BL::Depsgraph &b_depsgraph,
BL::SpaceView3D &b_v3d,
const float motion_time)
@@ -577,35 +499,13 @@ void BlenderSync::sync_objects(BL::Depsgraph &b_depsgraph,

/* Object itself. */
if (b_instance.show_self()) {
#ifdef WITH_ALEMBIC
bool use_procedural = false;
bool has_subdivision_modifier = false;
BL::MeshSequenceCacheModifier b_mesh_cache(PointerRNA_NULL);

/* Experimental as Blender does not have good support for procedurals at the moment. */
if (use_experimental_procedural) {
b_mesh_cache = object_mesh_cache_find(b_ob, &has_subdivision_modifier);
use_procedural = b_mesh_cache && b_mesh_cache.cache_file().use_render_procedural();
}

if (use_procedural) {
/* Skip in the motion case, as generating motion blur data will be handled in the
* procedural. */
if (!motion) {
sync_procedural(b_ob, b_mesh_cache, has_subdivision_modifier);
}
}
else
#endif
{
sync_object(b_view_layer,
b_instance,
motion_time,
false,
show_lights,
culling,
sync_hair ? nullptr : &geom_task_pool);
}
sync_object(b_view_layer,
b_instance,
motion_time,
false,
show_lights,
culling,
sync_hair ? nullptr : &geom_task_pool);
}

/* Particle hair as separate object. */
@@ -118,9 +118,6 @@ macro(cycles_external_libraries_append libraries)
endif()
endif()
endif()
if(WITH_ALEMBIC)
list(APPEND ${libraries} ${ALEMBIC_LIBRARIES})
endif()
if(WITH_PATH_GUIDING)
list(APPEND ${libraries} ${OPENPGL_LIBRARIES})
endif()
@@ -9,8 +9,6 @@ set(INC
)

set(SRC
alembic.cpp
alembic_read.cpp
attribute.cpp
background.cpp
bake.cpp
@@ -54,8 +52,6 @@ set(SRC
)

set(SRC_HEADERS
alembic.h
alembic_read.h
attribute.h
bake.h
background.h
@@ -133,16 +129,6 @@ if(WITH_OPENVDB)
)
endif()

if(WITH_ALEMBIC)
add_definitions(-DWITH_ALEMBIC)
list(APPEND INC_SYS
${ALEMBIC_INCLUDE_DIRS}
)
list(APPEND LIB
${ALEMBIC_LIBRARIES}
)
endif()

if(WITH_NANOVDB)
list(APPEND INC_SYS
${NANOVDB_INCLUDE_DIRS}
@@ -1,1566 +0,0 @@
|
||||
/* SPDX-FileCopyrightText: 2011-2022 Blender Foundation
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0 */
|
||||
|
||||
#include "scene/alembic.h"
|
||||
|
||||
#include "scene/alembic_read.h"
|
||||
#include "scene/camera.h"
|
||||
#include "scene/curves.h"
|
||||
#include "scene/hair.h"
|
||||
#include "scene/mesh.h"
|
||||
#include "scene/object.h"
|
||||
#include "scene/pointcloud.h"
|
||||
#include "scene/scene.h"
|
||||
#include "scene/shader.h"
|
||||
|
||||
#include "util/log.h"
|
||||
#include "util/progress.h"
|
||||
#include "util/set.h"
|
||||
#include "util/transform.h"
|
||||
#include "util/vector.h"
|
||||
|
||||
#ifdef WITH_ALEMBIC
|
||||
|
||||
using namespace Alembic::AbcGeom;
|
||||
|
||||
CCL_NAMESPACE_BEGIN
|
||||
|
||||
/* TODO(kevindietrich): motion blur support. */
|
||||
|
||||
template<typename SchemaType>
|
||||
static vector<FaceSetShaderIndexPair> parse_face_sets_for_shader_assignment(
|
||||
SchemaType &schema, const array<Node *> &used_shaders)
|
||||
{
|
||||
vector<FaceSetShaderIndexPair> result;
|
||||
|
||||
std::vector<std::string> face_set_names;
|
||||
schema.getFaceSetNames(face_set_names);
|
||||
|
||||
if (face_set_names.empty()) {
|
||||
return result;
|
||||
}
|
||||
|
||||
for (const std::string &face_set_name : face_set_names) {
|
||||
int shader_index = 0;
|
||||
|
||||
for (Node *node : used_shaders) {
|
||||
if (node->name == face_set_name) {
|
||||
break;
|
||||
}
|
||||
|
||||
++shader_index;
|
||||
}
|
||||
|
||||
if (shader_index >= used_shaders.size()) {
|
||||
/* use the first shader instead if none was found */
|
||||
shader_index = 0;
|
||||
}
|
||||
|
||||
const Alembic::AbcGeom::IFaceSet face_set = schema.getFaceSet(face_set_name);
|
||||
|
||||
if (!face_set.valid()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
result.push_back({face_set, shader_index});
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
void CachedData::clear()
|
||||
{
|
||||
attributes.clear();
|
||||
curve_first_key.clear();
|
||||
curve_keys.clear();
|
||||
curve_radius.clear();
|
||||
curve_shader.clear();
|
||||
shader.clear();
|
||||
subd_creases_edge.clear();
|
||||
subd_creases_weight.clear();
|
||||
subd_face_corners.clear();
|
||||
subd_num_corners.clear();
|
||||
subd_ptex_offset.clear();
|
||||
subd_smooth.clear();
|
||||
subd_start_corner.clear();
|
||||
transforms.clear();
|
||||
triangles.clear();
|
||||
uv_loops.clear();
|
||||
vertices.clear();
|
||||
points.clear();
|
||||
radiuses.clear();
|
||||
points_shader.clear();
|
||||
|
||||
for (CachedAttribute &attr : attributes) {
|
||||
attr.data.clear();
|
||||
}
|
||||
|
||||
attributes.clear();
|
||||
}
|
||||
|
||||
CachedData::CachedAttribute &CachedData::add_attribute(const ustring &name,
|
||||
const TimeSampling &time_sampling)
|
||||
{
|
||||
for (auto &attr : attributes) {
|
||||
if (attr.name == name) {
|
||||
return attr;
|
||||
}
|
||||
}
|
||||
|
||||
CachedAttribute &attr = attributes.emplace_back();
|
||||
attr.name = name;
|
||||
attr.data.set_time_sampling(time_sampling);
|
||||
return attr;
|
||||
}
|
||||
|
||||
bool CachedData::is_constant() const
|
||||
{
|
||||
# define CHECK_IF_CONSTANT(data) \
|
||||
if (!data.is_constant()) { \
|
||||
return false; \
|
||||
}
|
||||
|
||||
CHECK_IF_CONSTANT(curve_first_key)
|
||||
CHECK_IF_CONSTANT(curve_keys)
|
||||
CHECK_IF_CONSTANT(curve_radius)
|
||||
CHECK_IF_CONSTANT(curve_shader)
|
||||
CHECK_IF_CONSTANT(shader)
|
||||
CHECK_IF_CONSTANT(subd_creases_edge)
|
||||
CHECK_IF_CONSTANT(subd_creases_weight)
|
||||
CHECK_IF_CONSTANT(subd_face_corners)
|
||||
CHECK_IF_CONSTANT(subd_num_corners)
|
||||
CHECK_IF_CONSTANT(subd_ptex_offset)
|
||||
CHECK_IF_CONSTANT(subd_smooth)
|
||||
CHECK_IF_CONSTANT(subd_start_corner)
|
||||
CHECK_IF_CONSTANT(transforms)
|
||||
CHECK_IF_CONSTANT(triangles)
|
||||
CHECK_IF_CONSTANT(uv_loops)
|
||||
CHECK_IF_CONSTANT(vertices)
|
||||
CHECK_IF_CONSTANT(points)
|
||||
CHECK_IF_CONSTANT(radiuses)
|
||||
CHECK_IF_CONSTANT(points_shader)
|
||||
|
||||
for (const CachedAttribute &attr : attributes) {
|
||||
if (!attr.data.is_constant()) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
|
||||
# undef CHECK_IF_CONSTANT
|
||||
}
|
||||
|
||||
void CachedData::invalidate_last_loaded_time(bool attributes_only)
|
||||
{
|
||||
if (attributes_only) {
|
||||
for (CachedAttribute &attr : attributes) {
|
||||
attr.data.invalidate_last_loaded_time();
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
curve_first_key.invalidate_last_loaded_time();
|
||||
curve_keys.invalidate_last_loaded_time();
|
||||
curve_radius.invalidate_last_loaded_time();
|
||||
curve_shader.invalidate_last_loaded_time();
|
||||
shader.invalidate_last_loaded_time();
|
||||
subd_creases_edge.invalidate_last_loaded_time();
|
||||
subd_creases_weight.invalidate_last_loaded_time();
|
||||
subd_face_corners.invalidate_last_loaded_time();
|
||||
subd_num_corners.invalidate_last_loaded_time();
|
||||
subd_ptex_offset.invalidate_last_loaded_time();
|
||||
subd_smooth.invalidate_last_loaded_time();
|
||||
subd_start_corner.invalidate_last_loaded_time();
|
||||
transforms.invalidate_last_loaded_time();
|
||||
triangles.invalidate_last_loaded_time();
|
||||
uv_loops.invalidate_last_loaded_time();
|
||||
vertices.invalidate_last_loaded_time();
|
||||
points.invalidate_last_loaded_time();
|
||||
radiuses.invalidate_last_loaded_time();
|
||||
points_shader.invalidate_last_loaded_time();
|
||||
}
|
||||
|
||||
void CachedData::set_time_sampling(TimeSampling time_sampling)
|
||||
{
|
||||
curve_first_key.set_time_sampling(time_sampling);
|
||||
curve_keys.set_time_sampling(time_sampling);
|
||||
curve_radius.set_time_sampling(time_sampling);
|
||||
curve_shader.set_time_sampling(time_sampling);
|
||||
shader.set_time_sampling(time_sampling);
|
||||
subd_creases_edge.set_time_sampling(time_sampling);
|
||||
subd_creases_weight.set_time_sampling(time_sampling);
|
||||
subd_face_corners.set_time_sampling(time_sampling);
|
||||
subd_num_corners.set_time_sampling(time_sampling);
|
||||
subd_ptex_offset.set_time_sampling(time_sampling);
|
||||
subd_smooth.set_time_sampling(time_sampling);
|
||||
subd_start_corner.set_time_sampling(time_sampling);
|
||||
transforms.set_time_sampling(time_sampling);
|
||||
triangles.set_time_sampling(time_sampling);
|
||||
uv_loops.set_time_sampling(time_sampling);
|
||||
vertices.set_time_sampling(time_sampling);
|
||||
points.set_time_sampling(time_sampling);
|
||||
radiuses.set_time_sampling(time_sampling);
|
||||
points_shader.set_time_sampling(time_sampling);
|
||||
|
||||
for (CachedAttribute &attr : attributes) {
|
||||
attr.data.set_time_sampling(time_sampling);
|
||||
}
|
||||
}
|
||||
|
||||
size_t CachedData::memory_used() const
|
||||
{
|
||||
size_t mem_used = 0;
|
||||
|
||||
mem_used += curve_first_key.memory_used();
|
||||
mem_used += curve_keys.memory_used();
|
||||
mem_used += curve_radius.memory_used();
|
||||
mem_used += curve_shader.memory_used();
|
||||
mem_used += shader.memory_used();
|
||||
mem_used += subd_creases_edge.memory_used();
|
||||
mem_used += subd_creases_weight.memory_used();
|
||||
mem_used += subd_face_corners.memory_used();
|
||||
mem_used += subd_num_corners.memory_used();
|
||||
mem_used += subd_ptex_offset.memory_used();
|
||||
mem_used += subd_smooth.memory_used();
|
||||
mem_used += subd_start_corner.memory_used();
|
||||
mem_used += transforms.memory_used();
|
||||
mem_used += triangles.memory_used();
|
||||
mem_used += uv_loops.memory_used();
|
||||
mem_used += vertices.memory_used();
|
||||
mem_used += points.memory_used();
|
||||
mem_used += radiuses.memory_used();
|
||||
mem_used += points_shader.memory_used();
|
||||
|
||||
for (const CachedAttribute &attr : attributes) {
|
||||
mem_used += attr.data.memory_used();
|
||||
}
|
||||
|
||||
return mem_used;
|
||||
}
|
||||
|
||||
static M44d convert_yup_zup(const M44d &mtx, const float scale_mult)
|
||||
{
|
||||
V3d scale;
|
||||
V3d shear;
|
||||
V3d rotation;
|
||||
V3d translation;
|
||||
|
||||
if (!extractSHRT(mtx,
|
||||
scale,
|
||||
shear,
|
||||
rotation,
|
||||
translation,
|
||||
true,
|
||||
IMATH_INTERNAL_NAMESPACE::Euler<double>::XZY))
|
||||
{
|
||||
return mtx;
|
||||
}
|
||||
|
||||
M44d rot_mat;
|
||||
M44d scale_mat;
|
||||
M44d trans_mat;
|
||||
rot_mat.setEulerAngles(V3d(rotation.x, -rotation.z, rotation.y));
|
||||
scale_mat.setScale(V3d(scale.x, scale.z, scale.y));
|
||||
trans_mat.setTranslation(V3d(translation.x, -translation.z, translation.y));
|
||||
|
||||
const M44d temp_mat = scale_mat * rot_mat * trans_mat;
|
||||
|
||||
scale_mat.setScale(static_cast<double>(scale_mult));
|
||||
|
||||
return temp_mat * scale_mat;
|
||||
}
|
||||
|
||||
static void transform_decompose(
|
||||
const M44d &mat, V3d &scale, V3d &shear, Quatd &rotation, V3d &translation)
|
||||
{
|
||||
M44d mat_remainder(mat);
|
||||
|
||||
/* extract scale and shear */
|
||||
Imath::extractAndRemoveScalingAndShear(mat_remainder, scale, shear);
|
||||
|
||||
/* extract translation */
|
||||
translation.x = mat_remainder[3][0];
|
||||
translation.y = mat_remainder[3][1];
|
||||
translation.z = mat_remainder[3][2];
|
||||
|
||||
/* extract rotation */
|
||||
rotation = extractQuat(mat_remainder);
|
||||
}
|
||||
|
||||
static M44d transform_compose(const V3d &scale,
|
||||
const V3d &shear,
|
||||
const Quatd &rotation,
|
||||
const V3d &translation)
|
||||
{
|
||||
M44d scale_mat;
|
||||
M44d shear_mat;
|
||||
M44d rot_mat;
|
||||
M44d trans_mat;
|
||||
|
||||
scale_mat.setScale(scale);
|
||||
shear_mat.setShear(shear);
|
||||
rot_mat = rotation.toMatrix44();
|
||||
trans_mat.setTranslation(translation);
|
||||
|
||||
return scale_mat * shear_mat * rot_mat * trans_mat;
|
||||
}
|
||||
|
||||
/* get the matrix for the specified time, or return the identity matrix if there is no exact match
|
||||
*/
|
||||
static M44d get_matrix_for_time(const MatrixSampleMap &samples, chrono_t time)
|
||||
{
|
||||
const MatrixSampleMap::const_iterator iter = samples.find(time);
|
||||
if (iter != samples.end()) {
|
||||
return iter->second;
|
||||
}
|
||||
|
||||
return M44d();
|
||||
}
|
||||
|
||||
/* get the matrix for the specified time, or interpolate between samples if there is no exact match
|
||||
*/
|
||||
static M44d get_interpolated_matrix_for_time(const MatrixSampleMap &samples, chrono_t time)
|
||||
{
|
||||
if (samples.empty()) {
|
||||
return M44d();
|
||||
}
|
||||
|
||||
/* see if exact match */
|
||||
const MatrixSampleMap::const_iterator iter = samples.find(time);
|
||||
if (iter != samples.end()) {
|
||||
return iter->second;
|
||||
}
|
||||
|
||||
if (samples.size() == 1) {
|
||||
return samples.begin()->second;
|
||||
}
|
||||
|
||||
if (time <= samples.begin()->first) {
|
||||
return samples.begin()->second;
|
||||
}
|
||||
|
||||
if (time >= samples.rbegin()->first) {
|
||||
return samples.rbegin()->second;
|
||||
}
|
||||
|
||||
/* find previous and next time sample to interpolate */
|
||||
chrono_t prev_time = samples.begin()->first;
|
||||
chrono_t next_time = samples.rbegin()->first;
|
||||
|
||||
for (MatrixSampleMap::const_iterator I = samples.begin(); I != samples.end(); ++I) {
|
||||
const chrono_t current_time = (*I).first;
|
||||
|
||||
if (current_time > prev_time && current_time <= time) {
|
||||
prev_time = current_time;
|
||||
}
|
||||
|
||||
if (current_time > next_time && current_time >= time) {
|
||||
next_time = current_time;
|
||||
}
|
||||
}
|
||||
|
||||
const M44d prev_mat = get_matrix_for_time(samples, prev_time);
|
||||
const M44d next_mat = get_matrix_for_time(samples, next_time);
|
||||
|
||||
V3d prev_scale;
|
||||
V3d next_scale;
|
||||
V3d prev_shear;
|
||||
V3d next_shear;
|
||||
V3d prev_translation;
|
||||
V3d next_translation;
|
||||
Quatd prev_rotation;
|
||||
Quatd next_rotation;
|
||||
|
||||
transform_decompose(prev_mat, prev_scale, prev_shear, prev_rotation, prev_translation);
|
||||
transform_decompose(next_mat, next_scale, next_shear, next_rotation, next_translation);
|
||||
|
||||
const chrono_t t = (time - prev_time) / (next_time - prev_time);
|
||||
|
||||
/* Ensure rotation around the shortest angle. */
|
||||
if ((prev_rotation ^ next_rotation) < 0) {
|
||||
next_rotation = -next_rotation;
|
||||
}
|
||||
|
||||
return transform_compose(Imath::lerp(prev_scale, next_scale, t),
|
||||
Imath::lerp(prev_shear, next_shear, t),
|
||||
Imath::slerp(prev_rotation, next_rotation, t),
|
||||
Imath::lerp(prev_translation, next_translation, t));
|
||||
}
|
||||
|
||||
static void concatenate_xform_samples(const MatrixSampleMap &parent_samples,
|
||||
const MatrixSampleMap &local_samples,
|
||||
MatrixSampleMap &output_samples)
|
||||
{
|
||||
set<chrono_t> union_of_samples;
|
||||
|
||||
for (const std::pair<chrono_t, M44d> pair : parent_samples) {
|
||||
union_of_samples.insert(pair.first);
|
||||
}
|
||||
|
||||
for (const std::pair<chrono_t, M44d> pair : local_samples) {
|
||||
union_of_samples.insert(pair.first);
|
||||
}
|
||||
|
||||
for (const chrono_t time : union_of_samples) {
|
||||
const M44d parent_matrix = get_interpolated_matrix_for_time(parent_samples, time);
|
||||
const M44d local_matrix = get_interpolated_matrix_for_time(local_samples, time);
|
||||
|
||||
output_samples[time] = local_matrix * parent_matrix;
|
||||
}
|
||||
}
|
||||
|
||||
static Transform make_transform(const M44d &a, const float scale)
|
||||
{
|
||||
M44d m = convert_yup_zup(a, scale);
|
||||
Transform trans;
|
||||
for (int j = 0; j < 3; j++) {
|
||||
for (int i = 0; i < 4; i++) {
|
||||
trans[j][i] = static_cast<float>(m[i][j]);
|
||||
}
|
||||
}
|
||||
return trans;
|
||||
}
|
||||
|
||||
NODE_DEFINE(AlembicObject)
|
||||
{
|
||||
NodeType *type = NodeType::add("alembic_object", create);
|
||||
|
||||
SOCKET_STRING(path, "Alembic Path", ustring());
|
||||
SOCKET_NODE_ARRAY(used_shaders, "Used Shaders", Shader::get_node_type());
|
||||
|
||||
SOCKET_BOOLEAN(ignore_subdivision, "Ignore Subdivision", true);
|
||||
|
||||
SOCKET_INT(subd_max_level, "Max Subdivision Level", 1);
|
||||
SOCKET_FLOAT(subd_dicing_rate, "Subdivision Dicing Rate", 1.0f);
|
||||
|
||||
SOCKET_FLOAT(radius_scale, "Radius Scale", 1.0f);
|
||||
|
||||
return type;
|
||||
}
|
||||
|
||||
AlembicObject::AlembicObject() : Node(get_node_type())
|
||||
{
|
||||
schema_type = INVALID;
|
||||
}
|
||||
|
||||
AlembicObject::~AlembicObject() = default;
|
||||
|
||||
void AlembicObject::set_object(Object *object_)
|
||||
{
|
||||
object = object_;
|
||||
}
|
||||
|
||||
Object *AlembicObject::get_object()
|
||||
{
|
||||
return object;
|
||||
}
|
||||
|
||||
bool AlembicObject::has_data_loaded() const
|
||||
{
|
||||
return data_loaded;
|
||||
}
|
||||
|
||||
void AlembicObject::load_data_in_cache(CachedData &cached_data,
|
||||
AlembicProcedural *proc,
|
||||
IPolyMeshSchema &schema,
|
||||
Progress &progress)
|
||||
{
|
||||
/* Only load data for the original Geometry. */
|
||||
if (instance_of) {
|
||||
return;
|
||||
}
|
||||
|
||||
cached_data.clear();
|
||||
|
||||
PolyMeshSchemaData data;
|
||||
data.topology_variance = schema.getTopologyVariance();
|
||||
data.time_sampling = schema.getTimeSampling();
|
||||
data.positions = schema.getPositionsProperty();
|
||||
data.face_counts = schema.getFaceCountsProperty();
|
||||
data.face_indices = schema.getFaceIndicesProperty();
|
||||
data.normals = schema.getNormalsParam();
|
||||
data.num_samples = schema.getNumSamples();
|
||||
data.shader_face_sets = parse_face_sets_for_shader_assignment(schema, get_used_shaders());
|
||||
|
||||
read_geometry_data(proc, cached_data, data, progress);
|
||||
|
||||
if (progress.get_cancel()) {
|
||||
return;
|
||||
}
|
||||
|
||||
/* Use the schema as the base compound property to also be able to look for top level properties.
|
||||
*/
|
||||
read_attributes(
|
||||
proc, cached_data, schema, schema.getUVsParam(), get_requested_attributes(), progress);
|
||||
|
||||
if (progress.get_cancel()) {
|
||||
return;
|
||||
}
|
||||
|
||||
cached_data.invalidate_last_loaded_time(true);
|
||||
data_loaded = true;
|
||||
}
|
||||
|
||||
void AlembicObject::load_data_in_cache(CachedData &cached_data,
|
||||
AlembicProcedural *proc,
|
||||
ISubDSchema &schema,
|
||||
Progress &progress)
|
||||
{
|
||||
/* Only load data for the original Geometry. */
|
||||
if (instance_of) {
|
||||
return;
|
||||
}
|
||||
|
||||
cached_data.clear();
|
||||
|
||||
if (this->get_ignore_subdivision()) {
|
||||
PolyMeshSchemaData data;
|
||||
data.topology_variance = schema.getTopologyVariance();
|
||||
data.time_sampling = schema.getTimeSampling();
|
||||
data.positions = schema.getPositionsProperty();
|
||||
data.face_counts = schema.getFaceCountsProperty();
|
||||
data.face_indices = schema.getFaceIndicesProperty();
|
||||
data.num_samples = schema.getNumSamples();
|
||||
data.velocities = schema.getVelocitiesProperty();
|
||||
data.shader_face_sets = parse_face_sets_for_shader_assignment(schema, get_used_shaders());
|
||||
|
||||
read_geometry_data(proc, cached_data, data, progress);
|
||||
|
||||
if (progress.get_cancel()) {
|
||||
return;
|
||||
}
|
||||
|
||||
/* Use the schema as the base compound property to also be able to look for top level
|
||||
* properties. */
|
||||
read_attributes(
|
||||
proc, cached_data, schema, schema.getUVsParam(), get_requested_attributes(), progress);
|
||||
|
||||
cached_data.invalidate_last_loaded_time(true);
|
||||
data_loaded = true;
|
||||
return;
|
||||
}
|
||||
|
||||
SubDSchemaData data;
|
||||
data.time_sampling = schema.getTimeSampling();
|
||||
data.num_samples = schema.getNumSamples();
|
||||
data.topology_variance = schema.getTopologyVariance();
|
||||
data.face_counts = schema.getFaceCountsProperty();
|
||||
data.face_indices = schema.getFaceIndicesProperty();
|
||||
data.positions = schema.getPositionsProperty();
|
||||
data.face_varying_interpolate_boundary = schema.getFaceVaryingInterpolateBoundaryProperty();
|
||||
data.face_varying_propagate_corners = schema.getFaceVaryingPropagateCornersProperty();
|
||||
data.interpolate_boundary = schema.getInterpolateBoundaryProperty();
|
||||
data.crease_indices = schema.getCreaseIndicesProperty();
|
||||
data.crease_lengths = schema.getCreaseLengthsProperty();
|
||||
data.crease_sharpnesses = schema.getCreaseSharpnessesProperty();
|
||||
data.corner_indices = schema.getCornerIndicesProperty();
|
||||
data.corner_sharpnesses = schema.getCornerSharpnessesProperty();
|
||||
data.holes = schema.getHolesProperty();
|
||||
data.subdivision_scheme = schema.getSubdivisionSchemeProperty();
|
||||
data.velocities = schema.getVelocitiesProperty();
|
||||
data.shader_face_sets = parse_face_sets_for_shader_assignment(schema, get_used_shaders());
|
||||
|
||||
read_geometry_data(proc, cached_data, data, progress);
|
||||
|
||||
if (progress.get_cancel()) {
|
||||
return;
|
||||
}
|
||||
|
||||
/* Use the schema as the base compound property to also be able to look for top level properties.
|
||||
*/
|
||||
read_attributes(
|
||||
proc, cached_data, schema, schema.getUVsParam(), get_requested_attributes(), progress);
|
||||
|
||||
cached_data.invalidate_last_loaded_time(true);
|
||||
data_loaded = true;
|
||||
}
|
||||
|
||||
void AlembicObject::load_data_in_cache(CachedData &cached_data,
|
||||
AlembicProcedural *proc,
|
||||
const ICurvesSchema &schema,
|
||||
Progress &progress)
|
||||
{
|
||||
/* Only load data for the original Geometry. */
|
||||
if (instance_of) {
|
||||
return;
|
||||
}
|
||||
|
||||
cached_data.clear();
|
||||
|
||||
CurvesSchemaData data;
|
||||
data.positions = schema.getPositionsProperty();
|
||||
data.position_weights = schema.getPositionWeightsProperty();
|
||||
data.normals = schema.getNormalsParam();
|
||||
data.knots = schema.getKnotsProperty();
|
||||
data.orders = schema.getOrdersProperty();
|
||||
data.widths = schema.getWidthsParam();
|
||||
data.velocities = schema.getVelocitiesProperty();
|
||||
data.time_sampling = schema.getTimeSampling();
|
||||
data.topology_variance = schema.getTopologyVariance();
|
||||
data.num_samples = schema.getNumSamples();
|
||||
data.num_vertices = schema.getNumVerticesProperty();
|
||||
data.default_radius = proc->get_default_radius();
|
||||
data.radius_scale = get_radius_scale();
|
||||
|
||||
read_geometry_data(proc, cached_data, data, progress);
|
||||
|
||||
if (progress.get_cancel()) {
|
||||
return;
|
||||
}
|
||||
|
||||
/* Use the schema as the base compound property to also be able to look for top level properties.
|
||||
*/
|
||||
read_attributes(
|
||||
proc, cached_data, schema, schema.getUVsParam(), get_requested_attributes(), progress);
|
||||
|
||||
cached_data.invalidate_last_loaded_time(true);
|
||||
data_loaded = true;
|
||||
}
|
||||
|
||||
void AlembicObject::load_data_in_cache(CachedData &cached_data,
|
||||
AlembicProcedural *proc,
|
||||
const IPointsSchema &schema,
|
||||
Progress &progress)
|
||||
{
|
||||
/* Only load data for the original Geometry. */
|
||||
if (instance_of) {
|
||||
return;
|
||||
}
|
||||
|
||||
cached_data.clear();
|
||||
|
||||
PointsSchemaData data;
|
||||
data.positions = schema.getPositionsProperty();
|
||||
data.radiuses = schema.getWidthsParam();
|
||||
data.velocities = schema.getVelocitiesProperty();
|
||||
data.time_sampling = schema.getTimeSampling();
|
||||
data.num_samples = schema.getNumSamples();
|
||||
data.default_radius = proc->get_default_radius();
|
||||
data.radius_scale = get_radius_scale();
|
||||
|
||||
read_geometry_data(proc, cached_data, data, progress);
|
||||
|
||||
if (progress.get_cancel()) {
|
||||
return;
|
||||
}
|
||||
|
||||
/* Use the schema as the base compound property to also be able to look for top level properties.
|
||||
*/
|
||||
read_attributes(proc, cached_data, schema, {}, get_requested_attributes(), progress);
|
||||
|
||||
cached_data.invalidate_last_loaded_time(true);
|
||||
data_loaded = true;
|
||||
}
|
||||
|
||||
void AlembicObject::setup_transform_cache(CachedData &cached_data, float scale)
|
||||
{
|
||||
cached_data.transforms.clear();
|
||||
cached_data.transforms.invalidate_last_loaded_time();
|
||||
|
||||
if (scale == 0.0f) {
|
||||
scale = 1.0f;
|
||||
}
|
||||
|
||||
if (xform_time_sampling) {
|
||||
cached_data.transforms.set_time_sampling(*xform_time_sampling);
|
||||
}
|
||||
|
||||
if (xform_samples.empty()) {
|
||||
Transform tfm = transform_scale(make_float3(scale));
|
||||
cached_data.transforms.add_data(tfm, 0.0);
|
||||
}
|
||||
else {
|
||||
/* It is possible for a leaf node of the hierarchy to have multiple samples for its transforms
|
||||
* if a sibling has animated transforms. So check if we indeed have animated transformations.
|
||||
*/
|
||||
const M44d first_matrix = xform_samples.begin()->first;
|
||||
bool has_animation = false;
|
||||
for (const std::pair<chrono_t, M44d> pair : xform_samples) {
|
||||
if (pair.second != first_matrix) {
|
||||
has_animation = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!has_animation) {
|
||||
Transform tfm = make_transform(first_matrix, scale);
|
||||
cached_data.transforms.add_data(tfm, 0.0);
|
||||
}
|
||||
else {
|
||||
for (const std::pair<chrono_t, M44d> pair : xform_samples) {
|
||||
Transform tfm = make_transform(pair.second, scale);
|
||||
cached_data.transforms.add_data(tfm, pair.first);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
AttributeRequestSet AlembicObject::get_requested_attributes()
|
||||
{
|
||||
AttributeRequestSet requested_attributes;
|
||||
|
||||
Geometry *geometry = object->get_geometry();
|
||||
assert(geometry);
|
||||
|
||||
for (Node *node : geometry->get_used_shaders()) {
|
||||
Shader *shader = static_cast<Shader *>(node);
|
||||
|
||||
for (const AttributeRequest &attr : shader->attributes.requests) {
|
||||
if (!attr.name.empty()) {
|
||||
requested_attributes.add(attr.name);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return requested_attributes;
|
||||
}
|
||||
|
||||
/* Update existing attributes and remove any attribute not in the cached_data, those attributes
|
||||
* were added by Cycles (e.g. face normals) */
|
||||
static void update_attributes(AttributeSet &attributes,
|
||||
CachedData &cached_data,
|
||||
const double frame_time)
|
||||
{
|
||||
set<Attribute *> cached_attributes;
|
||||
|
||||
for (CachedData::CachedAttribute &attribute : cached_data.attributes) {
|
||||
const CacheLookupResult<array<char>> result = attribute.data.data_for_time(frame_time);
|
||||
|
||||
if (result.has_no_data_for_time()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
Attribute *attr = nullptr;
|
||||
if (attribute.std != ATTR_STD_NONE) {
|
||||
attr = attributes.add(attribute.std, attribute.name);
|
||||
}
|
||||
else {
|
||||
attr = attributes.add(attribute.name, attribute.type_desc, attribute.element);
|
||||
}
|
||||
assert(attr);
|
||||
|
||||
cached_attributes.insert(attr);
|
||||
|
||||
if (!result.has_new_data()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
const array<char> &attr_data = result.get_data();
|
||||
|
||||
/* weak way of detecting if the topology has changed
|
||||
* todo: reuse code from device_update patch */
|
||||
if (attr->buffer.size() != attr_data.size()) {
|
||||
attr->buffer.resize(attr_data.size());
|
||||
}
|
||||
|
||||
memcpy(attr->data(), attr_data.data(), attr_data.size());
|
||||
attr->modified = true;
|
||||
}
|
||||
|
||||
/* remove any attributes not in cached_attributes */
|
||||
list<Attribute>::iterator it;
|
||||
for (it = attributes.attributes.begin(); it != attributes.attributes.end();) {
|
||||
if (cached_attributes.find(&(*it)) == cached_attributes.end()) {
|
||||
attributes.remove(it++);
|
||||
continue;
|
||||
}
|
||||
|
||||
it++;
|
||||
}
|
||||
}
|
||||
|
||||
NODE_DEFINE(AlembicProcedural)
|
||||
{
|
||||
NodeType *type = NodeType::add("alembic", create);
|
||||
|
||||
SOCKET_STRING(filepath, "Filename", ustring());
|
||||
SOCKET_STRING_ARRAY(layers, "Layers", array<ustring>());
|
||||
SOCKET_FLOAT(frame, "Frame", 1.0f);
|
||||
SOCKET_FLOAT(start_frame, "Start Frame", 1.0f);
|
||||
SOCKET_FLOAT(end_frame, "End Frame", 1.0f);
|
||||
SOCKET_FLOAT(frame_rate, "Frame Rate", 24.0f);
|
||||
SOCKET_FLOAT(frame_offset, "Frame Offset", 0.0f);
|
||||
SOCKET_FLOAT(default_radius, "Default Radius", 0.01f);
|
||||
SOCKET_FLOAT(scale, "Scale", 1.0f);
|
||||
|
||||
SOCKET_BOOLEAN(use_prefetch, "Use Prefetch", true);
|
||||
SOCKET_INT(prefetch_cache_size, "Prefetch Cache Size", 4096);
|
||||
|
||||
return type;
|
||||
}
|
||||
|
||||
AlembicProcedural::AlembicProcedural() : Procedural(get_node_type())
|
||||
{
|
||||
objects_loaded = false;
|
||||
scene_ = nullptr;
|
||||
}
|
||||
|
||||
AlembicProcedural::~AlembicProcedural()
|
||||
{
|
||||
ccl::set<Geometry *> geometries_set;
|
||||
ccl::set<Object *> objects_set;
|
||||
const ccl::set<AlembicObject *> abc_objects_set;
|
||||
|
||||
for (Node *node : nodes) {
|
||||
AlembicObject *abc_object = static_cast<AlembicObject *>(node);
|
||||
|
||||
if (abc_object->get_object()) {
|
||||
objects_set.insert(abc_object->get_object());
|
||||
|
||||
if (abc_object->get_object()->get_geometry()) {
|
||||
geometries_set.insert(abc_object->get_object()->get_geometry());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* We may delete a Procedural before rendering started, so scene_ can be null. */
|
||||
if (!scene_) {
|
||||
assert(geometries_set.empty());
|
||||
assert(objects_set.empty());
|
||||
return;
|
||||
}
|
||||
|
||||
scene_->delete_nodes(geometries_set, this);
|
||||
scene_->delete_nodes(objects_set, this);
|
||||
}
|
||||
|
||||
void AlembicProcedural::generate(Scene *scene, Progress &progress)
|
||||
{
|
||||
assert(scene_ == nullptr || scene_ == scene);
|
||||
scene_ = scene;
|
||||
|
||||
if (frame < start_frame || frame > end_frame) {
|
||||
clear_modified();
|
||||
objects_modified = false;
|
||||
return;
|
||||
}
|
||||
|
||||
bool need_shader_updates = false;
|
||||
bool need_data_updates = false;
|
||||
|
||||
for (Node *object_node : nodes) {
|
||||
AlembicObject *object = static_cast<AlembicObject *>(object_node);
|
||||
|
||||
if (object->is_modified()) {
|
||||
need_data_updates = true;
|
||||
}
|
||||
|
||||
/* Check if the shaders were modified. */
|
||||
if (object->used_shaders_is_modified() && object->get_object() &&
|
||||
object->get_object()->get_geometry())
|
||||
{
|
||||
Geometry *geometry = object->get_object()->get_geometry();
|
||||
array<Node *> used_shaders = object->get_used_shaders();
|
||||
geometry->set_used_shaders(used_shaders);
|
||||
need_shader_updates = true;
|
||||
}
|
||||
|
||||
/* Check for changes in shaders (e.g. newly requested attributes). */
|
||||
for (Node *shader_node : object->get_used_shaders()) {
|
||||
Shader *shader = static_cast<Shader *>(shader_node);
|
||||
|
||||
if (shader->need_update_geometry()) {
|
||||
object->need_shader_update = true;
|
||||
need_shader_updates = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!(is_modified() || objects_modified) && !need_shader_updates && !need_data_updates) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (!archive.valid() || filepath_is_modified() || layers_is_modified()) {
|
||||
Alembic::AbcCoreFactory::IFactory factory;
|
||||
factory.setPolicy(Alembic::Abc::ErrorHandler::kQuietNoopPolicy);
|
||||
|
||||
std::vector<std::string> filenames;
|
||||
filenames.emplace_back(filepath.c_str());
|
||||
|
||||
for (const ustring &layer : layers) {
|
||||
filenames.emplace_back(layer.c_str());
|
||||
}
|
||||
|
||||
/* We need to reverse the order as overriding archives should come first. */
|
||||
std::reverse(filenames.begin(), filenames.end());
|
||||
|
||||
archive = factory.getArchive(filenames);
|
||||
|
||||
if (!archive.valid()) {
|
||||
/* avoid potential infinite update loops in viewport synchronization */
|
||||
filepath.clear();
|
||||
layers.clear();
|
||||
clear_modified();
|
||||
objects_modified = false;
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
if (!objects_loaded || objects_modified) {
|
||||
load_objects(progress);
|
||||
objects_loaded = true;
|
||||
}
|
||||
|
||||
const chrono_t frame_time = (chrono_t)((frame - frame_offset) / frame_rate);
|
||||
|
||||
/* Clear the subdivision caches as the data is stored differently. */
|
||||
for (Node *node : nodes) {
|
||||
AlembicObject *object = static_cast<AlembicObject *>(node);
|
||||
|
||||
if (object->schema_type != AlembicObject::SUBD) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (object->ignore_subdivision_is_modified()) {
|
||||
object->clear_cache();
|
||||
}
|
||||
}
|
||||
|
||||
if (use_prefetch_is_modified()) {
|
||||
if (!use_prefetch) {
|
||||
for (Node *node : nodes) {
|
||||
AlembicObject *object = static_cast<AlembicObject *>(node);
|
||||
object->clear_cache();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (prefetch_cache_size_is_modified()) {
|
||||
/* Check whether the current memory usage fits in the new requested size,
|
||||
* abort the render if it is any higher. */
|
||||
size_t memory_used = 0ul;
|
||||
for (Node *node : nodes) {
|
||||
AlembicObject *object = static_cast<AlembicObject *>(node);
|
||||
memory_used += object->get_cached_data().memory_used();
|
||||
}
|
||||
|
||||
if (memory_used > get_prefetch_cache_size_in_bytes()) {
|
||||
progress.set_error("Error: Alembic Procedural memory limit reached");
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
build_caches(progress);
|
||||
|
||||
for (Node *node : nodes) {
|
||||
AlembicObject *object = static_cast<AlembicObject *>(node);
|
||||
|
||||
if (progress.get_cancel()) {
|
||||
return;
|
||||
}
|
||||
|
||||
/* skip constant objects */
|
||||
if (object->is_constant() && !object->is_modified() && !object->need_shader_update &&
|
||||
!scale_is_modified())
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
if (object->schema_type == AlembicObject::POLY_MESH) {
|
||||
read_mesh(object, frame_time);
|
||||
}
|
||||
else if (object->schema_type == AlembicObject::CURVES) {
|
||||
read_curves(object, frame_time);
|
||||
}
|
||||
else if (object->schema_type == AlembicObject::POINTS) {
|
||||
read_points(object, frame_time);
|
||||
}
|
||||
else if (object->schema_type == AlembicObject::SUBD) {
|
||||
read_subd(object, frame_time);
|
||||
}
|
||||
|
||||
object->need_shader_update = false;
|
||||
object->clear_modified();
|
||||
}
|
||||
|
||||
clear_modified();
|
||||
objects_modified = false;
|
||||
}
|
||||
|
||||
void AlembicProcedural::tag_update(Scene *scene)
|
||||
{
|
||||
scene->procedural_manager->tag_update();
|
||||
}
|
||||
|
||||
AlembicObject *AlembicProcedural::get_or_create_object(const ustring &path)
|
||||
{
|
||||
for (Node *node : nodes) {
|
||||
AlembicObject *object = static_cast<AlembicObject *>(node);
|
||||
|
||||
if (object->get_path() == path) {
|
||||
return object;
|
||||
}
|
||||
}
|
||||
|
||||
AlembicObject *object = create_node<AlembicObject>();
|
||||
object->set_path(path);
|
||||
objects_modified = true;
|
||||
|
||||
return object;
|
||||
}
|
||||
|
||||
void AlembicProcedural::load_objects(Progress &progress)
|
||||
{
|
||||
unordered_map<string, AlembicObject *> object_map;
|
||||
|
||||
for (Node *node : nodes) {
|
||||
AlembicObject *object = static_cast<AlembicObject *>(node);
|
||||
|
||||
/* only consider newly added objects */
|
||||
if (object->get_object() == nullptr) {
|
||||
object_map.insert({object->get_path().c_str(), object});
|
||||
}
|
||||
}
|
||||
|
||||
const IObject root = archive.getTop();
|
||||
|
||||
for (size_t i = 0; i < root.getNumChildren(); ++i) {
|
||||
walk_hierarchy(root, root.getChildHeader(i), {}, object_map, progress);
|
||||
}
|
||||
|
||||
/* Create nodes in the scene. */
|
||||
for (const std::pair<string, AlembicObject *> pair : object_map) {
|
||||
AlembicObject *abc_object = pair.second;
|
||||
|
||||
Geometry *geometry = nullptr;
|
||||
|
||||
if (!abc_object->instance_of) {
|
||||
if (abc_object->schema_type == AlembicObject::CURVES) {
|
||||
geometry = scene_->create_node<Hair>();
|
||||
}
|
||||
else if (abc_object->schema_type == AlembicObject::POINTS) {
|
||||
geometry = scene_->create_node<PointCloud>();
|
||||
}
|
||||
else if (abc_object->schema_type == AlembicObject::POLY_MESH ||
|
||||
abc_object->schema_type == AlembicObject::SUBD)
|
||||
{
|
||||
geometry = scene_->create_node<Mesh>();
|
||||
}
|
||||
else {
|
||||
continue;
|
||||
}
|
||||
|
||||
geometry->set_owner(this);
|
||||
geometry->name = abc_object->iobject.getName();
|
||||
|
||||
array<Node *> used_shaders = abc_object->get_used_shaders();
|
||||
geometry->set_used_shaders(used_shaders);
|
||||
}
|
||||
|
||||
Object *object = scene_->create_node<Object>();
|
||||
object->set_owner(this);
|
||||
object->set_geometry(geometry);
|
||||
object->name = abc_object->iobject.getName();
|
||||
|
||||
abc_object->set_object(object);
|
||||
}
|
||||
|
||||
/* Share geometries between instances. */
|
||||
for (Node *node : nodes) {
|
||||
AlembicObject *abc_object = static_cast<AlembicObject *>(node);
|
||||
|
||||
if (abc_object->instance_of) {
|
||||
abc_object->get_object()->set_geometry(
|
||||
abc_object->instance_of->get_object()->get_geometry());
|
||||
abc_object->schema_type = abc_object->instance_of->schema_type;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void AlembicProcedural::read_mesh(AlembicObject *abc_object, Abc::chrono_t frame_time)
|
||||
{
|
||||
CachedData &cached_data = abc_object->get_cached_data();
|
||||
|
||||
/* update sockets */
|
||||
|
||||
Object *object = abc_object->get_object();
|
||||
cached_data.transforms.copy_to_socket(frame_time, object, object->get_tfm_socket());
|
||||
|
||||
if (object->is_modified()) {
|
||||
object->tag_update(scene_);
|
||||
}
|
||||
|
||||
/* Only update sockets for the original Geometry. */
|
||||
if (abc_object->instance_of) {
|
||||
return;
|
||||
}
|
||||
|
||||
Mesh *mesh = static_cast<Mesh *>(object->get_geometry());
|
||||
|
||||
/* Make sure shader ids are also updated. */
|
||||
if (mesh->used_shaders_is_modified()) {
|
||||
mesh->tag_shader_modified();
|
||||
}
|
||||
|
||||
cached_data.vertices.copy_to_socket(frame_time, mesh, mesh->get_verts_socket());
|
||||
|
||||
cached_data.shader.copy_to_socket(frame_time, mesh, mesh->get_shader_socket());
|
||||
|
||||
array<int3> *triangle_data = cached_data.triangles.data_for_time(frame_time).get_data_or_null();
|
||||
if (triangle_data) {
|
||||
array<int> triangles;
|
||||
array<bool> smooth;
|
||||
|
||||
triangles.reserve(triangle_data->size() * 3);
|
||||
smooth.reserve(triangle_data->size());
|
||||
|
||||
for (size_t i = 0; i < triangle_data->size(); ++i) {
|
||||
const int3 tri = (*triangle_data)[i];
|
||||
triangles.push_back_reserved(tri.x);
|
||||
triangles.push_back_reserved(tri.y);
|
||||
triangles.push_back_reserved(tri.z);
|
||||
smooth.push_back_reserved(true);
|
||||
}
|
||||
|
||||
mesh->set_triangles(triangles);
|
||||
mesh->set_smooth(smooth);
|
||||
}
|
||||
|
||||
/* update attributes */
|
||||
|
||||
update_attributes(mesh->attributes, cached_data, frame_time);
|
||||
|
||||
if (mesh->is_modified()) {
|
||||
const bool need_rebuild = mesh->triangles_is_modified();
|
||||
mesh->tag_update(scene_, need_rebuild);
|
||||
}
|
||||
}
|
||||
|
||||
void AlembicProcedural::read_subd(AlembicObject *abc_object, Abc::chrono_t frame_time)
|
||||
{
|
||||
if (abc_object->get_ignore_subdivision()) {
|
||||
read_mesh(abc_object, frame_time);
|
||||
return;
|
||||
}
|
||||
|
||||
CachedData &cached_data = abc_object->get_cached_data();
|
||||
|
||||
/* Update sockets. */
|
||||
|
||||
Object *object = abc_object->get_object();
|
||||
cached_data.transforms.copy_to_socket(frame_time, object, object->get_tfm_socket());
|
||||
|
||||
if (object->is_modified()) {
|
||||
object->tag_update(scene_);
|
||||
}
|
||||
|
||||
/* Only update sockets for the original Geometry. */
|
||||
if (abc_object->instance_of) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (abc_object->subd_max_level_is_modified() || abc_object->subd_dicing_rate_is_modified()) {
|
||||
/* need to reset the current data is something changed */
|
||||
cached_data.invalidate_last_loaded_time();
|
||||
}
|
||||
|
||||
Mesh *mesh = static_cast<Mesh *>(object->get_geometry());
|
||||
|
||||
/* Make sure shader ids are also updated. */
|
||||
if (mesh->used_shaders_is_modified()) {
|
||||
mesh->tag_shader_modified();
|
||||
}
|
||||
|
||||
/* Cycles overwrites the original triangles when computing displacement, so we always have to
|
||||
* repass the data if something is animated (vertices most likely) to avoid buffer overflows. */
|
||||
if (!cached_data.is_constant()) {
|
||||
cached_data.invalidate_last_loaded_time();
|
||||
|
||||
/* remove previous triangles, if any */
|
||||
array<int> triangles;
|
||||
mesh->set_triangles(triangles);
|
||||
}
|
||||
|
||||
mesh->clear_non_sockets();
|
||||
|
||||
/* Alembic is OpenSubDiv compliant, there is no option to set another subdivision type. */
|
||||
mesh->set_subdivision_type(Mesh::SubdivisionType::SUBDIVISION_CATMULL_CLARK);
|
||||
mesh->set_subd_max_level(abc_object->get_subd_max_level());
|
||||
mesh->set_subd_dicing_rate(abc_object->get_subd_dicing_rate());
|
||||
|
||||
cached_data.vertices.copy_to_socket(frame_time, mesh, mesh->get_verts_socket());
|
||||
|
||||
/* cached_data.shader is also used for subd_shader */
|
||||
cached_data.shader.copy_to_socket(frame_time, mesh, mesh->get_subd_shader_socket());
|
||||
|
||||
cached_data.subd_start_corner.copy_to_socket(
|
||||
frame_time, mesh, mesh->get_subd_start_corner_socket());
|
||||
|
||||
cached_data.subd_num_corners.copy_to_socket(
|
||||
frame_time, mesh, mesh->get_subd_num_corners_socket());
|
||||
|
||||
cached_data.subd_smooth.copy_to_socket(frame_time, mesh, mesh->get_subd_smooth_socket());
|
||||
|
||||
cached_data.subd_ptex_offset.copy_to_socket(
|
||||
frame_time, mesh, mesh->get_subd_ptex_offset_socket());
|
||||
|
||||
cached_data.subd_face_corners.copy_to_socket(
|
||||
frame_time, mesh, mesh->get_subd_face_corners_socket());
|
||||
|
||||
cached_data.subd_creases_edge.copy_to_socket(
|
||||
frame_time, mesh, mesh->get_subd_creases_edge_socket());
|
||||
|
||||
cached_data.subd_creases_weight.copy_to_socket(
|
||||
frame_time, mesh, mesh->get_subd_creases_weight_socket());
|
||||
|
||||
cached_data.subd_vertex_crease_indices.copy_to_socket(
|
||||
frame_time, mesh, mesh->get_subd_vert_creases_socket());
|
||||
|
||||
cached_data.subd_vertex_crease_weights.copy_to_socket(
|
||||
frame_time, mesh, mesh->get_subd_vert_creases_weight_socket());
|
||||
|
||||
mesh->set_num_subd_faces(mesh->get_subd_shader().size());
|
||||
|
||||
/* Update attributes. */
|
||||
|
||||
update_attributes(mesh->subd_attributes, cached_data, frame_time);
|
||||
|
||||
if (mesh->is_modified()) {
|
||||
const bool need_rebuild = (mesh->triangles_is_modified()) ||
|
||||
(mesh->subd_num_corners_is_modified()) ||
|
||||
(mesh->subd_shader_is_modified()) ||
|
||||
(mesh->subd_smooth_is_modified()) ||
|
||||
(mesh->subd_ptex_offset_is_modified()) ||
|
||||
(mesh->subd_start_corner_is_modified()) ||
|
||||
(mesh->subd_face_corners_is_modified());
|
||||
|
||||
mesh->tag_update(scene_, need_rebuild);
|
||||
}
|
||||
}
|
||||
|
||||
void AlembicProcedural::read_curves(AlembicObject *abc_object, Abc::chrono_t frame_time)
|
||||
{
|
||||
CachedData &cached_data = abc_object->get_cached_data();
|
||||
|
||||
/* update sockets */
|
||||
|
||||
Object *object = abc_object->get_object();
|
||||
cached_data.transforms.copy_to_socket(frame_time, object, object->get_tfm_socket());
|
||||
|
||||
if (object->is_modified()) {
|
||||
object->tag_update(scene_);
|
||||
}
|
||||
|
||||
/* Only update sockets for the original Geometry. */
|
||||
if (abc_object->instance_of) {
|
||||
return;
|
||||
}
|
||||
|
||||
Hair *hair = static_cast<Hair *>(object->get_geometry());
|
||||
|
||||
/* Make sure shader ids are also updated. */
|
||||
if (hair->used_shaders_is_modified()) {
|
||||
hair->tag_curve_shader_modified();
|
||||
}
|
||||
|
||||
cached_data.curve_keys.copy_to_socket(frame_time, hair, hair->get_curve_keys_socket());
|
||||
|
||||
cached_data.curve_radius.copy_to_socket(frame_time, hair, hair->get_curve_radius_socket());
|
||||
|
||||
cached_data.curve_shader.copy_to_socket(frame_time, hair, hair->get_curve_shader_socket());
|
||||
|
||||
cached_data.curve_first_key.copy_to_socket(frame_time, hair, hair->get_curve_first_key_socket());
|
||||
|
||||
/* update attributes */
|
||||
|
||||
update_attributes(hair->attributes, cached_data, frame_time);
|
||||
|
||||
const bool rebuild = (hair->curve_keys_is_modified() || hair->curve_radius_is_modified());
|
||||
hair->tag_update(scene_, rebuild);
|
||||
}
|
||||
|
||||
void AlembicProcedural::read_points(AlembicObject *abc_object, Abc::chrono_t frame_time)
|
||||
{
|
||||
CachedData &cached_data = abc_object->get_cached_data();
|
||||
|
||||
/* update sockets */
|
||||
|
||||
Object *object = abc_object->get_object();
|
||||
cached_data.transforms.copy_to_socket(frame_time, object, object->get_tfm_socket());
|
||||
|
||||
if (object->is_modified()) {
|
||||
object->tag_update(scene_);
|
||||
}
|
||||
|
||||
/* Only update sockets for the original Geometry. */
|
||||
if (abc_object->instance_of) {
|
||||
return;
|
||||
}
|
||||
|
||||
PointCloud *point_cloud = static_cast<PointCloud *>(object->get_geometry());
|
||||
|
||||
/* Make sure shader ids are also updated. */
|
||||
if (point_cloud->used_shaders_is_modified()) {
|
||||
point_cloud->tag_shader_modified();
|
||||
}
|
||||
|
||||
cached_data.points.copy_to_socket(frame_time, point_cloud, point_cloud->get_points_socket());
|
||||
cached_data.radiuses.copy_to_socket(frame_time, point_cloud, point_cloud->get_radius_socket());
|
||||
cached_data.points_shader.copy_to_socket(
|
||||
frame_time, point_cloud, point_cloud->get_shader_socket());
|
||||
|
||||
/* update attributes */
|
||||
|
||||
update_attributes(point_cloud->attributes, cached_data, frame_time);
|
||||
|
||||
const bool rebuild = (point_cloud->points_is_modified() || point_cloud->radius_is_modified() ||
|
||||
point_cloud->shader_is_modified());
|
||||
point_cloud->tag_update(scene_, rebuild);
|
||||
}
|
||||
|
||||
void AlembicProcedural::walk_hierarchy(
|
||||
IObject parent,
|
||||
const ObjectHeader &header,
|
||||
MatrixSamplesData matrix_samples_data,
|
||||
const unordered_map<std::string, AlembicObject *> &object_map,
|
||||
Progress &progress)
|
||||
{
|
||||
if (progress.get_cancel()) {
|
||||
return;
|
||||
}
|
||||
|
||||
IObject next_object;
|
||||
|
||||
MatrixSampleMap concatenated_xform_samples;
|
||||
|
||||
if (IXform::matches(header)) {
|
||||
IXform xform(parent, header.getName());
|
||||
|
||||
const IXformSchema &xs = xform.getSchema();
|
||||
|
||||
if (xs.getNumOps() > 0) {
|
||||
const TimeSamplingPtr ts = xs.getTimeSampling();
|
||||
MatrixSampleMap local_xform_samples;
|
||||
|
||||
MatrixSampleMap *temp_xform_samples = nullptr;
|
||||
if (matrix_samples_data.samples == nullptr) {
|
||||
/* If there is no parent transforms, fill the map directly. */
|
||||
temp_xform_samples = &concatenated_xform_samples;
|
||||
}
|
||||
else {
|
||||
/* use a temporary map */
|
||||
temp_xform_samples = &local_xform_samples;
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < xs.getNumSamples(); ++i) {
|
||||
const chrono_t sample_time = ts->getSampleTime(index_t(i));
|
||||
const XformSample sample = xs.getValue(ISampleSelector(sample_time));
|
||||
temp_xform_samples->insert({sample_time, sample.getMatrix()});
|
||||
}
|
||||
|
||||
if (matrix_samples_data.samples != nullptr) {
|
||||
concatenate_xform_samples(
|
||||
*matrix_samples_data.samples, local_xform_samples, concatenated_xform_samples);
|
||||
}
|
||||
|
||||
matrix_samples_data.samples = &concatenated_xform_samples;
|
||||
matrix_samples_data.time_sampling = ts;
|
||||
}
|
||||
|
||||
next_object = xform;
|
||||
}
|
||||
else if (ISubD::matches(header)) {
|
||||
const ISubD subd(parent, header.getName());
|
||||
|
||||
unordered_map<std::string, AlembicObject *>::const_iterator iter;
|
||||
iter = object_map.find(subd.getFullName());
|
||||
|
||||
if (iter != object_map.end()) {
|
||||
AlembicObject *abc_object = iter->second;
|
||||
abc_object->iobject = subd;
|
||||
abc_object->schema_type = AlembicObject::SUBD;
|
||||
|
||||
if (matrix_samples_data.samples) {
|
||||
abc_object->xform_samples = *matrix_samples_data.samples;
|
||||
abc_object->xform_time_sampling = matrix_samples_data.time_sampling;
|
||||
}
|
||||
}
|
||||
|
||||
next_object = subd;
|
||||
}
|
||||
else if (IPolyMesh::matches(header)) {
|
||||
const IPolyMesh mesh(parent, header.getName());
|
||||
|
||||
unordered_map<std::string, AlembicObject *>::const_iterator iter;
|
||||
iter = object_map.find(mesh.getFullName());
|
||||
|
||||
if (iter != object_map.end()) {
|
||||
AlembicObject *abc_object = iter->second;
|
||||
abc_object->iobject = mesh;
|
||||
abc_object->schema_type = AlembicObject::POLY_MESH;
|
||||
|
||||
if (matrix_samples_data.samples) {
|
||||
abc_object->xform_samples = *matrix_samples_data.samples;
|
||||
abc_object->xform_time_sampling = matrix_samples_data.time_sampling;
|
||||
}
|
||||
}
|
||||
|
||||
next_object = mesh;
|
||||
}
|
||||
else if (ICurves::matches(header)) {
|
||||
const ICurves curves(parent, header.getName());
|
||||
|
||||
unordered_map<std::string, AlembicObject *>::const_iterator iter;
|
||||
iter = object_map.find(curves.getFullName());
|
||||
|
||||
if (iter != object_map.end()) {
|
||||
AlembicObject *abc_object = iter->second;
|
||||
abc_object->iobject = curves;
|
||||
abc_object->schema_type = AlembicObject::CURVES;
|
||||
|
||||
if (matrix_samples_data.samples) {
|
||||
abc_object->xform_samples = *matrix_samples_data.samples;
|
||||
abc_object->xform_time_sampling = matrix_samples_data.time_sampling;
|
||||
}
|
||||
}
|
||||
|
||||
next_object = curves;
|
||||
}
|
||||
else if (IFaceSet::matches(header)) {
|
||||
// Ignore the face set; it will be read along with the data.
|
||||
}
|
||||
else if (IPoints::matches(header)) {
|
||||
const IPoints points(parent, header.getName());
|
||||
|
||||
unordered_map<std::string, AlembicObject *>::const_iterator iter;
|
||||
iter = object_map.find(points.getFullName());
|
||||
|
||||
if (iter != object_map.end()) {
|
||||
AlembicObject *abc_object = iter->second;
|
||||
abc_object->iobject = points;
|
||||
abc_object->schema_type = AlembicObject::POINTS;
|
||||
|
||||
if (matrix_samples_data.samples) {
|
||||
abc_object->xform_samples = *matrix_samples_data.samples;
|
||||
abc_object->xform_time_sampling = matrix_samples_data.time_sampling;
|
||||
}
|
||||
}
|
||||
|
||||
next_object = points;
|
||||
}
|
||||
else if (INuPatch::matches(header)) {
|
||||
// unsupported for now
|
||||
}
|
||||
else {
|
||||
next_object = parent.getChild(header.getName());
|
||||
|
||||
if (next_object.isInstanceRoot()) {
|
||||
unordered_map<std::string, AlembicObject *>::const_iterator iter;
|
||||
|
||||
/* Was this object asked to be rendered? */
|
||||
iter = object_map.find(next_object.getFullName());
|
||||
|
||||
if (iter != object_map.end()) {
|
||||
AlembicObject *abc_object = iter->second;
|
||||
|
||||
/* Only try to render an instance if the original object is also rendered. */
|
||||
iter = object_map.find(next_object.instanceSourcePath());
|
||||
|
||||
if (iter != object_map.end()) {
|
||||
abc_object->iobject = next_object;
|
||||
abc_object->instance_of = iter->second;
|
||||
|
||||
if (matrix_samples_data.samples) {
|
||||
abc_object->xform_samples = *matrix_samples_data.samples;
|
||||
abc_object->xform_time_sampling = matrix_samples_data.time_sampling;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (next_object.valid()) {
|
||||
for (size_t i = 0; i < next_object.getNumChildren(); ++i) {
|
||||
walk_hierarchy(
|
||||
next_object, next_object.getChildHeader(i), matrix_samples_data, object_map, progress);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void AlembicProcedural::build_caches(Progress &progress)
|
||||
{
|
||||
size_t memory_used = 0;
|
||||
|
||||
for (Node *node : nodes) {
|
||||
AlembicObject *object = static_cast<AlembicObject *>(node);
|
||||
|
||||
if (progress.get_cancel()) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (object->schema_type == AlembicObject::POLY_MESH) {
|
||||
if (!object->has_data_loaded()) {
|
||||
IPolyMesh polymesh(object->iobject, Alembic::Abc::kWrapExisting);
|
||||
IPolyMeshSchema schema = polymesh.getSchema();
|
||||
object->load_data_in_cache(object->get_cached_data(), this, schema, progress);
|
||||
}
|
||||
else if (object->need_shader_update) {
|
||||
IPolyMesh polymesh(object->iobject, Alembic::Abc::kWrapExisting);
|
||||
const IPolyMeshSchema schema = polymesh.getSchema();
|
||||
read_attributes(this,
|
||||
object->get_cached_data(),
|
||||
schema,
|
||||
schema.getUVsParam(),
|
||||
object->get_requested_attributes(),
|
||||
progress);
|
||||
}
|
||||
}
|
||||
else if (object->schema_type == AlembicObject::CURVES) {
|
||||
if (!object->has_data_loaded() || default_radius_is_modified() ||
|
||||
object->radius_scale_is_modified())
|
||||
{
|
||||
ICurves curves(object->iobject, Alembic::Abc::kWrapExisting);
|
||||
const ICurvesSchema schema = curves.getSchema();
|
||||
object->load_data_in_cache(object->get_cached_data(), this, schema, progress);
|
||||
}
|
||||
}
|
||||
else if (object->schema_type == AlembicObject::POINTS) {
|
||||
if (!object->has_data_loaded() || default_radius_is_modified() ||
|
||||
object->radius_scale_is_modified())
|
||||
{
|
||||
IPoints points(object->iobject, Alembic::Abc::kWrapExisting);
|
||||
const IPointsSchema schema = points.getSchema();
|
||||
object->load_data_in_cache(object->get_cached_data(), this, schema, progress);
|
||||
}
|
||||
}
|
||||
else if (object->schema_type == AlembicObject::SUBD) {
|
||||
if (!object->has_data_loaded()) {
|
||||
ISubD subd_mesh(object->iobject, Alembic::Abc::kWrapExisting);
|
||||
ISubDSchema schema = subd_mesh.getSchema();
|
||||
object->load_data_in_cache(object->get_cached_data(), this, schema, progress);
|
||||
}
|
||||
else if (object->need_shader_update) {
|
||||
ISubD subd_mesh(object->iobject, Alembic::Abc::kWrapExisting);
|
||||
const ISubDSchema schema = subd_mesh.getSchema();
|
||||
read_attributes(this,
|
||||
object->get_cached_data(),
|
||||
schema,
|
||||
schema.getUVsParam(),
|
||||
object->get_requested_attributes(),
|
||||
progress);
|
||||
}
|
||||
}
|
||||
|
||||
if (scale_is_modified() || object->get_cached_data().transforms.size() == 0) {
|
||||
object->setup_transform_cache(object->get_cached_data(), scale);
|
||||
}
|
||||
|
||||
memory_used += object->get_cached_data().memory_used();
|
||||
|
||||
if (use_prefetch) {
|
||||
if (memory_used > get_prefetch_cache_size_in_bytes()) {
|
||||
progress.set_error("Error: Alembic Procedural memory limit reached");
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
LOG_DEBUG << "AlembicProcedural memory usage: " << string_human_readable_size(memory_used);
|
||||
}

CCL_NAMESPACE_END

#endif
@@ -1,567 +0,0 @@
/* SPDX-FileCopyrightText: 2011-2022 Blender Foundation
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0 */
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "graph/node.h"
|
||||
#include "scene/attribute.h"
|
||||
#include "scene/procedural.h"
|
||||
#include "util/transform.h"
|
||||
#include "util/vector.h"
|
||||
|
||||
#ifdef WITH_ALEMBIC
|
||||
|
||||
# include <Alembic/AbcCoreFactory/All.h>
|
||||
# include <Alembic/AbcGeom/All.h>
|
||||
|
||||
CCL_NAMESPACE_BEGIN
|
||||
|
||||
class AlembicProcedural;
|
||||
class Geometry;
|
||||
class Object;
|
||||
class Progress;
|
||||
class Shader;
|
||||
|
||||
using MatrixSampleMap = std::map<Alembic::Abc::chrono_t, Alembic::Abc::M44d>;
|
||||
|
||||
struct MatrixSamplesData {
|
||||
MatrixSampleMap *samples = nullptr;
|
||||
Alembic::AbcCoreAbstract::TimeSamplingPtr time_sampling;
|
||||
};
|
||||
|
||||
/* Helpers to detect if some type is a `ccl::array`. */
|
||||
template<typename> struct is_array : public std::false_type {};
|
||||
|
||||
template<typename T> struct is_array<array<T>> : public std::true_type {};
|
||||
|
||||
/* Holds the data for a cache lookup at a given time, as well as information to
|
||||
* help disambiguate successes or failures to get data from the cache. */
|
||||
template<typename T> class CacheLookupResult {
|
||||
enum class State {
|
||||
NEW_DATA,
|
||||
ALREADY_LOADED,
|
||||
NO_DATA_FOR_TIME,
|
||||
};
|
||||
|
||||
T *data;
|
||||
State state;
|
||||
|
||||
protected:
|
||||
/* Prevent default construction outside of the class: for a valid result, we
|
||||
* should use the static functions below. */
|
||||
CacheLookupResult() = default;
|
||||
|
||||
public:
|
||||
static CacheLookupResult new_data(T *data_)
|
||||
{
|
||||
CacheLookupResult result;
|
||||
result.data = data_;
|
||||
result.state = State::NEW_DATA;
|
||||
return result;
|
||||
}
|
||||
|
||||
static CacheLookupResult no_data_found_for_time()
|
||||
{
|
||||
CacheLookupResult result;
|
||||
result.data = nullptr;
|
||||
result.state = State::NO_DATA_FOR_TIME;
|
||||
return result;
|
||||
}
|
||||
|
||||
static CacheLookupResult already_loaded()
|
||||
{
|
||||
CacheLookupResult result;
|
||||
result.data = nullptr;
|
||||
result.state = State::ALREADY_LOADED;
|
||||
return result;
|
||||
}
|
||||
|
||||
/* This should only be called if new data is available. */
|
||||
const T &get_data() const
|
||||
{
|
||||
assert(state == State::NEW_DATA);
|
||||
assert(data != nullptr);
|
||||
return *data;
|
||||
}
|
||||
|
||||
T *get_data_or_null() const
|
||||
{
|
||||
// data should already be null if there is no new data, so there is no need to check.
|
||||
return data;
|
||||
}
|
||||
|
||||
bool has_new_data() const
|
||||
{
|
||||
return state == State::NEW_DATA;
|
||||
}
|
||||
|
||||
bool has_already_loaded() const
|
||||
{
|
||||
return state == State::ALREADY_LOADED;
|
||||
}
|
||||
|
||||
bool has_no_data_for_time() const
|
||||
{
|
||||
return state == State::NO_DATA_FOR_TIME;
|
||||
}
|
||||
};
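/* Illustrative sketch (not from the original header): the intended usage pattern for the
 * tri-state result above. Callers only act on NEW_DATA; ALREADY_LOADED and NO_DATA_FOR_TIME
 * are deliberately no-ops so that per-frame updates stay cheap. `ApplyFn` is a hypothetical
 * callable taking a `const T &`. */
template<typename T, typename ApplyFn>
void apply_if_new_data(const CacheLookupResult<T> &result, ApplyFn &&apply)
{
  if (result.has_new_data()) {
    /* Safe: the NEW_DATA state implies the stored pointer is non-null. */
    apply(result.get_data());
  }
  /* has_already_loaded(): the target already holds this frame's value, nothing to do.
   * has_no_data_for_time(): keep the previously loaded value rather than clearing it. */
}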
|
||||
|
||||
/* Store the data set for an animation at every time point, or at the beginning of the animation
* for constant data.
*
* The data is supposed to be stored in chronological order, and is looked up using the current
* animation time in seconds and the TimeSampling from the Alembic property. */
|
||||
template<typename T> class DataStore {
|
||||
/* Holds information to map a cache entry for a given time to an index into the data array. */
|
||||
struct TimeIndexPair {
|
||||
/* Frame time for this entry. */
|
||||
double time = 0;
|
||||
/* Frame time for the data pointed to by `index`. */
|
||||
double source_time = 0;
|
||||
/* Index into the data array. */
|
||||
size_t index = 0;
|
||||
};
|
||||
|
||||
/* This is the actual data that is stored. We deduplicate data across frames to avoid storing
|
||||
* values if they have not changed yet (e.g. the triangles for a building before fracturing, or a
|
||||
* fluid simulation before a break or splash) */
|
||||
vector<T> data{};
|
||||
|
||||
/* This is used to map the entry for a given time to an index into the data array; multiple
* frames can point to the same index. */
|
||||
vector<TimeIndexPair> index_data_map{};
|
||||
|
||||
Alembic::AbcCoreAbstract::TimeSampling time_sampling{};
|
||||
|
||||
double last_loaded_time = std::numeric_limits<double>::max();
|
||||
|
||||
public:
|
||||
/* Keys used to compare values. */
|
||||
Alembic::AbcCoreAbstract::ArraySample::Key key1;
|
||||
Alembic::AbcCoreAbstract::ArraySample::Key key2;
|
||||
|
||||
void set_time_sampling(Alembic::AbcCoreAbstract::TimeSampling time_sampling_)
|
||||
{
|
||||
time_sampling = time_sampling_;
|
||||
}
|
||||
|
||||
Alembic::AbcCoreAbstract::TimeSampling get_time_sampling() const
|
||||
{
|
||||
return time_sampling;
|
||||
}
|
||||
|
||||
/* Get the data for the specified time.
* Return a result without data if there is no data, or if the data for this time was already
* loaded. */
|
||||
CacheLookupResult<T> data_for_time(const double time)
|
||||
{
|
||||
if (size() == 0) {
|
||||
return CacheLookupResult<T>::no_data_found_for_time();
|
||||
}
|
||||
|
||||
const TimeIndexPair &index = get_index_for_time(time);
|
||||
|
||||
if (index.index == -1ul) {
|
||||
return CacheLookupResult<T>::no_data_found_for_time();
|
||||
}
|
||||
|
||||
if (last_loaded_time == index.time || last_loaded_time == index.source_time) {
|
||||
return CacheLookupResult<T>::already_loaded();
|
||||
}
|
||||
|
||||
last_loaded_time = index.source_time;
|
||||
|
||||
assert(index.index < data.size());
|
||||
|
||||
return CacheLookupResult<T>::new_data(&data[index.index]);
|
||||
}
|
||||
|
||||
/* Get the data for the specified time, but do not check whether the data was already loaded
* for this time. Return a result without data if there is no data. */
|
||||
CacheLookupResult<T> data_for_time_no_check(const double time)
|
||||
{
|
||||
if (size() == 0) {
|
||||
return CacheLookupResult<T>::no_data_found_for_time();
|
||||
}
|
||||
|
||||
const TimeIndexPair &index = get_index_for_time(time);
|
||||
|
||||
if (index.index == -1ul) {
|
||||
return CacheLookupResult<T>::no_data_found_for_time();
|
||||
}
|
||||
|
||||
assert(index.index < data.size());
|
||||
|
||||
return CacheLookupResult<T>::new_data(&data[index.index]);
|
||||
}
|
||||
|
||||
void add_data(T &data_, double time)
|
||||
{
|
||||
index_data_map.push_back({time, time, data.size()});
|
||||
|
||||
if constexpr (is_array<T>::value) {
|
||||
data.emplace_back();
|
||||
data.back().steal_data(data_);
|
||||
return;
|
||||
}
|
||||
|
||||
data.push_back(data_);
|
||||
}
|
||||
|
||||
void reuse_data_for_last_time(const double time)
|
||||
{
|
||||
const TimeIndexPair &data_index = index_data_map.back();
|
||||
index_data_map.push_back({time, data_index.source_time, data_index.index});
|
||||
}
|
||||
|
||||
void add_no_data(const double time)
|
||||
{
|
||||
index_data_map.push_back({time, time, -1ul});
|
||||
}
|
||||
|
||||
bool is_constant() const
|
||||
{
|
||||
return data.size() <= 1;
|
||||
}
|
||||
|
||||
size_t size() const
|
||||
{
|
||||
return data.size();
|
||||
}
|
||||
|
||||
void clear()
|
||||
{
|
||||
invalidate_last_loaded_time();
|
||||
data.clear();
|
||||
index_data_map.clear();
|
||||
}
|
||||
|
||||
void invalidate_last_loaded_time()
|
||||
{
|
||||
last_loaded_time = std::numeric_limits<double>::max();
|
||||
}
|
||||
|
||||
/* Copy the data for the specified time to the node's socket. If there is no
|
||||
* data for this time or it was already loaded, do nothing. */
|
||||
void copy_to_socket(const double time, Node *node, const SocketType *socket)
|
||||
{
|
||||
CacheLookupResult<T> result = data_for_time(time);
|
||||
|
||||
if (!result.has_new_data()) {
|
||||
return;
|
||||
}
|
||||
|
||||
/* TODO(kevindietrich): arrays are emptied when passed to the sockets, so for now we copy the
|
||||
* arrays to avoid reloading the data */
|
||||
T value = result.get_data();
|
||||
node->set(*socket, value);
|
||||
}
|
||||
|
||||
size_t memory_used() const
|
||||
{
|
||||
if constexpr (is_array<T>::value) {
|
||||
size_t mem_used = 0;
|
||||
|
||||
for (const T &array : data) {
|
||||
mem_used += array.size() * sizeof(array[0]);
|
||||
}
|
||||
|
||||
return mem_used;
|
||||
}
|
||||
|
||||
return data.size() * sizeof(T);
|
||||
}
|
||||
|
||||
private:
|
||||
const TimeIndexPair &get_index_for_time(const double time) const
|
||||
{
|
||||
std::pair<size_t, Alembic::Abc::chrono_t> index_pair;
|
||||
index_pair = time_sampling.getNearIndex(time, index_data_map.size());
|
||||
return index_data_map[index_pair.first];
|
||||
}
|
||||
};
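/* Illustrative sketch (not from the original header): a simplified, standalone analog of the
 * time -> payload indirection that DataStore implements above, using only the standard library.
 * The type and member names are hypothetical. */
#include <cstddef>
#include <vector>

template<typename T> class ExampleTimeIndexedStore {
  struct Entry {
    double time;
    std::size_t index;
  };
  std::vector<T> payloads;
  std::vector<Entry> entries;

 public:
  /* Store a new payload for the given time. */
  void add(const T &value, const double time)
  {
    entries.push_back({time, payloads.size()});
    payloads.push_back(value);
  }

  /* Map a new time onto the previous payload without copying it, mirroring
   * reuse_data_for_last_time(): constant stretches of the animation share a single payload. */
  void reuse_last(const double time)
  {
    entries.push_back({time, entries.back().index});
  }

  const T &payload_for_entry(const std::size_t i) const
  {
    return payloads[entries[i].index];
  }
};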
|
||||
|
||||
/* Actual cache for the stored data.
|
||||
* This caches the topological, transformation, and attribute data for a Mesh node or a Hair node
|
||||
* inside of DataStores.
|
||||
*/
|
||||
struct CachedData {
|
||||
DataStore<Transform> transforms{};
|
||||
|
||||
/* mesh data */
|
||||
DataStore<array<float3>> vertices;
|
||||
DataStore<array<int3>> triangles{};
|
||||
/* Triangle "loops" are the polygons' vertex indices, used for indexing face-varying attributes
* (like UVs). */
|
||||
DataStore<array<int>> uv_loops{};
|
||||
DataStore<array<int>> shader{};
|
||||
|
||||
/* subd data */
|
||||
DataStore<array<int>> subd_start_corner;
|
||||
DataStore<array<int>> subd_num_corners;
|
||||
DataStore<array<bool>> subd_smooth;
|
||||
DataStore<array<int>> subd_ptex_offset;
|
||||
DataStore<array<int>> subd_face_corners;
|
||||
DataStore<array<int>> subd_creases_edge;
|
||||
DataStore<array<float>> subd_creases_weight;
|
||||
DataStore<array<int>> subd_vertex_crease_indices;
|
||||
DataStore<array<float>> subd_vertex_crease_weights;
|
||||
|
||||
/* hair data */
|
||||
DataStore<array<float3>> curve_keys;
|
||||
DataStore<array<float>> curve_radius;
|
||||
DataStore<array<int>> curve_first_key;
|
||||
DataStore<array<int>> curve_shader;
|
||||
|
||||
/* point data */
|
||||
DataStore<array<float3>> points;
|
||||
DataStore<array<float>> radiuses;
|
||||
DataStore<array<int>> points_shader;
|
||||
|
||||
struct CachedAttribute {
|
||||
AttributeStandard std;
|
||||
AttributeElement element;
|
||||
TypeDesc type_desc;
|
||||
ustring name;
|
||||
DataStore<array<char>> data{};
|
||||
};
|
||||
|
||||
vector<CachedAttribute> attributes{};
|
||||
|
||||
void clear();
|
||||
|
||||
CachedAttribute &add_attribute(const ustring &name,
|
||||
const Alembic::Abc::TimeSampling &time_sampling);
|
||||
|
||||
bool is_constant() const;
|
||||
|
||||
void invalidate_last_loaded_time(bool attributes_only = false);
|
||||
|
||||
void set_time_sampling(Alembic::AbcCoreAbstract::TimeSampling time_sampling);
|
||||
|
||||
size_t memory_used() const;
|
||||
};
|
||||
|
||||
/* Representation of an Alembic object for the AlembicProcedural.
|
||||
*
|
||||
* The AlembicObject holds the path to the Alembic IObject inside of the archive that is desired
|
||||
* for rendering, as well as the list of shaders that it is using.
|
||||
*
|
||||
* The names of the shaders should correspond to the names of the FaceSets inside of the Alembic
* archive for per-triangle shader association. If there are no FaceSets, or the names do not
* match, the first shader is used for all triangles.
|
||||
*/
|
||||
class AlembicObject : public Node {
|
||||
public:
|
||||
NODE_DECLARE
|
||||
|
||||
/* Path to the IObject inside of the archive. */
|
||||
NODE_SOCKET_API(ustring, path)
|
||||
|
||||
/* Shaders used for rendering. */
|
||||
NODE_SOCKET_API_ARRAY(array<Node *>, used_shaders)
|
||||
|
||||
/* Treat this subdivision object as a regular polygon mesh, so no subdivision will be performed.
|
||||
*/
|
||||
NODE_SOCKET_API(bool, ignore_subdivision)
|
||||
|
||||
/* Maximum number of subdivisions for ISubD objects. */
|
||||
NODE_SOCKET_API(int, subd_max_level)
|
||||
|
||||
/* Finest level of detail (in pixels) for the subdivision. */
|
||||
NODE_SOCKET_API(float, subd_dicing_rate)
|
||||
|
||||
/* Scale the radius of points and curves. */
|
||||
NODE_SOCKET_API(float, radius_scale)
|
||||
|
||||
AlembicObject();
|
||||
~AlembicObject() override;
|
||||
|
||||
private:
|
||||
friend class AlembicProcedural;
|
||||
|
||||
void set_object(Object *object);
|
||||
Object *get_object();
|
||||
|
||||
void load_data_in_cache(CachedData &cached_data,
|
||||
AlembicProcedural *proc,
|
||||
Alembic::AbcGeom::IPolyMeshSchema &schema,
|
||||
Progress &progress);
|
||||
void load_data_in_cache(CachedData &cached_data,
|
||||
AlembicProcedural *proc,
|
||||
Alembic::AbcGeom::ISubDSchema &schema,
|
||||
Progress &progress);
|
||||
void load_data_in_cache(CachedData &cached_data,
|
||||
AlembicProcedural *proc,
|
||||
const Alembic::AbcGeom::ICurvesSchema &schema,
|
||||
Progress &progress);
|
||||
void load_data_in_cache(CachedData &cached_data,
|
||||
AlembicProcedural *proc,
|
||||
const Alembic::AbcGeom::IPointsSchema &schema,
|
||||
Progress &progress);
|
||||
|
||||
bool has_data_loaded() const;
|
||||
|
||||
/* Enumeration used to speed up the discrimination of an IObject, as the IObject::matches()
* methods are too expensive and show up in profiles. */
|
||||
enum AbcSchemaType {
|
||||
INVALID,
|
||||
POLY_MESH,
|
||||
SUBD,
|
||||
CURVES,
|
||||
POINTS,
|
||||
};
|
||||
|
||||
bool need_shader_update = true;
|
||||
|
||||
AlembicObject *instance_of = nullptr;
|
||||
|
||||
Alembic::AbcCoreAbstract::TimeSamplingPtr xform_time_sampling;
|
||||
MatrixSampleMap xform_samples;
|
||||
Alembic::AbcGeom::IObject iobject;
|
||||
|
||||
/* Set if the path points to a valid IObject whose type is supported. */
|
||||
AbcSchemaType schema_type;
|
||||
|
||||
CachedData &get_cached_data()
|
||||
{
|
||||
return cached_data_;
|
||||
}
|
||||
|
||||
bool is_constant() const
|
||||
{
|
||||
return cached_data_.is_constant();
|
||||
}
|
||||
|
||||
void clear_cache()
|
||||
{
|
||||
cached_data_.clear();
|
||||
}
|
||||
|
||||
Object *object = nullptr;
|
||||
|
||||
bool data_loaded = false;
|
||||
|
||||
CachedData cached_data_;
|
||||
|
||||
void setup_transform_cache(CachedData &cached_data, const float scale);
|
||||
|
||||
AttributeRequestSet get_requested_attributes();
|
||||
};
|
||||
|
||||
/* Procedural to render objects from a single Alembic archive.
|
||||
*
|
||||
* Every object desired to be rendered should be passed as an AlembicObject through the objects
|
||||
* socket.
|
||||
*
|
||||
* This procedural will load the data set for the entire animation in memory on the first frame,
|
||||
* and directly set the data for the new frames on the created Nodes if needed. This allows for
|
||||
* faster updates between frames as it avoids reseeking the data on disk.
|
||||
*/
|
||||
class AlembicProcedural : public Procedural {
|
||||
Alembic::AbcGeom::IArchive archive;
|
||||
bool objects_loaded = false;
|
||||
bool objects_modified = false;
|
||||
Scene *scene_ = nullptr;
|
||||
|
||||
public:
|
||||
NODE_DECLARE
|
||||
|
||||
/* The file path to the Alembic archive */
|
||||
NODE_SOCKET_API(ustring, filepath)
|
||||
|
||||
/* Layers for the Alembic archive. Layers are in the order in which they override data, with the
|
||||
* latter elements overriding the former ones. */
|
||||
NODE_SOCKET_API_ARRAY(array<ustring>, layers)
|
||||
|
||||
/* The current frame to render. */
|
||||
NODE_SOCKET_API(float, frame)
|
||||
|
||||
/* The first frame to load data for. */
|
||||
NODE_SOCKET_API(float, start_frame)
|
||||
|
||||
/* The last frame to load data for. */
|
||||
NODE_SOCKET_API(float, end_frame)
|
||||
|
||||
/* Subtracted from the current frame. */
|
||||
NODE_SOCKET_API(float, frame_offset)
|
||||
|
||||
/* The frame rate used for rendering in units of frames per second. */
|
||||
NODE_SOCKET_API(float, frame_rate)
|
||||
|
||||
/* Set the default radius to use for curves when the Alembic Curves Schemas do not have radius
|
||||
* information. */
|
||||
NODE_SOCKET_API(float, default_radius)
|
||||
|
||||
/* Multiplier to account for differences in default units for measuring objects in various
|
||||
* software. */
|
||||
NODE_SOCKET_API(float, scale)
|
||||
|
||||
/* Cache controls */
|
||||
NODE_SOCKET_API(bool, use_prefetch)
|
||||
|
||||
/* Memory limit for the cache; if the data does not fit within this limit, rendering is
* aborted. */
|
||||
NODE_SOCKET_API(int, prefetch_cache_size)
|
||||
|
||||
AlembicProcedural();
|
||||
~AlembicProcedural() override;
|
||||
|
||||
/* Populates the Cycles scene with Nodes for every contained AlembicObject on the first
|
||||
* invocation, and updates the data on subsequent invocations if the frame changed. */
|
||||
void generate(Scene *scene, Progress &progress) override;
|
||||
|
||||
/* Tag for an update only if something was modified. */
|
||||
void tag_update(Scene *scene);
|
||||
|
||||
/* This should be called by scene exporters to request the rendering of an object located
* in the Alembic archive at the given path.
*
* Since we lazily load objects, the function does not validate the existence of the object
* in the archive. If no object with such a path is found in the archive during the next call
* to `generate`, the request is ignored.
*
* Returns a pointer to an existing or a newly created AlembicObject for the given path. */
|
||||
AlembicObject *get_or_create_object(const ustring &path);
|
||||
|
||||
private:
|
||||
/* Load the data for all the objects whose data has not yet been loaded. */
|
||||
void load_objects(Progress &progress);
|
||||
|
||||
/* Traverse the Alembic hierarchy to look up the IObjects for the AlembicObjects that were
* specified in our objects socket, and accumulate all of the transformation samples along the
* way for each IObject. */
|
||||
void walk_hierarchy(Alembic::AbcGeom::IObject parent,
|
||||
const Alembic::AbcGeom::ObjectHeader &header,
|
||||
MatrixSamplesData matrix_samples_data,
|
||||
const unordered_map<string, AlembicObject *> &object_map,
|
||||
Progress &progress);
|
||||
|
||||
/* Read the data for an IPolyMesh at the specified frame_time. Creates corresponding Geometry and
|
||||
* Object Nodes in the Cycles scene if none exist yet. */
|
||||
void read_mesh(AlembicObject *abc_object, Alembic::AbcGeom::Abc::chrono_t frame_time);
|
||||
|
||||
/* Read the data for an ICurves at the specified frame_time. Creates corresponding Geometry and
|
||||
* Object Nodes in the Cycles scene if none exist yet. */
|
||||
void read_curves(AlembicObject *abc_object, Alembic::AbcGeom::Abc::chrono_t frame_time);
|
||||
|
||||
/* Read the data for an IPoints at the specified frame_time. Creates corresponding Geometry and
|
||||
* Object Nodes in the Cycles scene if none exist yet. */
|
||||
void read_points(AlembicObject *abc_object, Alembic::AbcGeom::Abc::chrono_t frame_time);
|
||||
|
||||
/* Read the data for an ISubD at the specified frame_time. Creates corresponding Geometry and
|
||||
* Object Nodes in the Cycles scene if none exist yet. */
|
||||
void read_subd(AlembicObject *abc_object, Alembic::AbcGeom::Abc::chrono_t frame_time);
|
||||
|
||||
void build_caches(Progress &progress);
|
||||
|
||||
size_t get_prefetch_cache_size_in_bytes() const
|
||||
{
|
||||
/* prefetch_cache_size is in megabytes, so convert to bytes. */
|
||||
return static_cast<size_t>(prefetch_cache_size) * 1024 * 1024;
|
||||
}
|
||||
};
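/* Illustrative sketch (not from the original header): minimal driver-side setup, assuming the
 * setters generated by the NODE_SOCKET_API declarations above and the usual Scene::create_node
 * factory. The archive path and object path are hypothetical. */
static inline AlembicProcedural *example_create_procedural(Scene *scene)
{
  AlembicProcedural *proc = scene->create_node<AlembicProcedural>();
  proc->set_filepath(ustring("/tmp/example.abc"));
  proc->set_frame_rate(24.0f);
  proc->set_start_frame(1.0f);
  proc->set_end_frame(250.0f);
  proc->set_frame(1.0f);
  proc->set_use_prefetch(true);
  proc->set_prefetch_cache_size(4096); /* In megabytes; converted to bytes internally. */

  /* The lookup is lazy: this only records the request, and the next generate() call resolves it
   * against the archive, ignoring it if the path does not exist. */
  proc->get_or_create_object(ustring("/example/mesh"));
  return proc;
}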

CCL_NAMESPACE_END

#endif
@@ -1,1112 +0,0 @@
/* SPDX-FileCopyrightText: 2021-2022 Blender Foundation
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0 */
|
||||
|
||||
#include <algorithm>
|
||||
|
||||
#include "scene/alembic.h"
|
||||
#include "scene/alembic_read.h"
|
||||
#include "scene/mesh.h"
|
||||
|
||||
#include "util/color.h"
|
||||
#include "util/progress.h"
|
||||
|
||||
#ifdef WITH_ALEMBIC
|
||||
|
||||
using namespace Alembic::AbcGeom;
|
||||
|
||||
CCL_NAMESPACE_BEGIN
|
||||
|
||||
static float3 make_float3_from_yup(const V3f &v)
|
||||
{
|
||||
return make_float3(v.x, -v.z, v.y);
|
||||
}
|
||||
|
||||
/* Get the sample times to load data for, given the start and end frames of the procedural. */
|
||||
static set<chrono_t> get_relevant_sample_times(AlembicProcedural *proc,
|
||||
const TimeSampling &time_sampling,
|
||||
const size_t num_samples)
|
||||
{
|
||||
set<chrono_t> result;
|
||||
|
||||
if (num_samples < 2) {
|
||||
result.insert(0.0);
|
||||
return result;
|
||||
}
|
||||
|
||||
double start_frame;
|
||||
double end_frame;
|
||||
|
||||
if (proc->get_use_prefetch()) {
|
||||
// load the data for the entire animation
|
||||
start_frame = static_cast<double>(proc->get_start_frame());
|
||||
end_frame = static_cast<double>(proc->get_end_frame());
|
||||
}
|
||||
else {
|
||||
// load the data for the current frame
|
||||
start_frame = static_cast<double>(proc->get_frame());
|
||||
end_frame = start_frame;
|
||||
}
|
||||
|
||||
const double frame_rate = static_cast<double>(proc->get_frame_rate());
|
||||
const double frame_offset = proc->get_frame_offset();
|
||||
const double start_time = (start_frame - frame_offset) / frame_rate;
|
||||
const double end_time = (end_frame - frame_offset + 1) / frame_rate;
|
||||
|
||||
const size_t start_index = time_sampling.getFloorIndex(start_time, num_samples).first;
|
||||
const size_t end_index = time_sampling.getCeilIndex(end_time, num_samples).first;
|
||||
|
||||
for (size_t i = start_index; i < end_index; ++i) {
|
||||
result.insert(time_sampling.getSampleTime(i));
|
||||
}
|
||||
|
||||
return result;
|
||||
}
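/* Illustrative note (not from the original file): worked example of the frame-to-time mapping
 * above. With frame_rate = 24, frame_offset = 0 and the current frame 48, start_time is
 * (48 - 0) / 24 = 2.0 seconds and end_time is (48 - 0 + 1) / 24 ~= 2.042 seconds, so the archive
 * samples bracketing that window are loaded. A standalone helper with the same arithmetic: */
static inline double example_frame_to_time(const double frame,
                                           const double frame_offset,
                                           const double frame_rate)
{
  return (frame - frame_offset) / frame_rate;
}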
|
||||
|
||||
/* Main function to read data. This will iterate over all the relevant sample times for the
* duration of the requested animation, and call the DataReadingFunc for each of those sample
* times. */
|
||||
template<typename Params, typename DataReadingFunc>
|
||||
static void read_data_loop(AlembicProcedural *proc,
|
||||
CachedData &cached_data,
|
||||
const Params ¶ms,
|
||||
DataReadingFunc &&func,
|
||||
Progress &progress)
|
||||
{
|
||||
const std::set<chrono_t> times = get_relevant_sample_times(
|
||||
proc, *params.time_sampling, params.num_samples);
|
||||
|
||||
cached_data.set_time_sampling(*params.time_sampling);
|
||||
|
||||
for (const chrono_t time : times) {
|
||||
if (progress.get_cancel()) {
|
||||
return;
|
||||
}
|
||||
|
||||
func(cached_data, params, time);
|
||||
}
|
||||
}
|
||||
|
||||
/* Polygon Mesh Geometries. */
|
||||
|
||||
/* Compute the vertex normals in case none are present in the IPolyMeshSchema. This is mostly
* done to avoid computing them in the GeometryManager, in order to speed up data updates. */
|
||||
static void compute_vertex_normals(CachedData &cache, const double current_time)
|
||||
{
|
||||
if (cache.vertices.size() == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
CachedData::CachedAttribute &attr_normal = cache.add_attribute(
|
||||
ustring("N"), cache.vertices.get_time_sampling());
|
||||
attr_normal.std = ATTR_STD_VERTEX_NORMAL;
|
||||
attr_normal.element = ATTR_ELEMENT_VERTEX;
|
||||
attr_normal.type_desc = TypeNormal;
|
||||
|
||||
const array<float3> *vertices =
|
||||
cache.vertices.data_for_time_no_check(current_time).get_data_or_null();
|
||||
const array<int3> *triangles =
|
||||
cache.triangles.data_for_time_no_check(current_time).get_data_or_null();
|
||||
|
||||
if (!vertices || !triangles) {
|
||||
attr_normal.data.add_no_data(current_time);
|
||||
return;
|
||||
}
|
||||
|
||||
array<char> attr_data(vertices->size() * sizeof(float3));
|
||||
float3 *attr_ptr = reinterpret_cast<float3 *>(attr_data.data());
|
||||
std::fill_n(attr_ptr, vertices->size(), zero_float3());
|
||||
|
||||
for (size_t t = 0; t < triangles->size(); ++t) {
|
||||
const int3 tri_int3 = triangles->data()[t];
|
||||
Mesh::Triangle tri{};
|
||||
tri.v[0] = tri_int3[0];
|
||||
tri.v[1] = tri_int3[1];
|
||||
tri.v[2] = tri_int3[2];
|
||||
|
||||
const float3 tri_N = tri.compute_normal(vertices->data());
|
||||
|
||||
for (int v = 0; v < 3; ++v) {
|
||||
attr_ptr[tri_int3[v]] += tri_N;
|
||||
}
|
||||
}
|
||||
|
||||
for (size_t v = 0; v < vertices->size(); ++v) {
|
||||
attr_ptr[v] = normalize(attr_ptr[v]);
|
||||
}
|
||||
|
||||
attr_normal.data.add_data(attr_data, current_time);
|
||||
}
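/* Illustrative sketch (not from the original file): the same accumulate-then-normalize scheme as
 * compute_vertex_normals() above, written against the standard library only so it can be read in
 * isolation. ExampleVec3 and the helpers are hypothetical stand-ins for float3 and
 * Mesh::Triangle::compute_normal(). */
#include <array>
#include <cmath>
#include <cstddef>
#include <vector>

using ExampleVec3 = std::array<float, 3>;

static inline ExampleVec3 example_normalized(const ExampleVec3 &v)
{
  const float len = std::sqrt(v[0] * v[0] + v[1] * v[1] + v[2] * v[2]);
  return (len > 0.0f) ? ExampleVec3{v[0] / len, v[1] / len, v[2] / len} : v;
}

static inline std::vector<ExampleVec3> example_vertex_normals(
    const std::vector<ExampleVec3> &face_normals,
    const std::vector<std::array<int, 3>> &triangles,
    const std::size_t num_vertices)
{
  /* Accumulate every incident face normal on each of the triangle's three vertices... */
  std::vector<ExampleVec3> normals(num_vertices, ExampleVec3{0.0f, 0.0f, 0.0f});
  for (std::size_t t = 0; t < triangles.size(); ++t) {
    for (const int v : triangles[t]) {
      normals[v][0] += face_normals[t][0];
      normals[v][1] += face_normals[t][1];
      normals[v][2] += face_normals[t][2];
    }
  }
  /* ... then normalize the sums to get smooth per-vertex normals. */
  for (ExampleVec3 &n : normals) {
    n = example_normalized(n);
  }
  return normals;
}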
|
||||
|
||||
static void add_normals(const Int32ArraySamplePtr face_indices,
|
||||
const IN3fGeomParam &normals,
|
||||
const double time,
|
||||
CachedData &cached_data)
|
||||
{
|
||||
switch (normals.getScope()) {
|
||||
case kFacevaryingScope: {
|
||||
const ISampleSelector iss = ISampleSelector(time);
|
||||
const IN3fGeomParam::Sample sample = normals.getExpandedValue(iss);
|
||||
|
||||
if (!sample.valid()) {
|
||||
return;
|
||||
}
|
||||
|
||||
CachedData::CachedAttribute &attr = cached_data.add_attribute(ustring(normals.getName()),
|
||||
*normals.getTimeSampling());
|
||||
attr.std = ATTR_STD_VERTEX_NORMAL;
|
||||
|
||||
const array<float3> *vertices =
|
||||
cached_data.vertices.data_for_time_no_check(time).get_data_or_null();
|
||||
|
||||
if (!vertices) {
|
||||
return;
|
||||
}
|
||||
|
||||
array<char> data;
|
||||
data.resize(vertices->size() * sizeof(float3));
|
||||
|
||||
float3 *data_float3 = reinterpret_cast<float3 *>(data.data());
|
||||
|
||||
const int *face_indices_array = face_indices->get();
|
||||
const N3fArraySamplePtr values = sample.getVals();
|
||||
|
||||
for (size_t i = 0; i < face_indices->size(); ++i) {
|
||||
const int point_index = face_indices_array[i];
|
||||
data_float3[point_index] = make_float3_from_yup(values->get()[i]);
|
||||
}
|
||||
|
||||
attr.data.add_data(data, time);
|
||||
break;
|
||||
}
|
||||
case kVaryingScope:
|
||||
case kVertexScope: {
|
||||
const ISampleSelector iss = ISampleSelector(time);
|
||||
const IN3fGeomParam::Sample sample = normals.getExpandedValue(iss);
|
||||
|
||||
if (!sample.valid()) {
|
||||
return;
|
||||
}
|
||||
|
||||
CachedData::CachedAttribute &attr = cached_data.add_attribute(ustring(normals.getName()),
|
||||
*normals.getTimeSampling());
|
||||
attr.std = ATTR_STD_VERTEX_NORMAL;
|
||||
|
||||
const array<float3> *vertices =
|
||||
cached_data.vertices.data_for_time_no_check(time).get_data_or_null();
|
||||
|
||||
if (!vertices) {
|
||||
return;
|
||||
}
|
||||
|
||||
array<char> data;
|
||||
data.resize(vertices->size() * sizeof(float3));
|
||||
|
||||
float3 *data_float3 = reinterpret_cast<float3 *>(data.data());
|
||||
|
||||
const Imath::V3f *values = sample.getVals()->get();
|
||||
|
||||
for (size_t i = 0; i < vertices->size(); ++i) {
|
||||
data_float3[i] = make_float3_from_yup(values[i]);
|
||||
}
|
||||
|
||||
attr.data.add_data(data, time);
|
||||
|
||||
break;
|
||||
}
|
||||
default: {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void add_positions(const P3fArraySamplePtr positions,
|
||||
const double time,
|
||||
CachedData &cached_data)
|
||||
{
|
||||
if (!positions) {
|
||||
return;
|
||||
}
|
||||
|
||||
array<float3> vertices;
|
||||
vertices.reserve(positions->size());
|
||||
|
||||
for (size_t i = 0; i < positions->size(); i++) {
|
||||
const V3f f = positions->get()[i];
|
||||
vertices.push_back_reserved(make_float3_from_yup(f));
|
||||
}
|
||||
|
||||
cached_data.vertices.add_data(vertices, time);
|
||||
}
|
||||
|
||||
static void add_triangles(const Int32ArraySamplePtr face_counts,
|
||||
const Int32ArraySamplePtr face_indices,
|
||||
const double time,
|
||||
CachedData &cached_data,
|
||||
const array<int> &polygon_to_shader)
|
||||
{
|
||||
if (!face_counts || !face_indices) {
|
||||
return;
|
||||
}
|
||||
|
||||
const size_t num_faces = face_counts->size();
|
||||
const int *face_counts_array = face_counts->get();
|
||||
const int *face_indices_array = face_indices->get();
|
||||
|
||||
size_t num_triangles = 0;
|
||||
for (size_t i = 0; i < face_counts->size(); i++) {
|
||||
num_triangles += face_counts_array[i] - 2;
|
||||
}
|
||||
|
||||
array<int> shader;
|
||||
array<int3> triangles;
|
||||
array<int> uv_loops;
|
||||
shader.reserve(num_triangles);
|
||||
triangles.reserve(num_triangles);
|
||||
uv_loops.reserve(num_triangles * 3);
|
||||
int index_offset = 0;
|
||||
|
||||
for (size_t i = 0; i < num_faces; i++) {
|
||||
int current_shader = 0;
|
||||
|
||||
if (!polygon_to_shader.empty()) {
|
||||
current_shader = polygon_to_shader[i];
|
||||
}
|
||||
|
||||
for (int j = 0; j < face_counts_array[i] - 2; j++) {
|
||||
const int v0 = face_indices_array[index_offset];
|
||||
const int v1 = face_indices_array[index_offset + j + 1];
|
||||
const int v2 = face_indices_array[index_offset + j + 2];
|
||||
|
||||
shader.push_back_reserved(current_shader);
|
||||
|
||||
/* Alembic orders the loops following the RenderMan convention, so we need to go in reverse. */
|
||||
triangles.push_back_reserved(make_int3(v2, v1, v0));
|
||||
uv_loops.push_back_reserved(index_offset + j + 2);
|
||||
uv_loops.push_back_reserved(index_offset + j + 1);
|
||||
uv_loops.push_back_reserved(index_offset);
|
||||
}
|
||||
|
||||
index_offset += face_counts_array[i];
|
||||
}
|
||||
|
||||
cached_data.triangles.add_data(triangles, time);
|
||||
cached_data.uv_loops.add_data(uv_loops, time);
|
||||
cached_data.shader.add_data(shader, time);
|
||||
}
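/* Illustrative sketch (not from the original file): the fan triangulation used above, in
 * isolation. A face with vertex indices {v0, v1, ..., vn-1} becomes the triangles
 * (v2, v1, v0), (v3, v2, v0), ..., with the winding reversed because Alembic stores its loops
 * following the clockwise RenderMan convention. */
#include <array>
#include <cstddef>
#include <vector>

static inline std::vector<std::array<int, 3>> example_fan_triangulate_reversed(
    const std::vector<int> &face)
{
  std::vector<std::array<int, 3>> tris;
  for (std::size_t j = 0; j + 2 < face.size(); ++j) {
    tris.push_back({face[j + 2], face[j + 1], face[0]});
  }
  return tris;
}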
|
||||
|
||||
static array<int> compute_polygon_to_shader_map(
|
||||
const Int32ArraySamplePtr &face_counts,
|
||||
const vector<FaceSetShaderIndexPair> &face_set_shader_index,
|
||||
ISampleSelector sample_sel)
|
||||
{
|
||||
if (face_set_shader_index.empty()) {
|
||||
return {};
|
||||
}
|
||||
|
||||
if (!face_counts) {
|
||||
return {};
|
||||
}
|
||||
|
||||
if (face_counts->size() == 0) {
|
||||
return {};
|
||||
}
|
||||
|
||||
array<int> polygon_to_shader(face_counts->size());
|
||||
|
||||
for (const FaceSetShaderIndexPair &pair : face_set_shader_index) {
|
||||
const IFaceSet &face_set = pair.face_set;
|
||||
const IFaceSetSchema face_schem = face_set.getSchema();
|
||||
const IFaceSetSchema::Sample face_sample = face_schem.getValue(sample_sel);
|
||||
const Int32ArraySamplePtr group_faces = face_sample.getFaces();
|
||||
const size_t num_group_faces = group_faces->size();
|
||||
|
||||
for (size_t l = 0; l < num_group_faces; l++) {
|
||||
const size_t pos = (*group_faces)[l];
|
||||
|
||||
if (pos >= polygon_to_shader.size()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
polygon_to_shader[pos] = pair.shader_index;
|
||||
}
|
||||
}
|
||||
|
||||
return polygon_to_shader;
|
||||
}
|
||||
|
||||
static void read_poly_mesh_geometry(CachedData &cached_data,
|
||||
const PolyMeshSchemaData &data,
|
||||
chrono_t time)
|
||||
{
|
||||
const ISampleSelector iss = ISampleSelector(time);
|
||||
|
||||
add_positions(data.positions.getValue(iss), time, cached_data);
|
||||
|
||||
const Int32ArraySamplePtr face_counts = data.face_counts.getValue(iss);
|
||||
const Int32ArraySamplePtr face_indices = data.face_indices.getValue(iss);
|
||||
|
||||
/* Only copy triangles for other frames if the topology is changing over time as well. */
|
||||
if (data.topology_variance != kHomogeneousTopology || cached_data.triangles.size() == 0) {
|
||||
bool do_triangles = true;
|
||||
|
||||
/* Compare key with last one to check whether the topology changed. */
|
||||
if (cached_data.triangles.size() > 0) {
|
||||
const ArraySample::Key key = face_indices->getKey();
|
||||
|
||||
if (key == cached_data.triangles.key1) {
|
||||
do_triangles = false;
|
||||
}
|
||||
|
||||
cached_data.triangles.key1 = key;
|
||||
}
|
||||
|
||||
if (do_triangles) {
|
||||
const array<int> polygon_to_shader = compute_polygon_to_shader_map(
|
||||
face_counts, data.shader_face_sets, iss);
|
||||
add_triangles(face_counts, face_indices, time, cached_data, polygon_to_shader);
|
||||
}
|
||||
else {
|
||||
cached_data.triangles.reuse_data_for_last_time(time);
|
||||
cached_data.uv_loops.reuse_data_for_last_time(time);
|
||||
cached_data.shader.reuse_data_for_last_time(time);
|
||||
}
|
||||
|
||||
/* Initialize the first key. */
|
||||
if (data.topology_variance != kHomogeneousTopology && cached_data.triangles.size() == 1) {
|
||||
cached_data.triangles.key1 = face_indices->getKey();
|
||||
}
|
||||
}
|
||||
|
||||
if (data.normals.valid()) {
|
||||
add_normals(face_indices, data.normals, time, cached_data);
|
||||
}
|
||||
else {
|
||||
compute_vertex_normals(cached_data, time);
|
||||
}
|
||||
}
|
||||
|
||||
void read_geometry_data(AlembicProcedural *proc,
|
||||
CachedData &cached_data,
|
||||
const PolyMeshSchemaData &data,
|
||||
Progress &progress)
|
||||
{
|
||||
read_data_loop(proc, cached_data, data, read_poly_mesh_geometry, progress);
|
||||
}
|
||||
|
||||
/* Subdivision Geometries */
|
||||
|
||||
static void add_subd_polygons(CachedData &cached_data, const SubDSchemaData &data, chrono_t time)
|
||||
{
|
||||
const ISampleSelector iss = ISampleSelector(time);
|
||||
|
||||
const Int32ArraySamplePtr face_counts = data.face_counts.getValue(iss);
|
||||
const Int32ArraySamplePtr face_indices = data.face_indices.getValue(iss);
|
||||
|
||||
array<int> subd_start_corner;
|
||||
array<int> shader;
|
||||
array<int> subd_num_corners;
|
||||
array<bool> subd_smooth;
|
||||
array<int> subd_ptex_offset;
|
||||
array<int> subd_face_corners;
|
||||
array<int> uv_loops;
|
||||
|
||||
const size_t num_faces = face_counts->size();
|
||||
const int *face_counts_array = face_counts->get();
|
||||
const int *face_indices_array = face_indices->get();
|
||||
|
||||
int num_corners = 0;
|
||||
for (size_t i = 0; i < face_counts->size(); i++) {
|
||||
num_corners += face_counts_array[i];
|
||||
}
|
||||
|
||||
subd_start_corner.reserve(num_faces);
|
||||
subd_num_corners.reserve(num_faces);
|
||||
subd_smooth.reserve(num_faces);
|
||||
subd_ptex_offset.reserve(num_faces);
|
||||
shader.reserve(num_faces);
|
||||
subd_face_corners.reserve(num_corners);
|
||||
uv_loops.reserve(num_corners);
|
||||
|
||||
int start_corner = 0;
|
||||
int current_shader = 0;
|
||||
int ptex_offset = 0;
|
||||
|
||||
const array<int> polygon_to_shader = compute_polygon_to_shader_map(
|
||||
face_counts, data.shader_face_sets, iss);
|
||||
|
||||
for (size_t i = 0; i < face_counts->size(); i++) {
|
||||
num_corners = face_counts_array[i];
|
||||
|
||||
if (!polygon_to_shader.empty()) {
|
||||
current_shader = polygon_to_shader[i];
|
||||
}
|
||||
|
||||
subd_start_corner.push_back_reserved(start_corner);
|
||||
subd_num_corners.push_back_reserved(num_corners);
|
||||
|
||||
for (int j = 0; j < num_corners; ++j) {
|
||||
subd_face_corners.push_back_reserved(face_indices_array[start_corner + j]);
|
||||
uv_loops.push_back_reserved(start_corner + j);
|
||||
}
|
||||
|
||||
shader.push_back_reserved(current_shader);
|
||||
subd_smooth.push_back_reserved(true);
|
||||
subd_ptex_offset.push_back_reserved(ptex_offset);
|
||||
|
||||
ptex_offset += (num_corners == 4 ? 1 : num_corners);
|
||||
|
||||
start_corner += num_corners;
|
||||
}
|
||||
|
||||
cached_data.shader.add_data(shader, time);
|
||||
cached_data.subd_start_corner.add_data(subd_start_corner, time);
|
||||
cached_data.subd_num_corners.add_data(subd_num_corners, time);
|
||||
cached_data.subd_smooth.add_data(subd_smooth, time);
|
||||
cached_data.subd_ptex_offset.add_data(subd_ptex_offset, time);
|
||||
cached_data.subd_face_corners.add_data(subd_face_corners, time);
|
||||
cached_data.uv_loops.add_data(uv_loops, time);
|
||||
}
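/* Illustrative note (not from the original file): worked example of the ptex offset bookkeeping
 * above. A quad maps to a single ptex face while an n-gon splits into n ptex faces, so face
 * sizes {4, 5, 3, 4} yield ptex offsets {0, 1, 6, 9}. A standalone helper applying the same
 * rule: */
#include <vector>

static inline std::vector<int> example_ptex_offsets(const std::vector<int> &face_sizes)
{
  std::vector<int> offsets;
  offsets.reserve(face_sizes.size());
  int offset = 0;
  for (const int corners : face_sizes) {
    offsets.push_back(offset);
    offset += (corners == 4) ? 1 : corners;
  }
  return offsets;
}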
|
||||
|
||||
static void add_subd_edge_creases(CachedData &cached_data,
|
||||
const SubDSchemaData &data,
|
||||
chrono_t time)
|
||||
{
|
||||
if (!(data.crease_indices.valid() && data.crease_lengths.valid() &&
|
||||
data.crease_sharpnesses.valid()))
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
const ISampleSelector iss = ISampleSelector(time);
|
||||
|
||||
const Int32ArraySamplePtr creases_length = data.crease_lengths.getValue(iss);
|
||||
const Int32ArraySamplePtr creases_indices = data.crease_indices.getValue(iss);
|
||||
const FloatArraySamplePtr creases_sharpnesses = data.crease_sharpnesses.getValue(iss);
|
||||
|
||||
if (creases_length && creases_indices && creases_sharpnesses) {
|
||||
array<int> creases_edge;
|
||||
array<float> creases_weight;
|
||||
|
||||
creases_edge.reserve(creases_sharpnesses->size() * 2);
|
||||
creases_weight.reserve(creases_sharpnesses->size());
|
||||
|
||||
int length_offset = 0;
|
||||
int weight_offset = 0;
|
||||
for (size_t c = 0; c < creases_length->size(); ++c) {
|
||||
const int crease_length = creases_length->get()[c];
|
||||
|
||||
for (size_t j = 0; j < crease_length - 1; ++j) {
|
||||
creases_edge.push_back_reserved(creases_indices->get()[length_offset + j]);
|
||||
creases_edge.push_back_reserved(creases_indices->get()[length_offset + j + 1]);
|
||||
creases_weight.push_back_reserved(creases_sharpnesses->get()[weight_offset++]);
|
||||
}
|
||||
|
||||
length_offset += crease_length;
|
||||
}
|
||||
|
||||
cached_data.subd_creases_edge.add_data(creases_edge, time);
|
||||
cached_data.subd_creases_weight.add_data(creases_weight, time);
|
||||
}
|
||||
}
|
||||
|
||||
static void add_subd_vertex_creases(CachedData &cached_data,
|
||||
const SubDSchemaData &data,
|
||||
chrono_t time)
|
||||
{
|
||||
if (!(data.corner_indices.valid() && data.corner_sharpnesses.valid())) {
return;
}

const ISampleSelector iss = ISampleSelector(time);
const Int32ArraySamplePtr creases_indices = data.corner_indices.getValue(iss);
const FloatArraySamplePtr creases_sharpnesses = data.corner_sharpnesses.getValue(iss);
|
||||
|
||||
if (!(creases_indices && creases_sharpnesses) ||
|
||||
creases_indices->size() != creases_sharpnesses->size())
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
array<float> sharpnesses;
|
||||
sharpnesses.reserve(creases_indices->size());
|
||||
array<int> indices;
|
||||
indices.reserve(creases_indices->size());
|
||||
|
||||
for (size_t i = 0; i < creases_indices->size(); i++) {
|
||||
indices.push_back_reserved((*creases_indices)[i]);
|
||||
sharpnesses.push_back_reserved((*creases_sharpnesses)[i]);
|
||||
}
|
||||
|
||||
cached_data.subd_vertex_crease_indices.add_data(indices, time);
|
||||
cached_data.subd_vertex_crease_weights.add_data(sharpnesses, time);
|
||||
}
|
||||
|
||||
static void read_subd_geometry(CachedData &cached_data, const SubDSchemaData &data, chrono_t time)
|
||||
{
|
||||
const ISampleSelector iss = ISampleSelector(time);
|
||||
|
||||
add_positions(data.positions.getValue(iss), time, cached_data);
|
||||
|
||||
if (data.topology_variance != kHomogeneousTopology || cached_data.shader.size() == 0) {
|
||||
add_subd_polygons(cached_data, data, time);
|
||||
add_subd_edge_creases(cached_data, data, time);
|
||||
add_subd_vertex_creases(cached_data, data, time);
|
||||
}
|
||||
}
|
||||
|
||||
void read_geometry_data(AlembicProcedural *proc,
|
||||
CachedData &cached_data,
|
||||
const SubDSchemaData &data,
|
||||
Progress &progress)
|
||||
{
|
||||
read_data_loop(proc, cached_data, data, read_subd_geometry, progress);
|
||||
}
|
||||
|
||||
/* Curve Geometries. */
|
||||
|
||||
static void read_curves_data(CachedData &cached_data, const CurvesSchemaData &data, chrono_t time)
|
||||
{
|
||||
const ISampleSelector iss = ISampleSelector(time);
|
||||
|
||||
const Int32ArraySamplePtr curves_num_vertices = data.num_vertices.getValue(iss);
|
||||
const P3fArraySamplePtr position = data.positions.getValue(iss);
|
||||
|
||||
FloatArraySamplePtr radiuses;
|
||||
|
||||
if (data.widths.valid()) {
|
||||
const IFloatGeomParam::Sample wsample = data.widths.getExpandedValue(iss);
|
||||
radiuses = wsample.getVals();
|
||||
}
|
||||
|
||||
const bool do_radius = (radiuses != nullptr) && (radiuses->size() > 1);
|
||||
float radius = (radiuses && radiuses->size() == 1) ? (*radiuses)[0] : data.default_radius;
|
||||
|
||||
array<float3> curve_keys;
|
||||
array<float> curve_radius;
|
||||
array<int> curve_first_key;
|
||||
array<int> curve_shader;
|
||||
|
||||
const bool is_homogeneous = data.topology_variance == kHomogeneousTopology;
|
||||
|
||||
curve_keys.reserve(position->size());
|
||||
curve_radius.reserve(position->size());
|
||||
curve_first_key.reserve(curves_num_vertices->size());
|
||||
curve_shader.reserve(curves_num_vertices->size());
|
||||
|
||||
int offset = 0;
|
||||
for (size_t i = 0; i < curves_num_vertices->size(); i++) {
|
||||
const int num_vertices = curves_num_vertices->get()[i];
|
||||
|
||||
for (int j = 0; j < num_vertices; j++) {
|
||||
const V3f &f = position->get()[offset + j];
|
||||
// todo(@kevindietrich): we are reading too much data?
|
||||
curve_keys.push_back_slow(make_float3_from_yup(f));
|
||||
|
||||
if (do_radius) {
|
||||
radius = (*radiuses)[offset + j];
|
||||
}
|
||||
|
||||
curve_radius.push_back_slow(radius * data.radius_scale);
|
||||
}
|
||||
|
||||
if (!is_homogeneous || cached_data.curve_first_key.size() == 0) {
|
||||
curve_first_key.push_back_reserved(offset);
|
||||
curve_shader.push_back_reserved(0);
|
||||
}
|
||||
|
||||
offset += num_vertices;
|
||||
}
|
||||
|
||||
cached_data.curve_keys.add_data(curve_keys, time);
|
||||
cached_data.curve_radius.add_data(curve_radius, time);
|
||||
|
||||
if (!is_homogeneous || cached_data.curve_first_key.size() == 0) {
|
||||
cached_data.curve_first_key.add_data(curve_first_key, time);
|
||||
cached_data.curve_shader.add_data(curve_shader, time);
|
||||
}
|
||||
}
|
||||
|
||||
void read_geometry_data(AlembicProcedural *proc,
|
||||
CachedData &cached_data,
|
||||
const CurvesSchemaData &data,
|
||||
Progress &progress)
|
||||
{
|
||||
read_data_loop(proc, cached_data, data, read_curves_data, progress);
|
||||
}
|
||||
|
||||
/* Points Geometries. */
|
||||
|
||||
static void read_points_data(CachedData &cached_data, const PointsSchemaData &data, chrono_t time)
|
||||
{
|
||||
const ISampleSelector iss = ISampleSelector(time);
|
||||
|
||||
const P3fArraySamplePtr position = data.positions.getValue(iss);
|
||||
FloatArraySamplePtr radiuses;
|
||||
|
||||
array<float3> a_positions;
|
||||
array<float> a_radius;
|
||||
array<int> a_shader;
|
||||
a_positions.reserve(position->size());
|
||||
a_radius.reserve(position->size());
|
||||
a_shader.reserve(position->size());
|
||||
|
||||
if (data.radiuses.valid()) {
|
||||
const IFloatGeomParam::Sample wsample = data.radiuses.getExpandedValue(iss);
|
||||
radiuses = wsample.getVals();
|
||||
}
|
||||
|
||||
const bool do_radius = (radiuses != nullptr) && (radiuses->size() > 1);
|
||||
float radius = (radiuses && radiuses->size() == 1) ? (*radiuses)[0] : data.default_radius;
|
||||
|
||||
const int offset = 0;
|
||||
for (size_t i = 0; i < position->size(); i++) {
|
||||
const V3f &f = position->get()[offset + i];
|
||||
a_positions.push_back_slow(make_float3_from_yup(f));
|
||||
|
||||
if (do_radius) {
|
||||
radius = (*radiuses)[offset + i];
|
||||
}
|
||||
a_radius.push_back_slow(radius * data.radius_scale);
|
||||
|
||||
a_shader.push_back_slow(0);
|
||||
}
|
||||
|
||||
cached_data.points.add_data(a_positions, time);
|
||||
cached_data.radiuses.add_data(a_radius, time);
|
||||
cached_data.points_shader.add_data(a_shader, time);
|
||||
}
|
||||
|
||||
void read_geometry_data(AlembicProcedural *proc,
|
||||
CachedData &cached_data,
|
||||
const PointsSchemaData &data,
|
||||
Progress &progress)
|
||||
{
|
||||
read_data_loop(proc, cached_data, data, read_points_data, progress);
|
||||
}
|
||||
/* Attributes conversions. */
|
||||
|
||||
/* Type traits for converting between Alembic and Cycles types.
|
||||
*/
|
||||
|
||||
template<typename T> struct value_type_converter {
|
||||
using cycles_type = float;
|
||||
/* Use `TypeDesc::FLOAT` instead of `TypeFloat` to work around a compiler bug in gcc 11. */
|
||||
static constexpr TypeDesc type_desc = TypeDesc::FLOAT;
|
||||
static constexpr const char *type_name = "float (default)";
|
||||
|
||||
static cycles_type convert_value(T value)
|
||||
{
|
||||
return static_cast<float>(value);
|
||||
}
|
||||
};
|
||||
|
||||
template<> struct value_type_converter<Imath::V2f> {
|
||||
using cycles_type = float2;
|
||||
static constexpr TypeDesc type_desc = TypeFloat2;
|
||||
static constexpr const char *type_name = "float2";
|
||||
|
||||
static cycles_type convert_value(Imath::V2f value)
|
||||
{
|
||||
return make_float2(value.x, value.y);
|
||||
}
|
||||
};
|
||||
|
||||
template<> struct value_type_converter<Imath::V3f> {
|
||||
using cycles_type = float3;
|
||||
static constexpr TypeDesc type_desc = TypeVector;
|
||||
static constexpr const char *type_name = "float3";
|
||||
|
||||
static cycles_type convert_value(Imath::V3f value)
|
||||
{
|
||||
return make_float3_from_yup(value);
|
||||
}
|
||||
};
|
||||
|
||||
template<> struct value_type_converter<Imath::C3f> {
|
||||
using cycles_type = uchar4;
|
||||
static constexpr TypeDesc type_desc = TypeRGBA;
|
||||
static constexpr const char *type_name = "rgb";
|
||||
|
||||
static cycles_type convert_value(Imath::C3f value)
|
||||
{
|
||||
return color_float_to_byte(make_float3(value.x, value.y, value.z));
|
||||
}
|
||||
};
|
||||
|
||||
template<> struct value_type_converter<Imath::C4f> {
|
||||
using cycles_type = uchar4;
|
||||
static constexpr TypeDesc type_desc = TypeRGBA;
|
||||
static constexpr const char *type_name = "rgba";
|
||||
|
||||
static cycles_type convert_value(Imath::C4f value)
|
||||
{
|
||||
return color_float4_to_uchar4(make_float4(value.r, value.g, value.b, value.a));
|
||||
}
|
||||
};
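/* Illustrative sketch (not from the original file): how the traits above are meant to be
 * consumed. The Alembic value type selects both the Cycles storage type and the per-element
 * conversion at compile time, so a single templated loop covers every supported attribute type.
 * `src` and `dst` are hypothetical raw pointers of matching length. */
template<typename AbcT>
static void example_convert_values(const AbcT *src,
                                   typename value_type_converter<AbcT>::cycles_type *dst,
                                   const size_t count)
{
  for (size_t i = 0; i < count; ++i) {
    dst[i] = value_type_converter<AbcT>::convert_value(src[i]);
  }
}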
|
||||
|
||||
/* Main function used to read attributes of any type. */
|
||||
template<typename TRAIT>
|
||||
static void process_attribute(CachedData &cache,
|
||||
CachedData::CachedAttribute &attribute,
|
||||
GeometryScope scope,
|
||||
const typename ITypedGeomParam<TRAIT>::Sample &sample,
|
||||
const double time)
|
||||
{
|
||||
using abc_type = typename TRAIT::value_type;
|
||||
using cycles_type = typename value_type_converter<abc_type>::cycles_type;
|
||||
|
||||
const TypedArraySample<TRAIT> &values = *sample.getVals();
|
||||
|
||||
switch (scope) {
|
||||
case kConstantScope:
|
||||
case kVertexScope: {
|
||||
const array<float3> *vertices =
|
||||
cache.vertices.data_for_time_no_check(time).get_data_or_null();
|
||||
|
||||
if (!vertices) {
|
||||
attribute.data.add_no_data(time);
|
||||
return;
|
||||
}
|
||||
|
||||
if (vertices->size() != values.size()) {
|
||||
attribute.data.add_no_data(time);
|
||||
return;
|
||||
}
|
||||
|
||||
array<char> data(vertices->size() * sizeof(cycles_type));
|
||||
|
||||
cycles_type *pod_typed_data = reinterpret_cast<cycles_type *>(data.data());
|
||||
|
||||
for (size_t i = 0; i < values.size(); ++i) {
|
||||
*pod_typed_data++ = value_type_converter<abc_type>::convert_value(values[i]);
|
||||
}
|
||||
|
||||
attribute.data.add_data(data, time);
|
||||
break;
|
||||
}
|
||||
case kVaryingScope: {
|
||||
const array<int3> *triangles =
|
||||
cache.triangles.data_for_time_no_check(time).get_data_or_null();
|
||||
|
||||
if (!triangles) {
|
||||
attribute.data.add_no_data(time);
|
||||
return;
|
||||
}
|
||||
|
||||
array<char> data(triangles->size() * 3 * sizeof(cycles_type));
|
||||
|
||||
cycles_type *pod_typed_data = reinterpret_cast<cycles_type *>(data.data());
|
||||
|
||||
for (const int3 &tri : *triangles) {
|
||||
*pod_typed_data++ = value_type_converter<abc_type>::convert_value(values[tri.x]);
|
||||
*pod_typed_data++ = value_type_converter<abc_type>::convert_value(values[tri.y]);
|
||||
*pod_typed_data++ = value_type_converter<abc_type>::convert_value(values[tri.z]);
|
||||
}
|
||||
|
||||
attribute.data.add_data(data, time);
|
||||
break;
|
||||
}
|
||||
default: {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* UVs are processed separately as their indexing is based on loops, instead of vertices or
|
||||
* corners. */
|
||||
static void process_uvs(CachedData &cache,
|
||||
CachedData::CachedAttribute &attribute,
|
||||
GeometryScope scope,
|
||||
const IV2fGeomParam::Sample &sample,
|
||||
const double time)
|
||||
{
|
||||
if (scope != kFacevaryingScope && scope != kVaryingScope && scope != kVertexScope) {
|
||||
return;
|
||||
}
|
||||
|
||||
const array<int> *uv_loops = cache.uv_loops.data_for_time_no_check(time).get_data_or_null();
|
||||
|
||||
/* It's ok to not have loop indices, as long as the scope is not face-varying. */
|
||||
if (!uv_loops && scope == kFacevaryingScope) {
|
||||
return;
|
||||
}
|
||||
|
||||
const array<int3> *triangles = cache.triangles.data_for_time_no_check(time).get_data_or_null();
|
||||
const array<int> *corners =
|
||||
cache.subd_face_corners.data_for_time_no_check(time).get_data_or_null();
|
||||
|
||||
array<char> data;
|
||||
if (triangles) {
|
||||
data.resize(triangles->size() * 3 * sizeof(float2));
|
||||
}
|
||||
else if (corners) {
|
||||
data.resize(corners->size() * sizeof(float2));
|
||||
}
|
||||
else {
|
||||
return;
|
||||
}
|
||||
|
||||
float2 *data_float2 = reinterpret_cast<float2 *>(data.data());
|
||||
|
||||
const uint32_t *indices = sample.getIndices()->get();
|
||||
const V2f *values = sample.getVals()->get();
|
||||
|
||||
if (scope == kFacevaryingScope) {
|
||||
for (const int uv_loop_index : *uv_loops) {
|
||||
const uint32_t index = indices[uv_loop_index];
|
||||
*data_float2++ = make_float2(values[index][0], values[index][1]);
|
||||
}
|
||||
}
|
||||
else if (scope == kVaryingScope || scope == kVertexScope) {
|
||||
if (triangles) {
|
||||
for (size_t i = 0; i < triangles->size(); i++) {
|
||||
const int3 t = (*triangles)[i];
|
||||
*data_float2++ = make_float2(values[t.x][0], values[t.x][1]);
|
||||
*data_float2++ = make_float2(values[t.y][0], values[t.y][1]);
|
||||
*data_float2++ = make_float2(values[t.z][0], values[t.z][1]);
|
||||
}
|
||||
}
|
||||
else if (corners) {
|
||||
for (size_t i = 0; i < corners->size(); i++) {
|
||||
const int c = (*corners)[i];
|
||||
*data_float2++ = make_float2(values[c][0], values[c][1]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
attribute.data.add_data(data, time);
|
||||
}
|
||||
|
||||
/* Type of the function used to parse one time's worth of data, either process_uvs or
* process_attribute. */
|
||||
template<typename TRAIT>
|
||||
using process_callback_type = void (*)(CachedData &,
|
||||
CachedData::CachedAttribute &,
|
||||
GeometryScope,
|
||||
const typename ITypedGeomParam<TRAIT>::Sample &,
|
||||
double);
|
||||
|
||||
/* Main loop to process the attributes. This will look at the given param's TimeSampling,
* extract data based on which frame time is requested by the procedural, and execute the
* callback for each of those requested times. */
|
||||
template<typename TRAIT>
|
||||
static void read_attribute_loop(AlembicProcedural *proc,
|
||||
CachedData &cache,
|
||||
const ITypedGeomParam<TRAIT> ¶m,
|
||||
process_callback_type<TRAIT> callback,
|
||||
Progress &progress,
|
||||
AttributeStandard std = ATTR_STD_NONE)
|
||||
{
|
||||
const std::set<chrono_t> times = get_relevant_sample_times(
|
||||
proc, *param.getTimeSampling(), param.getNumSamples());
|
||||
|
||||
if (times.empty()) {
|
||||
return;
|
||||
}
|
||||
|
||||
std::string name = param.getName();
|
||||
|
||||
if (std == ATTR_STD_UV) {
|
||||
const std::string uv_source_name = Alembic::Abc::GetSourceName(param.getMetaData());
|
||||
|
||||
/* According to the convention, primary UVs should have had their name
|
||||
* set using Alembic::Abc::SetSourceName, but you can't expect everyone
|
||||
* to follow it! :) */
|
||||
if (!uv_source_name.empty()) {
|
||||
name = uv_source_name;
|
||||
}
|
||||
}
|
||||
|
||||
CachedData::CachedAttribute &attribute = cache.add_attribute(ustring(name),
|
||||
*param.getTimeSampling());
|
||||
|
||||
using abc_type = typename TRAIT::value_type;
|
||||
|
||||
attribute.data.set_time_sampling(*param.getTimeSampling());
|
||||
attribute.std = std;
|
||||
attribute.type_desc = value_type_converter<abc_type>::type_desc;
|
||||
|
||||
if (attribute.type_desc == TypeRGBA) {
|
||||
attribute.element = ATTR_ELEMENT_CORNER_BYTE;
|
||||
}
|
||||
else {
|
||||
if (param.getScope() == kVaryingScope || param.getScope() == kFacevaryingScope) {
|
||||
attribute.element = ATTR_ELEMENT_CORNER;
|
||||
}
|
||||
else {
|
||||
attribute.element = ATTR_ELEMENT_VERTEX;
|
||||
}
|
||||
}
|
||||
|
||||
for (const chrono_t time : times) {
|
||||
if (progress.get_cancel()) {
|
||||
return;
|
||||
}
|
||||
|
||||
const ISampleSelector iss = ISampleSelector(time);
|
||||
typename ITypedGeomParam<TRAIT>::Sample sample;
|
||||
param.getIndexed(sample, iss);
|
||||
|
||||
if (!sample.valid()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!sample.getVals()) {
|
||||
attribute.data.add_no_data(time);
|
||||
continue;
|
||||
}
|
||||
|
||||
/* Check whether we already loaded constant data. */
|
||||
if (attribute.data.size() != 0) {
|
||||
if (param.isConstant()) {
|
||||
return;
|
||||
}
|
||||
|
||||
const ArraySample::Key indices_key = sample.getIndices()->getKey();
|
||||
const ArraySample::Key values_key = sample.getVals()->getKey();
|
||||
|
||||
const bool is_same_as_last_time = (indices_key == attribute.data.key1 &&
|
||||
values_key == attribute.data.key2);
|
||||
|
||||
attribute.data.key1 = indices_key;
|
||||
attribute.data.key2 = values_key;
|
||||
|
||||
if (is_same_as_last_time) {
|
||||
attribute.data.reuse_data_for_last_time(time);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
callback(cache, attribute, param.getScope(), sample, time);
|
||||
}
|
||||
}
|
||||
|
||||
/* Attributes requests. */

/* This structure is used to tell which ICompoundProperty the PropertyHeader comes from, as we
 * need the parent when downcasting to the proper type. */
struct PropHeaderAndParent {
  const PropertyHeader *prop;
  ICompoundProperty parent;
};

/* Parse the ICompoundProperty to look for properties whose names appear in the
 * AttributeRequestSet. This also looks into any child ICompoundProperty of the given
 * ICompoundProperty. If no property of the given name is found, let it be that way; Cycles will
 * use a zero value for the missing attribute. */
static void parse_requested_attributes_recursive(const AttributeRequestSet &requested_attributes,
                                                 const ICompoundProperty &arb_geom_params,
                                                 vector<PropHeaderAndParent> &requested_properties)
{
  if (!arb_geom_params.valid()) {
    return;
  }

  for (const AttributeRequest &req : requested_attributes.requests) {
    const PropertyHeader *property_header = arb_geom_params.getPropertyHeader(req.name.c_str());

    if (!property_header) {
      continue;
    }

    requested_properties.push_back({property_header, arb_geom_params});
  }

  /* Look into children compound properties. */
  for (size_t i = 0; i < arb_geom_params.getNumProperties(); ++i) {
    const PropertyHeader &property_header = arb_geom_params.getPropertyHeader(i);

    if (property_header.isCompound()) {
      const ICompoundProperty compound_property = ICompoundProperty(arb_geom_params,
                                                                    property_header.getName());
      parse_requested_attributes_recursive(
          requested_attributes, compound_property, requested_properties);
    }
  }
}

/* Main entry point for parsing requested attributes from an ICompoundProperty, this exists so that
 * we can simply return the list of properties instead of allocating it on the stack and passing it
 * as a parameter. */
static vector<PropHeaderAndParent> parse_requested_attributes(
    const AttributeRequestSet &requested_attributes, const ICompoundProperty &arb_geom_params)
{
  vector<PropHeaderAndParent> requested_properties;
  parse_requested_attributes_recursive(
      requested_attributes, arb_geom_params, requested_properties);
  return requested_properties;
}

/* Read the attributes requested by the shaders from the archive. This will recursively find named
 * attributes from the AttributeRequestSet in the ICompoundProperty and any of its compound children.
 * The attributes are added to the CachedData's attribute list. For each attribute we will try to
 * deduplicate data across consecutive frames. */
void read_attributes(AlembicProcedural *proc,
                     CachedData &cache,
                     const ICompoundProperty &arb_geom_params,
                     const IV2fGeomParam &default_uvs_param,
                     const AttributeRequestSet &requested_attributes,
                     Progress &progress)
{
  if (default_uvs_param.valid()) {
    /* Only the default UVs should be treated as the standard UV attribute. */
    read_attribute_loop(proc, cache, default_uvs_param, process_uvs, progress, ATTR_STD_UV);
  }

  const vector<PropHeaderAndParent> requested_properties = parse_requested_attributes(
      requested_attributes, arb_geom_params);

  for (const PropHeaderAndParent &prop_and_parent : requested_properties) {
    if (progress.get_cancel()) {
      return;
    }

    const PropertyHeader *prop = prop_and_parent.prop;
    const ICompoundProperty &parent = prop_and_parent.parent;

    if (IBoolGeomParam::matches(*prop)) {
      const IBoolGeomParam &param = IBoolGeomParam(parent, prop->getName());
      read_attribute_loop(proc, cache, param, process_attribute<BooleanTPTraits>, progress);
    }
    else if (IInt32GeomParam::matches(*prop)) {
      const IInt32GeomParam &param = IInt32GeomParam(parent, prop->getName());
      read_attribute_loop(proc, cache, param, process_attribute<Int32TPTraits>, progress);
    }
    else if (IFloatGeomParam::matches(*prop)) {
      const IFloatGeomParam &param = IFloatGeomParam(parent, prop->getName());
      read_attribute_loop(proc, cache, param, process_attribute<Float32TPTraits>, progress);
    }
    else if (IV2fGeomParam::matches(*prop)) {
      const IV2fGeomParam &param = IV2fGeomParam(parent, prop->getName());
      if (Alembic::AbcGeom::isUV(*prop)) {
        read_attribute_loop(proc, cache, param, process_uvs, progress);
      }
      else {
        read_attribute_loop(proc, cache, param, process_attribute<V2fTPTraits>, progress);
      }
    }
    else if (IV3fGeomParam::matches(*prop)) {
      const IV3fGeomParam &param = IV3fGeomParam(parent, prop->getName());
      read_attribute_loop(proc, cache, param, process_attribute<V3fTPTraits>, progress);
    }
    else if (IN3fGeomParam::matches(*prop)) {
      const IN3fGeomParam &param = IN3fGeomParam(parent, prop->getName());
      read_attribute_loop(proc, cache, param, process_attribute<N3fTPTraits>, progress);
    }
    else if (IC3fGeomParam::matches(*prop)) {
      const IC3fGeomParam &param = IC3fGeomParam(parent, prop->getName());
      read_attribute_loop(proc, cache, param, process_attribute<C3fTPTraits>, progress);
    }
    else if (IC4fGeomParam::matches(*prop)) {
      const IC4fGeomParam &param = IC4fGeomParam(parent, prop->getName());
      read_attribute_loop(proc, cache, param, process_attribute<C4fTPTraits>, progress);
    }
  }

  cache.invalidate_last_loaded_time(true);
}

CCL_NAMESPACE_END

#endif
@@ -1,143 +0,0 @@
/* SPDX-FileCopyrightText: 2021-2022 Blender Foundation
 *
 * SPDX-License-Identifier: Apache-2.0 */

#pragma once

#ifdef WITH_ALEMBIC

#  include <Alembic/AbcCoreFactory/All.h>
#  include <Alembic/AbcGeom/All.h>

#  include "util/vector.h"

CCL_NAMESPACE_BEGIN

class AlembicProcedural;
class AttributeRequestSet;
class Progress;
struct CachedData;

/* Maps a FaceSet whose name matches that of a Shader to the index of said shader in the Geometry's
 * used_shaders list. */
struct FaceSetShaderIndexPair {
  Alembic::AbcGeom::IFaceSet face_set;
  int shader_index;
};

/* Data of an IPolyMeshSchema that we need to read. */
struct PolyMeshSchemaData {
  Alembic::AbcGeom::TimeSamplingPtr time_sampling;
  size_t num_samples;
  Alembic::AbcGeom::MeshTopologyVariance topology_variance;

  Alembic::AbcGeom::IP3fArrayProperty positions;
  Alembic::AbcGeom::IInt32ArrayProperty face_indices;
  Alembic::AbcGeom::IInt32ArrayProperty face_counts;

  Alembic::AbcGeom::IN3fGeomParam normals;

  vector<FaceSetShaderIndexPair> shader_face_sets;

  // Unsupported for now.
  Alembic::AbcGeom::IV3fArrayProperty velocities;
};

void read_geometry_data(AlembicProcedural *proc,
                        CachedData &cached_data,
                        const PolyMeshSchemaData &data,
                        Progress &progress);

/* Data of an ISubDSchema that we need to read. */
struct SubDSchemaData {
  Alembic::AbcGeom::TimeSamplingPtr time_sampling;
  size_t num_samples;
  Alembic::AbcGeom::MeshTopologyVariance topology_variance;

  Alembic::AbcGeom::IInt32ArrayProperty face_counts;
  Alembic::AbcGeom::IInt32ArrayProperty face_indices;
  Alembic::AbcGeom::IP3fArrayProperty positions;

  Alembic::AbcGeom::IInt32ArrayProperty crease_indices;
  Alembic::AbcGeom::IInt32ArrayProperty crease_lengths;
  Alembic::AbcGeom::IFloatArrayProperty crease_sharpnesses;

  vector<FaceSetShaderIndexPair> shader_face_sets;

  Alembic::AbcGeom::IInt32ArrayProperty corner_indices;
  Alembic::AbcGeom::IFloatArrayProperty corner_sharpnesses;

  // Those are unsupported for now.
  Alembic::AbcGeom::IInt32Property face_varying_interpolate_boundary;
  Alembic::AbcGeom::IInt32Property face_varying_propagate_corners;
  Alembic::AbcGeom::IInt32Property interpolate_boundary;
  Alembic::AbcGeom::IInt32ArrayProperty holes;
  Alembic::AbcGeom::IStringProperty subdivision_scheme;
  Alembic::AbcGeom::IV3fArrayProperty velocities;
};

void read_geometry_data(AlembicProcedural *proc,
                        CachedData &cached_data,
                        const SubDSchemaData &data,
                        Progress &progress);

/* Data of an ICurvesSchema that we need to read. */
struct CurvesSchemaData {
  Alembic::AbcGeom::TimeSamplingPtr time_sampling;
  size_t num_samples;
  Alembic::AbcGeom::MeshTopologyVariance topology_variance;

  Alembic::AbcGeom::IP3fArrayProperty positions;

  Alembic::AbcGeom::IInt32ArrayProperty num_vertices;

  float default_radius;
  float radius_scale;

  // Those are unsupported for now.
  Alembic::AbcGeom::IV3fArrayProperty velocities;
  // if this property is invalid then the weight for every point is 1
  Alembic::AbcGeom::IFloatArrayProperty position_weights;
  Alembic::AbcGeom::IN3fGeomParam normals;
  Alembic::AbcGeom::IFloatGeomParam widths;
  Alembic::AbcGeom::IUcharArrayProperty orders;
  Alembic::AbcGeom::IFloatArrayProperty knots;

  // TODO(@kevindietrich): type, basis, wrap
};

void read_geometry_data(AlembicProcedural *proc,
                        CachedData &cached_data,
                        const CurvesSchemaData &data,
                        Progress &progress);

/* Data of an IPointsSchema that we need to read. */
struct PointsSchemaData {
  Alembic::AbcGeom::TimeSamplingPtr time_sampling;
  size_t num_samples;

  float default_radius;
  float radius_scale;

  Alembic::AbcGeom::IP3fArrayProperty positions;
  Alembic::AbcGeom::IInt32ArrayProperty num_points;
  Alembic::AbcGeom::IFloatGeomParam radiuses;
  // Those are unsupported for now.
  Alembic::AbcGeom::IV3fArrayProperty velocities;
};

void read_geometry_data(AlembicProcedural *proc,
                        CachedData &cached_data,
                        const PointsSchemaData &data,
                        Progress &progress);

void read_attributes(AlembicProcedural *proc,
                     CachedData &cache,
                     const Alembic::AbcGeom::ICompoundProperty &arb_geom_params,
                     const Alembic::AbcGeom::IV2fGeomParam &default_uvs_param,
                     const AttributeRequestSet &requested_attributes,
                     Progress &progress);

CCL_NAMESPACE_END

#endif
@@ -8,7 +8,6 @@

#include "device/device.h"

#include "scene/alembic.h"
#include "scene/background.h"
#include "scene/bake.h"
#include "scene/camera.h"
@@ -895,20 +894,6 @@ template<> Shader *Scene::create_node<Shader>()
  return node_ptr;
}

template<> AlembicProcedural *Scene::create_node<AlembicProcedural>()
{
#ifdef WITH_ALEMBIC
  unique_ptr<AlembicProcedural> node = make_unique<AlembicProcedural>();
  AlembicProcedural *node_ptr = node.get();
  node->set_owner(this);
  procedurals.push_back(std::move(node));
  procedural_manager->tag_update();
  return node_ptr;
#else
  return nullptr;
#endif
}

template<> Pass *Scene::create_node<Pass>()
{
  unique_ptr<Pass> node = make_unique<Pass>();
@@ -1043,15 +1028,6 @@ template<> void Scene::delete_node(Procedural *node)
  procedural_manager->tag_update();
}

template<> void Scene::delete_node(AlembicProcedural *node)
{
#ifdef WITH_ALEMBIC
  delete_node(static_cast<Procedural *>(node));
#else
  (void)node;
#endif
}

template<> void Scene::delete_node(Pass *node)
{
  assert(node->get_owner() == this);

@@ -19,7 +19,6 @@

CCL_NAMESPACE_BEGIN

class AlembicProcedural;
class AttributeRequestSet;
class Background;
class BVH;
@@ -271,7 +270,6 @@ template<> Volume *Scene::create_node<Volume>();
template<> PointCloud *Scene::create_node<PointCloud>();
template<> ParticleSystem *Scene::create_node<ParticleSystem>();
template<> Shader *Scene::create_node<Shader>();
template<> AlembicProcedural *Scene::create_node<AlembicProcedural>();
template<> Pass *Scene::create_node<Pass>();
template<> Camera *Scene::create_node<Camera>();
template<> Background *Scene::create_node<Background>();
@@ -288,7 +286,6 @@ template<> void Scene::delete_node(Object *node);
template<> void Scene::delete_node(ParticleSystem *node);
template<> void Scene::delete_node(Shader *node);
template<> void Scene::delete_node(Procedural *node);
template<> void Scene::delete_node(AlembicProcedural *node);
template<> void Scene::delete_node(Pass *node);

template<> void Scene::delete_nodes(const set<Geometry *> &nodes, const NodeOwner *owner);

@@ -1169,11 +1169,6 @@ class ConstraintButtonsSubPanel:
            context, self.layout.template_cache_file_velocity
        )

    def draw_transform_cache_procedural(self, context):
        self.draw_transform_cache_subpanel(
            context, self.layout.template_cache_file_procedural
        )

    def draw_transform_cache_time(self, context):
        self.draw_transform_cache_subpanel(
            context, self.layout.template_cache_file_time_settings
@@ -1614,22 +1609,6 @@ class BONE_PT_bTransformCacheConstraint_layers(BoneConstraintPanel, ConstraintBu
        self.draw_transform_cache_layers(context)


class OBJECT_PT_bTransformCacheConstraint_procedural(ObjectConstraintPanel, ConstraintButtonsSubPanel, Panel):
    bl_parent_id = "OBJECT_PT_bTransformCacheConstraint"
    bl_label = "Render Procedural"

    def draw(self, context):
        self.draw_transform_cache_procedural(context)


class BONE_PT_bTransformCacheConstraint_procedural(BoneConstraintPanel, ConstraintButtonsSubPanel, Panel):
    bl_parent_id = "BONE_PT_bTransformCacheConstraint"
    bl_label = "Render Procedural"

    def draw(self, context):
        self.draw_transform_cache_procedural(context)


class OBJECT_PT_bTransformCacheConstraint_time(ObjectConstraintPanel, ConstraintButtonsSubPanel, Panel):
    bl_parent_id = "OBJECT_PT_bTransformCacheConstraint"
    bl_label = "Time"
@@ -1721,7 +1700,6 @@ classes = (
    OBJECT_PT_bObjectSolverConstraint,
    OBJECT_PT_bTransformCacheConstraint,
    OBJECT_PT_bTransformCacheConstraint_time,
    OBJECT_PT_bTransformCacheConstraint_procedural,
    OBJECT_PT_bTransformCacheConstraint_velocity,
    OBJECT_PT_bTransformCacheConstraint_layers,
    OBJECT_PT_bArmatureConstraint,
@@ -1761,7 +1739,6 @@ classes = (
    BONE_PT_bObjectSolverConstraint,
    BONE_PT_bTransformCacheConstraint,
    BONE_PT_bTransformCacheConstraint_time,
    BONE_PT_bTransformCacheConstraint_procedural,
    BONE_PT_bTransformCacheConstraint_velocity,
    BONE_PT_bTransformCacheConstraint_layers,
    BONE_PT_bArmatureConstraint,

@@ -37,14 +37,6 @@ void BKE_cachefile_reader_open(CacheFile *cache_file,
                               const char *object_path);
void BKE_cachefile_reader_free(CacheFile *cache_file, CacheReader **reader);

/**
 * Determine whether the #CacheFile should use a render engine procedural. If so, data is not read
 * from the file and bounding boxes are used to represent the objects in the Scene.
 * Render engines will receive the bounding box as a placeholder but can instead
 * load the data directly if they support it.
 */
bool BKE_cache_file_uses_render_procedural(const CacheFile *cache_file, Scene *scene);

/**
 * Add a layer to the cache_file. Return NULL if the `filepath` is already that of an existing
 * layer or if the number of layers exceeds the maximum allowed layer count.

@@ -413,19 +413,6 @@ double BKE_cachefile_frame_offset(const CacheFile *cache_file, const double time
  return cache_file->is_sequence ? frame : frame - time_offset;
}

bool BKE_cache_file_uses_render_procedural(const CacheFile *cache_file, Scene *scene)
{
  RenderEngineType *render_engine_type = RE_engines_find(scene->r.engine);

  if (cache_file->type != CACHEFILE_TYPE_ALEMBIC ||
      !RE_engine_supports_alembic_procedural(render_engine_type, scene))
  {
    return false;
  }

  return cache_file->use_render_procedural;
}

CacheFileLayer *BKE_cachefile_add_layer(CacheFile *cache_file, const char filepath[1024])
{
  LISTBASE_FOREACH (CacheFileLayer *, layer, &cache_file->layers) {

@@ -5366,11 +5366,6 @@ static void transformcache_evaluate(bConstraint *con, bConstraintOb *cob, ListBa
    return;
  }

  /* Do not process data if using a render time procedural. */
  if (BKE_cache_file_uses_render_procedural(cache_file, scene)) {
    return;
  }

  const float frame = DEG_get_ctime(cob->depsgraph);
  const double time = BKE_cachefile_time_offset(
      cache_file, double(frame), scene->frames_per_second());

@@ -2550,11 +2550,6 @@ bool uiTemplateCacheFilePointer(PointerRNA *ptr,
 */
void uiTemplateCacheFileVelocity(uiLayout *layout, PointerRNA *fileptr);

/**
 * Draw the render procedural related properties of the CacheFile.
 */
void uiTemplateCacheFileProcedural(uiLayout *layout, const bContext *C, PointerRNA *fileptr);

/**
 * Draw the time related properties of the CacheFile.
 */

@@ -43,61 +43,6 @@ void uiTemplateCacheFileVelocity(uiLayout *layout, PointerRNA *fileptr)
  layout->prop(fileptr, "velocity_unit", UI_ITEM_NONE, std::nullopt, ICON_NONE);
}

void uiTemplateCacheFileProcedural(uiLayout *layout, const bContext *C, PointerRNA *fileptr)
{
  if (RNA_pointer_is_null(fileptr)) {
    return;
  }

  /* Ensure that the context has a CacheFile as this may not be set inside of modifiers panels. */
  layout->context_ptr_set("edit_cachefile", fileptr);

  uiLayout *row, *sub;

  /* Only enable render procedural option if the active engine supports it. */
  const RenderEngineType *engine_type = CTX_data_engine_type(C);

  Scene *scene = CTX_data_scene(C);
  const bool engine_supports_procedural = RE_engine_supports_alembic_procedural(engine_type,
                                                                                scene);
  CacheFile *cache_file = static_cast<CacheFile *>(fileptr->data);
  CacheFile *cache_file_eval = DEG_get_evaluated(CTX_data_depsgraph_pointer(C), cache_file);
  bool is_alembic = cache_file_eval->type == CACHEFILE_TYPE_ALEMBIC;

  if (!is_alembic) {
    row = &layout->row(false);
    row->label(RPT_("Only Alembic Procedurals supported"), ICON_INFO);
  }
  else if (!engine_supports_procedural) {
    row = &layout->row(false);
    /* For Cycles, verify that experimental features are enabled. */
    if (BKE_scene_uses_cycles(scene) && !BKE_scene_uses_cycles_experimental_features(scene)) {
      row->label(
          RPT_(
              "The Cycles Alembic Procedural is only available with the experimental feature set"),
          ICON_INFO);
    }
    else {
      row->label(RPT_("The active render engine does not have an Alembic Procedural"), ICON_INFO);
    }
  }

  row = &layout->row(false);
  row->active_set(is_alembic && engine_supports_procedural);
  row->prop(fileptr, "use_render_procedural", UI_ITEM_NONE, std::nullopt, ICON_NONE);

  const bool use_render_procedural = RNA_boolean_get(fileptr, "use_render_procedural");
  const bool use_prefetch = RNA_boolean_get(fileptr, "use_prefetch");

  row = &layout->row(false);
  row->enabled_set(use_render_procedural);
  row->prop(fileptr, "use_prefetch", UI_ITEM_NONE, std::nullopt, ICON_NONE);

  sub = &layout->row(false);
  sub->enabled_set(use_prefetch && use_render_procedural);
  sub->prop(fileptr, "prefetch_cache_size", UI_ITEM_NONE, std::nullopt, ICON_NONE);
}

void uiTemplateCacheFileTimeSettings(uiLayout *layout, PointerRNA *fileptr)
{
  if (RNA_pointer_is_null(fileptr)) {

@@ -192,19 +192,6 @@ void ED_render_engine_changed(Main *bmain, const bool update_scene_data)
    }
  }
  BKE_main_ensure_invariants(*bmain);

  /* Update #CacheFiles to ensure that procedurals are properly taken into account. */
  LISTBASE_FOREACH (CacheFile *, cachefile, &bmain->cachefiles) {
    /* Only update cache-files which are set to use a render procedural.
     * We do not use #BKE_cachefile_uses_render_procedural here as we need to update regardless of
     * the current engine or its settings. */
    if (cachefile->use_render_procedural) {
      DEG_id_tag_update(&cachefile->id, ID_RECALC_SYNC_TO_EVAL);
      /* Rebuild relations so that modifiers are reconnected to or disconnected from the
       * cache-file. */
      DEG_relations_tag_update(bmain);
    }
  }
}

void ED_render_view_layer_changed(Main *bmain, bScreen *screen)

@@ -27,8 +27,6 @@
    .handle = NULL, \
    .handle_filepath[0] = '\0', \
    .handle_readers = NULL, \
    .use_prefetch = 1, \
    .prefetch_cache_size = 4096, \
  }

/** \} */

@@ -85,28 +85,13 @@ typedef struct CacheFile {
  /** The frame offset to subtract. */
  float frame_offset;

  char _pad[4];

  /** Animation flag. */
  short flag;

  /* eCacheFileType enum. */
  char type;

  /**
   * Do not load data from the cache file and display objects in the scene as boxes, Cycles will
   * load objects directly from the CacheFile. Other render engines which can load Alembic data
   * directly can take care of rendering it themselves.
   */
  char use_render_procedural;

  char _pad1[3];

  /** Enable data prefetching when using the Cycles Procedural. */
  char use_prefetch;

  /** Size in megabytes for the prefetch cache used by the Cycles Procedural. */
  int prefetch_cache_size;
  char _pad1[1];

  /** Index of the currently selected layer in the UI, starts at 1. */
  int active_layer;
@@ -117,6 +102,8 @@ typedef struct CacheFile {
  /* Name of the velocity property in the archive. */
  char velocity_name[64];

  char _pad3[4];

  /* Runtime */
  struct CacheArchiveHandle *handle;
  char handle_filepath[/*FILE_MAX*/ 1024];

@@ -56,12 +56,6 @@ static void rna_CacheFileLayer_update(Main * /*bmain*/, Scene * /*scene*/, Point
  WM_main_add_notifier(NC_OBJECT | ND_DRAW, nullptr);
}

static void rna_CacheFile_dependency_update(Main *bmain, Scene *scene, PointerRNA *ptr)
{
  rna_CacheFile_update(bmain, scene, ptr);
  DEG_relations_tag_update(bmain);
}

static void rna_CacheFile_object_paths_begin(CollectionPropertyIterator *iter, PointerRNA *ptr)
{
  CacheFile *cache_file = (CacheFile *)ptr->data;
@@ -255,16 +249,6 @@ static void rna_def_cachefile(BlenderRNA *brna)
      prop, "Sequence", "Whether the cache is separated in a series of files");
  RNA_def_property_update(prop, 0, "rna_CacheFile_update");

  prop = RNA_def_property(srna, "use_render_procedural", PROP_BOOLEAN, PROP_NONE);
  RNA_def_property_ui_text(
      prop,
      "Use Render Engine Procedural",
      "Display boxes in the viewport as placeholders for the objects, Cycles will use a "
      "procedural to load the objects during viewport rendering in experimental mode, "
      "other render engines will also receive a placeholder and should take care of loading the "
      "Alembic data themselves if possible");
  RNA_def_property_update(prop, 0, "rna_CacheFile_dependency_update");

  /* ----------------- For Scene time ------------------- */

  prop = RNA_def_property(srna, "override_frame", PROP_BOOLEAN, PROP_NONE);
@@ -293,23 +277,6 @@ static void rna_def_cachefile(BlenderRNA *brna)
      "determine which file to use in a file sequence");
  RNA_def_property_update(prop, 0, "rna_CacheFile_update");

  /* ----------------- Cache controls ----------------- */

  prop = RNA_def_property(srna, "use_prefetch", PROP_BOOLEAN, PROP_NONE);
  RNA_def_property_ui_text(
      prop,
      "Use Prefetch",
      "When enabled, the Cycles Procedural will preload animation data for faster updates");
  RNA_def_property_update(prop, 0, "rna_CacheFile_update");

  prop = RNA_def_property(srna, "prefetch_cache_size", PROP_INT, PROP_UNSIGNED);
  RNA_def_property_ui_text(
      prop,
      "Prefetch Cache Size",
      "Memory usage limit in megabytes for the Cycles Procedural cache, if the data does not "
      "fit within the limit, rendering is aborted");
  RNA_def_property_update(prop, 0, "rna_CacheFile_update");

  /* ----------------- Axis Conversion ----------------- */

  prop = RNA_def_property(srna, "forward_axis", PROP_ENUM, PROP_NONE);

@@ -1013,12 +1013,6 @@ static void rna_def_render_engine(BlenderRNA *brna)
  RNA_def_property_flag(prop, PROP_REGISTER_OPTIONAL);
  RNA_def_property_ui_text(prop, "Use Stereo Viewport", "Support rendering stereo 3D viewport");

  prop = RNA_def_property(srna, "bl_use_alembic_procedural", PROP_BOOLEAN, PROP_NONE);
  RNA_def_property_boolean_sdna(prop, nullptr, "type->flag", RE_USE_ALEMBIC_PROCEDURAL);
  RNA_def_property_flag(prop, PROP_REGISTER_OPTIONAL);
  RNA_def_property_ui_text(
      prop, "Use Alembic Procedural", "Support loading Alembic data at render time");

  prop = RNA_def_property(srna, "bl_use_materialx", PROP_BOOLEAN, PROP_NONE);
  RNA_def_property_boolean_sdna(prop, nullptr, "type->flag", RE_USE_MATERIALX);
  RNA_def_property_flag(prop, PROP_REGISTER_OPTIONAL);

@@ -750,19 +750,6 @@ static void rna_uiTemplateCacheFileVelocity(uiLayout *layout,
  uiTemplateCacheFileVelocity(layout, &fileptr);
}

static void rna_uiTemplateCacheFileProcedural(uiLayout *layout,
                                              bContext *C,
                                              PointerRNA *ptr,
                                              const char *propname)
{
  PointerRNA fileptr;
  if (!uiTemplateCacheFilePointer(ptr, propname, &fileptr)) {
    return;
  }

  uiTemplateCacheFileProcedural(layout, C, &fileptr);
}

static void rna_uiTemplateCacheFileTimeSettings(uiLayout *layout,
                                                PointerRNA *ptr,
                                                const char *propname)
@@ -2204,12 +2191,6 @@ void RNA_api_ui_layout(StructRNA *srna)
  RNA_def_function_ui_description(func, "Show cache files velocity properties");
  api_ui_item_rna_common(func);

  func = RNA_def_function(
      srna, "template_cache_file_procedural", "rna_uiTemplateCacheFileProcedural");
  RNA_def_function_ui_description(func, "Show cache files render procedural properties");
  RNA_def_function_flag(func, FUNC_USE_CONTEXT);
  api_ui_item_rna_common(func);

  func = RNA_def_function(
      srna, "template_cache_file_time_settings", "rna_uiTemplateCacheFileTimeSettings");
  RNA_def_function_ui_description(func, "Show cache files time settings");

@@ -146,26 +146,6 @@ static bool can_use_mesh_for_orco_evaluation(MeshSeqCacheModifierData *mcmd,

  return false;
}

static Mesh *generate_bounding_box_mesh(const std::optional<Bounds<float3>> &bounds,
                                        Material **mat,
                                        short totcol)
{
  if (!bounds) {
    return nullptr;
  }

  Mesh *result = geometry::create_cuboid_mesh(bounds->max - bounds->min, 2, 2, 2);
  if (mat) {
    result->mat = static_cast<Material **>(MEM_dupallocN(mat));
    result->totcol = totcol;
  }

  bke::mesh_translate(*result, math::midpoint(bounds->min, bounds->max), false);

  return result;
}

#endif

static void modify_geometry_set(ModifierData *md,
@@ -200,24 +180,6 @@ static void modify_geometry_set(ModifierData *md,
    }
  }

  /* Do not process data if using a render procedural, return a box instead for displaying in the
   * viewport. */
  if (BKE_cache_file_uses_render_procedural(cache_file, scene)) {
    Mesh *bbox = nullptr;
    if (geometry_set->has_mesh()) {
      const Mesh *mesh = geometry_set->get_mesh();
      bbox = generate_bounding_box_mesh(mesh->bounds_min_max(), mesh->mat, mesh->totcol);
    }
    else if (geometry_set->has_pointcloud()) {
      const PointCloud *pointcloud = geometry_set->get_pointcloud();
      bbox = generate_bounding_box_mesh(
          pointcloud->bounds_min_max(), pointcloud->mat, pointcloud->totcol);
    }

    *geometry_set = bke::GeometrySet::from_mesh(bbox);
    return;
  }

  /* Time (in frames or seconds) between two velocity samples. Automatically computed to
   * scale the velocity vectors at render time for generating proper motion blur data. */
# ifdef WITH_ALEMBIC
@@ -290,12 +252,6 @@ static Mesh *modify_mesh(ModifierData *md, const ModifierEvalContext *ctx, Mesh
    }
  }

  /* Do not process data if using a render procedural, return a box instead for displaying in the
   * viewport. */
  if (BKE_cache_file_uses_render_procedural(cache_file, scene)) {
    return generate_bounding_box_mesh(org_mesh->bounds_min_max(), org_mesh->mat, org_mesh->totcol);
  }

  /* If this invocation is for the ORCO mesh, and the mesh hasn't changed topology, we
   * must return the mesh as-is instead of deforming it. */
  if (can_use_mesh_for_orco_evaluation(mcmd, ctx, mesh, frame_offset, time_offset, &err_str)) {
@@ -343,13 +299,11 @@ static Mesh *modify_mesh(ModifierData *md, const ModifierEvalContext *ctx, Mesh
#endif
}

static bool depends_on_time(Scene *scene, ModifierData *md)
static bool depends_on_time(Scene * /*scene*/, ModifierData *md)
{
#if defined(WITH_USD) || defined(WITH_ALEMBIC)
  MeshSeqCacheModifierData *mcmd = reinterpret_cast<MeshSeqCacheModifierData *>(md);
  /* Do not evaluate animations if using the render engine procedural. */
  return (mcmd->cache_file != nullptr) &&
         !BKE_cache_file_uses_render_procedural(mcmd->cache_file, scene);
  return (mcmd->cache_file != nullptr);
#else
  UNUSED_VARS(scene, md);
  return false;
@@ -436,22 +390,6 @@ static void time_panel_draw(const bContext * /*C*/, Panel *panel)
  uiTemplateCacheFileTimeSettings(layout, &fileptr);
}

static void render_procedural_panel_draw(const bContext *C, Panel *panel)
{
  uiLayout *layout = panel->layout;

  PointerRNA ob_ptr;
  PointerRNA *ptr = modifier_panel_get_property_pointers(panel, &ob_ptr);

  PointerRNA fileptr;
  if (!uiTemplateCacheFilePointer(ptr, "cache_file", &fileptr)) {
    return;
  }

  layout->use_property_split_set(true);
  uiTemplateCacheFileProcedural(layout, C, &fileptr);
}

static void override_layers_panel_draw(const bContext *C, Panel *panel)
{
  uiLayout *layout = panel->layout;
@@ -473,12 +411,6 @@ static void panel_register(ARegionType *region_type)
  PanelType *panel_type = modifier_panel_register(
      region_type, eModifierType_MeshSequenceCache, panel_draw);
  modifier_subpanel_register(region_type, "time", "Time", nullptr, time_panel_draw, panel_type);
  modifier_subpanel_register(region_type,
                             "render_procedural",
                             "Render Procedural",
                             nullptr,
                             render_procedural_panel_draw,
                             panel_type);
  modifier_subpanel_register(
      region_type, "velocity", "Velocity", nullptr, velocity_panel_draw, panel_type);
  modifier_subpanel_register(region_type,

@@ -50,8 +50,7 @@ enum RenderEngineTypeFlag {
  RE_USE_GPU_CONTEXT = (1 << 7),
  RE_USE_CUSTOM_FREESTYLE = (1 << 8),
  RE_USE_NO_IMAGE_SAVE = (1 << 9),
  RE_USE_ALEMBIC_PROCEDURAL = (1 << 10),
  RE_USE_MATERIALX = (1 << 11),
  RE_USE_MATERIALX = (1 << 10),
};

/** #RenderEngine.flag */
@@ -275,12 +274,6 @@ void RE_engines_init(void);
void RE_engines_exit(void);
void RE_engines_register(RenderEngineType *render_type);

/**
 * Return true if the RenderEngineType has native support for direct loading of Alembic data. For
 * Cycles, this also checks that the experimental feature set is enabled.
 */
bool RE_engine_supports_alembic_procedural(const RenderEngineType *render_type, Scene *scene);

RenderEngineType *RE_engines_find(const char *idname);

const rcti *RE_engine_get_current_tiles(struct Render *re, int *r_total_tiles);

@@ -110,19 +110,6 @@ bool RE_engine_is_external(const Render *re)
  return (re->engine && re->engine->type && re->engine->type->render);
}

bool RE_engine_supports_alembic_procedural(const RenderEngineType *render_type, Scene *scene)
{
  if ((render_type->flag & RE_USE_ALEMBIC_PROCEDURAL) == 0) {
    return false;
  }

  if (BKE_scene_uses_cycles(scene) && !BKE_scene_uses_cycles_experimental_features(scene)) {
    return false;
  }

  return true;
}

/* Create, Free */

RenderEngine *RE_engine_create(RenderEngineType *type)