Refactor: Tweak draw_pbvh.cc code organization
With the goal of removing the type and custom data domain from the attribute requests, tweak the order of the calls to ensure that each vertex buffer is not allocated until after we know about the source mesh data format. After that, it makes more sense to have the loop over BVH nodes inside each extraction function. Pull Request: https://projects.blender.org/blender/blender/pulls/132467
This commit is contained in:
@@ -336,29 +336,6 @@ static GPUVertFormat attribute_format(const OrigMeshData &orig_mesh_data,
|
||||
return format;
|
||||
}
|
||||
|
||||
/* Choose the GPU vertex format matching an attribute request: fixed formats
 * for the builtin custom requests, otherwise a format derived from the
 * generic attribute's name and type. */
static GPUVertFormat format_for_request(const OrigMeshData &orig_mesh_data,
                                        const AttributeRequest &request)
{
  if (const GenericRequest *attr = std::get_if<GenericRequest>(&request)) {
    return attribute_format(orig_mesh_data, attr->name, attr->type);
  }
  switch (std::get<CustomRequest>(request)) {
    case CustomRequest::Position:
      return position_format();
    case CustomRequest::Normal:
      return normal_format();
    case CustomRequest::Mask:
      return mask_format();
    case CustomRequest::FaceSet:
      return face_set_format();
  }
  BLI_assert_unreachable();
  return {};
}
|
||||
|
||||
static bool pbvh_attr_supported(const AttributeRequest &request)
|
||||
{
|
||||
if (std::holds_alternative<CustomRequest>(request)) {
|
||||
@@ -606,614 +583,579 @@ void DrawCacheImpl::free_nodes_with_changed_topology(const bke::pbvh::Tree &pbvh
|
||||
}
|
||||
}
|
||||
|
||||
static void fill_vbo_normal_mesh(const OffsetIndices<int> faces,
|
||||
const Span<int> corner_verts,
|
||||
const Span<bool> sharp_faces,
|
||||
const Span<float3> vert_normals,
|
||||
const Span<float3> face_normals,
|
||||
const Span<int> face_indices,
|
||||
gpu::VertBuf &vert_buf)
|
||||
BLI_NOINLINE static void ensure_vbos_allocated_mesh(const Object &object,
|
||||
const GPUVertFormat &format,
|
||||
const IndexMask &node_mask,
|
||||
const MutableSpan<gpu::VertBuf *> vbos)
|
||||
{
|
||||
short4 *data = vert_buf.data<short4>().data();
|
||||
|
||||
for (const int face : face_indices) {
|
||||
if (!sharp_faces.is_empty() && sharp_faces[face]) {
|
||||
const int face_size = faces[face].size();
|
||||
std::fill_n(data, face_size, normal_float_to_short(face_normals[face]));
|
||||
data += face_size;
|
||||
}
|
||||
else {
|
||||
for (const int vert : corner_verts.slice(faces[face])) {
|
||||
*data = normal_float_to_short(vert_normals[vert]);
|
||||
data++;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void fill_vbo_mask_mesh(const OffsetIndices<int> faces,
|
||||
const Span<int> corner_verts,
|
||||
const Span<float> mask,
|
||||
const Span<int> face_indices,
|
||||
gpu::VertBuf &vbo)
|
||||
{
|
||||
float *data = vbo.data<float>().data();
|
||||
for (const int face : face_indices) {
|
||||
for (const int vert : corner_verts.slice(faces[face])) {
|
||||
*data = mask[vert];
|
||||
data++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void fill_vbo_face_set_mesh(const OffsetIndices<int> faces,
|
||||
const Span<int> face_sets,
|
||||
const int color_default,
|
||||
const int color_seed,
|
||||
const Span<int> face_indices,
|
||||
gpu::VertBuf &vert_buf)
|
||||
{
|
||||
uchar4 *data = vert_buf.data<uchar4>().data();
|
||||
for (const int face : face_indices) {
|
||||
const int id = face_sets[face];
|
||||
|
||||
uchar4 fset_color(UCHAR_MAX);
|
||||
if (id != color_default) {
|
||||
BKE_paint_face_set_overlay_color_get(id, color_seed, fset_color);
|
||||
}
|
||||
else {
|
||||
/* Skip for the default color face set to render it white. */
|
||||
fset_color[0] = fset_color[1] = fset_color[2] = UCHAR_MAX;
|
||||
}
|
||||
|
||||
const int face_size = faces[face].size();
|
||||
std::fill_n(data, face_size, fset_color);
|
||||
data += face_size;
|
||||
}
|
||||
}
|
||||
|
||||
static void fill_vbo_attribute_mesh(const OffsetIndices<int> faces,
|
||||
const Span<int> corner_verts,
|
||||
const GSpan attribute,
|
||||
const bke::AttrDomain domain,
|
||||
const Span<int> face_indices,
|
||||
gpu::VertBuf &vert_buf)
|
||||
{
|
||||
bke::attribute_math::convert_to_static_type(attribute.type(), [&](auto dummy) {
|
||||
using T = decltype(dummy);
|
||||
if constexpr (!std::is_void_v<typename AttributeConverter<T>::VBOType>) {
|
||||
const Span<T> src = attribute.typed<T>();
|
||||
switch (domain) {
|
||||
case bke::AttrDomain::Point:
|
||||
extract_data_vert_mesh<T>(faces, corner_verts, src, face_indices, vert_buf);
|
||||
break;
|
||||
case bke::AttrDomain::Face:
|
||||
extract_data_face_mesh<T>(faces, src, face_indices, vert_buf);
|
||||
break;
|
||||
case bke::AttrDomain::Corner:
|
||||
extract_data_corner_mesh<T>(faces, src, face_indices, vert_buf);
|
||||
break;
|
||||
default:
|
||||
BLI_assert_unreachable();
|
||||
}
|
||||
const bke::pbvh::Tree &pbvh = *bke::object::pbvh_get(object);
|
||||
const Span<bke::pbvh::MeshNode> nodes = pbvh.nodes<bke::pbvh::MeshNode>();
|
||||
node_mask.foreach_index(GrainSize(64), [&](const int i) {
|
||||
if (!vbos[i]) {
|
||||
vbos[i] = GPU_vertbuf_create_with_format(format);
|
||||
}
|
||||
GPU_vertbuf_data_alloc(*vbos[i], nodes[i].corners_num());
|
||||
});
|
||||
}
|
||||
|
||||
static void fill_vbo_position_grids(const CCGKey &key,
|
||||
const Span<float3> positions,
|
||||
const bool use_flat_layout,
|
||||
const Span<int> grids,
|
||||
gpu::VertBuf &vert_buf)
|
||||
BLI_NOINLINE static void ensure_vbos_allocated_grids(const Object &object,
|
||||
const GPUVertFormat &format,
|
||||
const BitSpan use_flat_layout,
|
||||
const IndexMask &node_mask,
|
||||
const MutableSpan<gpu::VertBuf *> vbos)
|
||||
{
|
||||
float3 *data = vert_buf.data<float3>().data();
|
||||
if (use_flat_layout) {
|
||||
const int grid_size_1 = key.grid_size - 1;
|
||||
for (const int grid : grids) {
|
||||
const Span<float3> grid_positions = positions.slice(bke::ccg::grid_range(key, grid));
|
||||
for (int y = 0; y < grid_size_1; y++) {
|
||||
for (int x = 0; x < grid_size_1; x++) {
|
||||
*data = grid_positions[CCG_grid_xy_to_index(key.grid_size, x, y)];
|
||||
data++;
|
||||
*data = grid_positions[CCG_grid_xy_to_index(key.grid_size, x + 1, y)];
|
||||
data++;
|
||||
*data = grid_positions[CCG_grid_xy_to_index(key.grid_size, x + 1, y + 1)];
|
||||
data++;
|
||||
*data = grid_positions[CCG_grid_xy_to_index(key.grid_size, x, y + 1)];
|
||||
data++;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
for (const int grid : grids) {
|
||||
const Span<float3> grid_positions = positions.slice(bke::ccg::grid_range(key, grid));
|
||||
std::copy_n(grid_positions.data(), grid_positions.size(), data);
|
||||
data += grid_positions.size();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Fill a VBO with one packed short4 normal per grid vertex.
 *
 * The "flat" layout stores four vertices per grid quad (matching the
 * position fill for grids), which makes flat shading of sharp faces
 * possible; the non-flat layout stores each grid vertex once and therefore
 * cannot represent sharp faces. */
static void fill_vbo_normal_grids(const CCGKey &key,
                                  const Span<float3> positions,
                                  const Span<float3> normals,
                                  const Span<int> grid_to_face_map,
                                  const Span<bool> sharp_faces,
                                  const bool use_flat_layout,
                                  const Span<int> grids,
                                  gpu::VertBuf &vert_buf)
{
  short4 *data = vert_buf.data<short4>().data();

  if (use_flat_layout) {
    const int grid_size_1 = key.grid_size - 1;
    for (const int grid : grids) {
      const Span<float3> grid_positions = positions.slice(bke::ccg::grid_range(key, grid));
      const Span<float3> grid_normals = normals.slice(bke::ccg::grid_range(key, grid));
      if (!sharp_faces.is_empty() && sharp_faces[grid_to_face_map[grid]]) {
        /* Sharp face: compute a flat normal per quad from its four corner
         * positions and write it to all four quad corners. */
        for (int y = 0; y < grid_size_1; y++) {
          for (int x = 0; x < grid_size_1; x++) {
            float3 no;
            normal_quad_v3(no,
                           grid_positions[CCG_grid_xy_to_index(key.grid_size, x, y + 1)],
                           grid_positions[CCG_grid_xy_to_index(key.grid_size, x + 1, y + 1)],
                           grid_positions[CCG_grid_xy_to_index(key.grid_size, x + 1, y)],
                           grid_positions[CCG_grid_xy_to_index(key.grid_size, x, y)]);
            std::fill_n(data, 4, normal_float_to_short(no));
            data += 4;
          }
        }
      }
      else {
        /* Smooth face: replicate the quad's (x, y) corner normal to all four
         * written corners. NOTE(review): this is per-quad, not per-corner —
         * presumably acceptable for sculpt display; confirm intent. */
        for (int y = 0; y < grid_size_1; y++) {
          for (int x = 0; x < grid_size_1; x++) {
            std::fill_n(
                data,
                4,
                normal_float_to_short(grid_normals[CCG_grid_xy_to_index(key.grid_size, x, y)]));
            data += 4;
          }
        }
      }
    }
  }
  else {
    /* The non-flat VBO layout does not support sharp faces. */
    for (const int grid : grids) {
      for (const float3 &normal : normals.slice(bke::ccg::grid_range(key, grid))) {
        *data = normal_float_to_short(normal);
        data++;
      }
    }
  }
}
|
||||
|
||||
/* Fill a VBO with one float mask value per grid vertex.
 *
 * In the flat layout, four values are written per grid quad in the same
 * corner order as the grid position fill (x,y), (x+1,y), (x+1,y+1), (x,y+1)
 * so the two buffers stay index-aligned. In the non-flat layout the grid's
 * mask values are copied through unchanged. */
static void fill_vbo_mask_grids(const CCGKey &key,
                                const Span<float> masks,
                                const bool use_flat_layout,
                                const Span<int> grids,
                                gpu::VertBuf &vert_buf)
{
  float *data = vert_buf.data<float>().data();
  if (use_flat_layout) {
    const int grid_size_1 = key.grid_size - 1;
    for (const int grid : grids) {
      const Span<float> grid_masks = masks.slice(bke::ccg::grid_range(key, grid));
      for (int y = 0; y < grid_size_1; y++) {
        for (int x = 0; x < grid_size_1; x++) {
          /* One value per quad corner, matching the flat position layout. */
          *data = grid_masks[CCG_grid_xy_to_index(key.grid_size, x, y)];
          data++;
          *data = grid_masks[CCG_grid_xy_to_index(key.grid_size, x + 1, y)];
          data++;
          *data = grid_masks[CCG_grid_xy_to_index(key.grid_size, x + 1, y + 1)];
          data++;
          *data = grid_masks[CCG_grid_xy_to_index(key.grid_size, x, y + 1)];
          data++;
        }
      }
    }
  }
  else {
    /* Non-flat layout: grid values map 1:1 to VBO entries. */
    for (const int grid : grids) {
      const Span<float> grid_masks = masks.slice(bke::ccg::grid_range(key, grid));
      std::copy_n(grid_masks.data(), grid_masks.size(), data);
      data += grid_masks.size();
    }
  }
}
|
||||
|
||||
static void fill_vbo_face_set_grids(const CCGKey &key,
|
||||
const Span<int> grid_to_face_map,
|
||||
const Span<int> face_sets,
|
||||
const int color_default,
|
||||
const int color_seed,
|
||||
const bool use_flat_layout,
|
||||
const Span<int> grid_indices,
|
||||
gpu::VertBuf &vert_buf)
|
||||
{
|
||||
const int verts_per_grid = use_flat_layout ? square_i(key.grid_size - 1) * 4 :
|
||||
square_i(key.grid_size);
|
||||
uchar4 *data = vert_buf.data<uchar4>().data();
|
||||
for (const int i : grid_indices.index_range()) {
|
||||
uchar4 color{UCHAR_MAX};
|
||||
const int fset = face_sets[grid_to_face_map[grid_indices[i]]];
|
||||
if (fset != color_default) {
|
||||
BKE_paint_face_set_overlay_color_get(fset, color_seed, color);
|
||||
}
|
||||
|
||||
std::fill_n(data, verts_per_grid, color);
|
||||
data += verts_per_grid;
|
||||
}
|
||||
}
|
||||
|
||||
static void fill_vbos_grids(const Object &object,
|
||||
const OrigMeshData &orig_mesh_data,
|
||||
const BitSpan use_flat_layout,
|
||||
const IndexMask &node_mask,
|
||||
const AttributeRequest &request,
|
||||
const MutableSpan<gpu::VertBuf *> vbos)
|
||||
{
|
||||
const SculptSession &ss = *object.sculpt;
|
||||
const bke::pbvh::Tree &pbvh = *bke::object::pbvh_get(object);
|
||||
const Span<bke::pbvh::GridsNode> nodes = pbvh.nodes<bke::pbvh::GridsNode>();
|
||||
const SubdivCCG &subdiv_ccg = *ss.subdiv_ccg;
|
||||
const CCGKey key = BKE_subdiv_ccg_key_top_level(subdiv_ccg);
|
||||
|
||||
if (const CustomRequest *request_type = std::get_if<CustomRequest>(&request)) {
|
||||
switch (*request_type) {
|
||||
case CustomRequest::Position: {
|
||||
node_mask.foreach_index(GrainSize(1), [&](const int i) {
|
||||
fill_vbo_position_grids(
|
||||
key, subdiv_ccg.positions, use_flat_layout[i], nodes[i].grids(), *vbos[i]);
|
||||
});
|
||||
break;
|
||||
}
|
||||
case CustomRequest::Normal: {
|
||||
const Span<int> grid_to_face_map = subdiv_ccg.grid_to_face_map;
|
||||
const bke::AttributeAccessor attributes = orig_mesh_data.attributes;
|
||||
const VArraySpan sharp_faces = *attributes.lookup<bool>("sharp_face",
|
||||
bke::AttrDomain::Face);
|
||||
node_mask.foreach_index(GrainSize(1), [&](const int i) {
|
||||
fill_vbo_normal_grids(key,
|
||||
subdiv_ccg.positions,
|
||||
subdiv_ccg.normals,
|
||||
grid_to_face_map,
|
||||
sharp_faces,
|
||||
use_flat_layout[i],
|
||||
nodes[i].grids(),
|
||||
*vbos[i]);
|
||||
});
|
||||
|
||||
break;
|
||||
}
|
||||
case CustomRequest::Mask: {
|
||||
const Span<float> masks = subdiv_ccg.masks;
|
||||
if (!masks.is_empty()) {
|
||||
node_mask.foreach_index(GrainSize(1), [&](const int i) {
|
||||
fill_vbo_mask_grids(key, masks, use_flat_layout[i], nodes[i].grids(), *vbos[i]);
|
||||
});
|
||||
}
|
||||
else {
|
||||
node_mask.foreach_index(GrainSize(64),
|
||||
[&](const int i) { vbos[i]->data<float>().fill(0.0f); });
|
||||
}
|
||||
break;
|
||||
}
|
||||
case CustomRequest::FaceSet: {
|
||||
const int face_set_default = orig_mesh_data.face_set_default;
|
||||
const int face_set_seed = orig_mesh_data.face_set_seed;
|
||||
const Span<int> grid_to_face_map = subdiv_ccg.grid_to_face_map;
|
||||
const bke::AttributeAccessor attributes = orig_mesh_data.attributes;
|
||||
if (const VArray<int> face_sets = *attributes.lookup<int>(".sculpt_face_set",
|
||||
bke::AttrDomain::Face))
|
||||
{
|
||||
const VArraySpan<int> face_sets_span(face_sets);
|
||||
node_mask.foreach_index(GrainSize(1), [&](const int i) {
|
||||
fill_vbo_face_set_grids(key,
|
||||
grid_to_face_map,
|
||||
face_sets_span,
|
||||
face_set_default,
|
||||
face_set_seed,
|
||||
use_flat_layout[i],
|
||||
nodes[i].grids(),
|
||||
*vbos[i]);
|
||||
});
|
||||
}
|
||||
else {
|
||||
node_mask.foreach_index(
|
||||
GrainSize(1), [&](const int i) { vbos[i]->data<uchar4>().fill(uchar4{UCHAR_MAX}); });
|
||||
}
|
||||
break;
|
||||
}
|
||||
const SubdivCCG &subdiv_ccg = *object.sculpt->subdiv_ccg;
|
||||
node_mask.foreach_index(GrainSize(64), [&](const int i) {
|
||||
if (!vbos[i]) {
|
||||
vbos[i] = GPU_vertbuf_create_with_format(format);
|
||||
}
|
||||
}
|
||||
else {
|
||||
const eCustomDataType type = std::get<GenericRequest>(request).type;
|
||||
node_mask.foreach_index(GrainSize(1), [&](const int i) {
|
||||
bke::attribute_math::convert_to_static_type(type, [&](auto dummy) {
|
||||
using T = decltype(dummy);
|
||||
using Converter = AttributeConverter<T>;
|
||||
using VBOType = typename Converter::VBOType;
|
||||
if constexpr (!std::is_void_v<VBOType>) {
|
||||
vbos[i]->data<VBOType>().fill(Converter::convert(fallback_value_for_fill<T>()));
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
const int verts_per_grid = use_flat_layout[i] ? square_i(subdiv_ccg.grid_area - 1) * 4 :
|
||||
square_i(subdiv_ccg.grid_area);
|
||||
const int verts_num = nodes[i].grids().size() * verts_per_grid;
|
||||
GPU_vertbuf_data_alloc(*vbos[i], verts_num);
|
||||
});
|
||||
}
|
||||
|
||||
static void fill_vbos_mesh(const Object &object,
|
||||
const OrigMeshData &orig_mesh_data,
|
||||
const IndexMask &node_mask,
|
||||
const AttributeRequest &request,
|
||||
const MutableSpan<gpu::VertBuf *> vbos)
|
||||
BLI_NOINLINE static void ensure_vbos_allocated_bmesh(const Object &object,
|
||||
const GPUVertFormat &format,
|
||||
const IndexMask &node_mask,
|
||||
const MutableSpan<gpu::VertBuf *> vbos)
|
||||
{
|
||||
const bke::pbvh::Tree &pbvh = *bke::object::pbvh_get(object);
|
||||
const Span<bke::pbvh::BMeshNode> nodes = pbvh.nodes<bke::pbvh::BMeshNode>();
|
||||
node_mask.foreach_index(GrainSize(64), [&](const int i) {
|
||||
if (!vbos[i]) {
|
||||
vbos[i] = GPU_vertbuf_create_with_format(format);
|
||||
}
|
||||
const Set<BMFace *, 0> &faces = BKE_pbvh_bmesh_node_faces(
|
||||
&const_cast<bke::pbvh::BMeshNode &>(nodes[i]));
|
||||
const int verts_num = count_visible_tris_bmesh(faces) * 3;
|
||||
GPU_vertbuf_data_alloc(*vbos[i], verts_num);
|
||||
});
|
||||
}
|
||||
|
||||
/* (Re)allocate and fill the position VBOs for the given mesh PBVH nodes.
 * Positions are gathered per face corner from the evaluated vertex
 * positions. */
static void update_positions_mesh(const Object &object,
                                  const IndexMask &node_mask,
                                  MutableSpan<gpu::VertBuf *> vbos)
{
  const bke::pbvh::Tree &pbvh = *bke::object::pbvh_get(object);
  const Span<bke::pbvh::MeshNode> nodes = pbvh.nodes<bke::pbvh::MeshNode>();
  const Mesh &mesh = *static_cast<const Mesh *>(object.data);
  const OffsetIndices<int> faces = mesh.faces();
  const Span<int> corner_verts = mesh.corner_verts();
  const Span<float3> vert_positions = bke::pbvh::vert_positions_eval_from_eval(object);
  /* Allocate first so each node's VBO has room for one entry per corner. */
  ensure_vbos_allocated_mesh(object, position_format(), node_mask, vbos);
  node_mask.foreach_index(GrainSize(1), [&](const int i) {
    extract_data_vert_mesh<float3>(
        faces, corner_verts, vert_positions, nodes[i].faces(), *vbos[i]);
  });
}
|
||||
|
||||
if (const CustomRequest *request_type = std::get_if<CustomRequest>(&request)) {
|
||||
switch (*request_type) {
|
||||
case CustomRequest::Position: {
|
||||
const Span<float3> vert_positions = bke::pbvh::vert_positions_eval_from_eval(object);
|
||||
node_mask.foreach_index(GrainSize(1), [&](const int i) {
|
||||
extract_data_vert_mesh<float3>(
|
||||
faces, corner_verts, vert_positions, nodes[i].faces(), *vbos[i]);
|
||||
});
|
||||
break;
|
||||
static void update_normals_mesh(const Object &object,
|
||||
const IndexMask &node_mask,
|
||||
MutableSpan<gpu::VertBuf *> vbos)
|
||||
{
|
||||
const bke::pbvh::Tree &pbvh = *bke::object::pbvh_get(object);
|
||||
const Span<bke::pbvh::MeshNode> nodes = pbvh.nodes<bke::pbvh::MeshNode>();
|
||||
const Mesh &mesh = *static_cast<const Mesh *>(object.data);
|
||||
const OffsetIndices<int> faces = mesh.faces();
|
||||
const Span<int> corner_verts = mesh.corner_verts();
|
||||
const Span<float3> vert_normals = bke::pbvh::vert_normals_eval_from_eval(object);
|
||||
const Span<float3> face_normals = bke::pbvh::face_normals_eval_from_eval(object);
|
||||
const bke::AttributeAccessor attributes = mesh.attributes();
|
||||
const VArraySpan sharp_faces = *attributes.lookup<bool>("sharp_face", bke::AttrDomain::Face);
|
||||
ensure_vbos_allocated_mesh(object, normal_format(), node_mask, vbos);
|
||||
node_mask.foreach_index(GrainSize(1), [&](const int i) {
|
||||
short4 *data = vbos[i]->data<short4>().data();
|
||||
|
||||
for (const int face : nodes[i].faces()) {
|
||||
if (!sharp_faces.is_empty() && sharp_faces[face]) {
|
||||
const int face_size = faces[face].size();
|
||||
std::fill_n(data, face_size, normal_float_to_short(face_normals[face]));
|
||||
data += face_size;
|
||||
}
|
||||
case CustomRequest::Normal: {
|
||||
const Span<float3> vert_normals = bke::pbvh::vert_normals_eval_from_eval(object);
|
||||
const Span<float3> face_normals = bke::pbvh::face_normals_eval_from_eval(object);
|
||||
const bke::AttributeAccessor attributes = mesh.attributes();
|
||||
const VArraySpan sharp_faces = *attributes.lookup<bool>("sharp_face",
|
||||
bke::AttrDomain::Face);
|
||||
node_mask.foreach_index(GrainSize(1), [&](const int i) {
|
||||
fill_vbo_normal_mesh(faces,
|
||||
corner_verts,
|
||||
sharp_faces,
|
||||
vert_normals,
|
||||
face_normals,
|
||||
nodes[i].faces(),
|
||||
*vbos[i]);
|
||||
});
|
||||
break;
|
||||
}
|
||||
case CustomRequest::Mask: {
|
||||
const VArraySpan mask = *orig_mesh_data.attributes.lookup<float>(".sculpt_mask",
|
||||
bke::AttrDomain::Point);
|
||||
if (!mask.is_empty()) {
|
||||
node_mask.foreach_index(GrainSize(1), [&](const int i) {
|
||||
fill_vbo_mask_mesh(faces, corner_verts, mask, nodes[i].faces(), *vbos[i]);
|
||||
});
|
||||
else {
|
||||
for (const int vert : corner_verts.slice(faces[face])) {
|
||||
*data = normal_float_to_short(vert_normals[vert]);
|
||||
data++;
|
||||
}
|
||||
else {
|
||||
node_mask.foreach_index(GrainSize(64),
|
||||
[&](const int i) { vbos[i]->data<float>().fill(0.0f); });
|
||||
}
|
||||
break;
|
||||
}
|
||||
case CustomRequest::FaceSet: {
|
||||
const int face_set_default = orig_mesh_data.face_set_default;
|
||||
const int face_set_seed = orig_mesh_data.face_set_seed;
|
||||
const VArraySpan face_sets = *orig_mesh_data.attributes.lookup<int>(".sculpt_face_set",
|
||||
bke::AttrDomain::Face);
|
||||
if (!face_sets.is_empty()) {
|
||||
node_mask.foreach_index(GrainSize(1), [&](const int i) {
|
||||
fill_vbo_face_set_mesh(
|
||||
faces, face_sets, face_set_default, face_set_seed, nodes[i].faces(), *vbos[i]);
|
||||
});
|
||||
}
|
||||
else {
|
||||
node_mask.foreach_index(GrainSize(64),
|
||||
[&](const int i) { vbos[i]->data<uchar4>().fill(uchar4(255)); });
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
const GenericRequest &attr = std::get<GenericRequest>(request);
|
||||
const StringRef name = attr.name;
|
||||
const bke::AttrDomain domain = attr.domain;
|
||||
const eCustomDataType data_type = attr.type;
|
||||
const bke::AttributeAccessor attributes = orig_mesh_data.attributes;
|
||||
const GVArraySpan attribute = *attributes.lookup_or_default(name, domain, data_type);
|
||||
node_mask.foreach_index(GrainSize(1), [&](const int i) {
|
||||
fill_vbo_attribute_mesh(faces, corner_verts, attribute, domain, nodes[i].faces(), *vbos[i]);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/* Fill a position VBO from BMesh faces, writing three float3 corner
 * positions per visible face (hidden faces are skipped entirely).
 * NOTE(review): assumes every face is a triangle (exactly three loops) —
 * consistent with dynamic-topology sculpting; confirm for other callers. */
static void fill_vbo_position_bmesh(const Set<BMFace *, 0> &faces, gpu::VertBuf &vbo)
{
  float3 *data = vbo.data<float3>().data();
  for (const BMFace *face : faces) {
    if (BM_elem_flag_test(face, BM_ELEM_HIDDEN)) {
      continue;
    }
    /* Corner order: previous, current, next loop vertex. */
    const BMLoop *l = face->l_first;
    *data = l->prev->v->co;
    data++;
    *data = l->v->co;
    data++;
    *data = l->next->v->co;
    data++;
  }
}
|
||||
|
||||
/* Fill a packed short4 normal VBO from BMesh faces: per-vertex normals for
 * smooth faces, the face normal replicated to all three corners for flat
 * faces. Hidden faces are skipped; corner order matches the position fill. */
static void fill_vbo_normal_bmesh(const Set<BMFace *, 0> &faces, gpu::VertBuf &vbo)
{
  short4 *data = vbo.data<short4>().data();
  for (const BMFace *face : faces) {
    if (BM_elem_flag_test(face, BM_ELEM_HIDDEN)) {
      continue;
    }
    if (BM_elem_flag_test(face, BM_ELEM_SMOOTH)) {
      /* Smooth: one vertex normal per corner. */
      const BMLoop *l = face->l_first;
      *data = normal_float_to_short(l->prev->v->no);
      data++;
      *data = normal_float_to_short(l->v->no);
      data++;
      *data = normal_float_to_short(l->next->v->no);
      data++;
    }
    else {
      /* Flat: the face normal for all three corners. */
      std::fill_n(data, 3, normal_float_to_short(face->no));
      data += 3;
    }
  }
}
|
||||
|
||||
static void fill_vbo_mask_bmesh(const Set<BMFace *, 0> &faces,
|
||||
const int cd_offset,
|
||||
gpu::VertBuf &vbo)
|
||||
{
|
||||
float *data = vbo.data<float>().data();
|
||||
for (const BMFace *face : faces) {
|
||||
if (BM_elem_flag_test(face, BM_ELEM_HIDDEN)) {
|
||||
continue;
|
||||
}
|
||||
const BMLoop *l = face->l_first;
|
||||
*data = bmesh_cd_vert_get<float>(*l->prev->v, cd_offset);
|
||||
data++;
|
||||
*data = bmesh_cd_vert_get<float>(*l->v, cd_offset);
|
||||
data++;
|
||||
*data = bmesh_cd_vert_get<float>(*l->next->v, cd_offset);
|
||||
data++;
|
||||
}
|
||||
}
|
||||
|
||||
/* Fill a face set overlay color VBO from BMesh faces: one uchar4 color
 * replicated to the three corners of each visible triangle. The face set ID
 * is read from face custom data at `offset`; the default face set stays
 * white (UCHAR_MAX), others get a seeded overlay color. */
static void fill_vbo_face_set_bmesh(const Set<BMFace *, 0> &faces,
                                    const int color_default,
                                    const int color_seed,
                                    const int offset,
                                    gpu::VertBuf &vbo)
{
  uchar4 *data = vbo.data<uchar4>().data();
  for (const BMFace *face : faces) {
    if (BM_elem_flag_test(face, BM_ELEM_HIDDEN)) {
      continue;
    }
    uchar4 color{UCHAR_MAX};
    const int fset = bmesh_cd_face_get<int>(*face, offset);
    if (fset != color_default) {
      BKE_paint_face_set_overlay_color_get(fset, color_seed, color);
    }
    std::fill_n(data, 3, color);
    data += 3;
  }
}
|
||||
|
||||
/* Fill a VBO with a generic BMesh attribute read from custom data at
 * `offset`, dispatching on the attribute's static type and domain. Types
 * with no VBO representation (void VBOType) are silently skipped. */
static void fill_vbo_attribute_bmesh(const Set<BMFace *, 0> &faces,
                                     const eCustomDataType data_type,
                                     const bke::AttrDomain domain,
                                     const int offset,
                                     gpu::VertBuf &vbo)
{
  bke::attribute_math::convert_to_static_type(data_type, [&](auto dummy) {
    using T = decltype(dummy);
    if constexpr (!std::is_void_v<typename AttributeConverter<T>::VBOType>) {
      /* Extraction differs per domain: vertex data is gathered per corner,
       * face data is replicated per corner, corner data is copied directly. */
      switch (domain) {
        case bke::AttrDomain::Point:
          extract_data_vert_bmesh<T>(faces, offset, vbo);
          break;
        case bke::AttrDomain::Face:
          extract_data_face_bmesh<T>(faces, offset, vbo);
          break;
        case bke::AttrDomain::Corner:
          extract_data_corner_bmesh<T>(faces, offset, vbo);
          break;
        default:
          BLI_assert_unreachable();
      }
    }
  });
}
|
||||
|
||||
static void fill_vbos_bmesh(const Object &object,
|
||||
const OrigMeshData &orig_mesh_data,
|
||||
const IndexMask &node_mask,
|
||||
const AttributeRequest &request,
|
||||
const MutableSpan<gpu::VertBuf *> vbos)
|
||||
BLI_NOINLINE static void update_masks_mesh(const Object &object,
|
||||
const OrigMeshData &orig_mesh_data,
|
||||
const IndexMask &node_mask,
|
||||
MutableSpan<gpu::VertBuf *> vbos)
|
||||
{
|
||||
const SculptSession &ss = *object.sculpt;
|
||||
const bke::pbvh::Tree &pbvh = *bke::object::pbvh_get(object);
|
||||
const Span<bke::pbvh::BMeshNode> nodes = pbvh.nodes<bke::pbvh::BMeshNode>();
|
||||
const BMesh &bm = *ss.bm;
|
||||
if (const CustomRequest *request_type = std::get_if<CustomRequest>(&request)) {
|
||||
switch (*request_type) {
|
||||
case CustomRequest::Position: {
|
||||
node_mask.foreach_index(GrainSize(1), [&](const int i) {
|
||||
fill_vbo_position_bmesh(
|
||||
BKE_pbvh_bmesh_node_faces(&const_cast<bke::pbvh::BMeshNode &>(nodes[i])), *vbos[i]);
|
||||
});
|
||||
break;
|
||||
}
|
||||
case CustomRequest::Normal: {
|
||||
node_mask.foreach_index(GrainSize(1), [&](const int i) {
|
||||
fill_vbo_normal_bmesh(
|
||||
BKE_pbvh_bmesh_node_faces(&const_cast<bke::pbvh::BMeshNode &>(nodes[i])), *vbos[i]);
|
||||
});
|
||||
break;
|
||||
}
|
||||
case CustomRequest::Mask: {
|
||||
const int cd_offset = CustomData_get_offset_named(
|
||||
&bm.vdata, CD_PROP_FLOAT, ".sculpt_mask");
|
||||
if (cd_offset != -1) {
|
||||
node_mask.foreach_index(GrainSize(1), [&](const int i) {
|
||||
fill_vbo_mask_bmesh(
|
||||
BKE_pbvh_bmesh_node_faces(&const_cast<bke::pbvh::BMeshNode &>(nodes[i])),
|
||||
cd_offset,
|
||||
*vbos[i]);
|
||||
});
|
||||
}
|
||||
else {
|
||||
node_mask.foreach_index(GrainSize(64),
|
||||
[&](const int i) { vbos[i]->data<float>().fill(0.0f); });
|
||||
}
|
||||
break;
|
||||
}
|
||||
case CustomRequest::FaceSet: {
|
||||
const int face_set_default = orig_mesh_data.face_set_default;
|
||||
const int face_set_seed = orig_mesh_data.face_set_seed;
|
||||
const int cd_offset = CustomData_get_offset_named(
|
||||
&bm.pdata, CD_PROP_INT32, ".sculpt_face_set");
|
||||
if (cd_offset != -1) {
|
||||
node_mask.foreach_index(GrainSize(1), [&](const int i) {
|
||||
fill_vbo_face_set_bmesh(
|
||||
BKE_pbvh_bmesh_node_faces(&const_cast<bke::pbvh::BMeshNode &>(nodes[i])),
|
||||
face_set_default,
|
||||
face_set_seed,
|
||||
cd_offset,
|
||||
*vbos[i]);
|
||||
});
|
||||
}
|
||||
else {
|
||||
node_mask.foreach_index(GrainSize(64),
|
||||
[&](const int i) { vbos[i]->data<uchar4>().fill(uchar4(255)); });
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
const GenericRequest &attr = std::get<GenericRequest>(request);
|
||||
const bke::AttrDomain domain = attr.domain;
|
||||
const eCustomDataType data_type = attr.type;
|
||||
const CustomData &custom_data = *get_cdata(bm, domain);
|
||||
const int offset = CustomData_get_offset_named(&custom_data, data_type, attr.name);
|
||||
const Span<bke::pbvh::MeshNode> nodes = pbvh.nodes<bke::pbvh::MeshNode>();
|
||||
const Mesh &mesh = *static_cast<const Mesh *>(object.data);
|
||||
const OffsetIndices<int> faces = mesh.faces();
|
||||
const Span<int> corner_verts = mesh.corner_verts();
|
||||
const VArraySpan mask = *orig_mesh_data.attributes.lookup<float>(".sculpt_mask",
|
||||
bke::AttrDomain::Point);
|
||||
ensure_vbos_allocated_mesh(object, mask_format(), node_mask, vbos);
|
||||
if (!mask.is_empty()) {
|
||||
node_mask.foreach_index(GrainSize(1), [&](const int i) {
|
||||
fill_vbo_attribute_bmesh(
|
||||
BKE_pbvh_bmesh_node_faces(&const_cast<bke::pbvh::BMeshNode &>(nodes[i])),
|
||||
data_type,
|
||||
domain,
|
||||
offset,
|
||||
*vbos[i]);
|
||||
float *data = vbos[i]->data<float>().data();
|
||||
for (const int face : nodes[i].faces()) {
|
||||
for (const int vert : corner_verts.slice(faces[face])) {
|
||||
*data = mask[vert];
|
||||
data++;
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
else {
|
||||
node_mask.foreach_index(GrainSize(64),
|
||||
[&](const int i) { vbos[i]->data<float>().fill(0.0f); });
|
||||
}
|
||||
}
|
||||
|
||||
/* (Re)allocate and fill the face set overlay color VBOs for the given mesh
 * PBVH nodes. Each face's color is replicated to all of its corners; when
 * the mesh has no ".sculpt_face_set" attribute, all VBOs are filled with
 * plain white. */
BLI_NOINLINE static void update_face_sets_mesh(const Object &object,
                                               const OrigMeshData &orig_mesh_data,
                                               const IndexMask &node_mask,
                                               MutableSpan<gpu::VertBuf *> vbos)
{
  const bke::pbvh::Tree &pbvh = *bke::object::pbvh_get(object);
  const Span<bke::pbvh::MeshNode> nodes = pbvh.nodes<bke::pbvh::MeshNode>();
  const Mesh &mesh = *static_cast<const Mesh *>(object.data);
  const OffsetIndices<int> faces = mesh.faces();
  const int color_default = orig_mesh_data.face_set_default;
  const int color_seed = orig_mesh_data.face_set_seed;
  const VArraySpan face_sets = *orig_mesh_data.attributes.lookup<int>(".sculpt_face_set",
                                                                      bke::AttrDomain::Face);
  /* Allocate first so each node's VBO has one uchar4 per corner. */
  ensure_vbos_allocated_mesh(object, face_set_format(), node_mask, vbos);
  if (!face_sets.is_empty()) {
    node_mask.foreach_index(GrainSize(1), [&](const int i) {
      uchar4 *data = vbos[i]->data<uchar4>().data();
      for (const int face : nodes[i].faces()) {
        const int id = face_sets[face];

        uchar4 fset_color(UCHAR_MAX);
        if (id != color_default) {
          BKE_paint_face_set_overlay_color_get(id, color_seed, fset_color);
        }
        else {
          /* Skip for the default color face set to render it white. */
          fset_color[0] = fset_color[1] = fset_color[2] = UCHAR_MAX;
        }

        /* Replicate the color across all corners of the face. */
        const int face_size = faces[face].size();
        std::fill_n(data, face_size, fset_color);
        data += face_size;
      }
    });
  }
  else {
    /* No face set attribute: everything renders white. */
    node_mask.foreach_index(GrainSize(64),
                            [&](const int i) { vbos[i]->data<uchar4>().fill(uchar4(255)); });
  }
}
|
||||
|
||||
/* (Re)allocate and fill VBOs for a generic attribute request on mesh PBVH
 * nodes, dispatching on the attribute's static type and source domain.
 * Missing attributes fall back to a default-valued virtual array. Types
 * with no VBO representation (void VBOType) are silently skipped. */
BLI_NOINLINE static void update_generic_attribute_mesh(const Object &object,
                                                       const OrigMeshData &orig_mesh_data,
                                                       const IndexMask &node_mask,
                                                       const GenericRequest &attr,
                                                       MutableSpan<gpu::VertBuf *> vbos)
{
  const bke::pbvh::Tree &pbvh = *bke::object::pbvh_get(object);
  const Span<bke::pbvh::MeshNode> nodes = pbvh.nodes<bke::pbvh::MeshNode>();
  const Mesh &mesh = *static_cast<const Mesh *>(object.data);
  const OffsetIndices<int> faces = mesh.faces();
  const Span<int> corner_verts = mesh.corner_verts();
  const StringRefNull name = attr.name;
  const bke::AttrDomain domain = attr.domain;
  const eCustomDataType data_type = attr.type;
  const bke::AttributeAccessor attributes = orig_mesh_data.attributes;
  const GVArraySpan attribute = *attributes.lookup_or_default(name, domain, data_type);
  /* Allocate with a format derived from the attribute before filling. */
  ensure_vbos_allocated_mesh(
      object, attribute_format(orig_mesh_data, name, data_type), node_mask, vbos);
  node_mask.foreach_index(GrainSize(1), [&](const int i) {
    bke::attribute_math::convert_to_static_type(attribute.type(), [&](auto dummy) {
      using T = decltype(dummy);
      if constexpr (!std::is_void_v<typename AttributeConverter<T>::VBOType>) {
        const Span<T> src = attribute.typed<T>();
        /* Vertex data is gathered per corner, face data replicated per
         * corner, corner data copied directly. */
        switch (domain) {
          case bke::AttrDomain::Point:
            extract_data_vert_mesh<T>(faces, corner_verts, src, nodes[i].faces(), *vbos[i]);
            break;
          case bke::AttrDomain::Face:
            extract_data_face_mesh<T>(faces, src, nodes[i].faces(), *vbos[i]);
            break;
          case bke::AttrDomain::Corner:
            extract_data_corner_mesh<T>(faces, src, nodes[i].faces(), *vbos[i]);
            break;
          default:
            BLI_assert_unreachable();
        }
      }
    });
  });
}
|
||||
|
||||
/* (Re)allocate and fill position VBOs for grids PBVH nodes.
 *
 * Per-node "flat" layout writes four float3 corners per grid quad in the
 * order (x,y), (x+1,y), (x+1,y+1), (x,y+1); the non-flat layout copies each
 * grid vertex position once. Other grid fills must match this ordering. */
BLI_NOINLINE static void fill_positions_grids(const Object &object,
                                              const BitSpan use_flat_layout,
                                              const IndexMask &node_mask,
                                              const MutableSpan<gpu::VertBuf *> vbos)
{
  const bke::pbvh::Tree &pbvh = *bke::object::pbvh_get(object);
  const Span<bke::pbvh::GridsNode> nodes = pbvh.nodes<bke::pbvh::GridsNode>();
  const SubdivCCG &subdiv_ccg = *object.sculpt->subdiv_ccg;
  const Span<float3> positions = subdiv_ccg.positions;
  const CCGKey key = BKE_subdiv_ccg_key_top_level(subdiv_ccg);
  /* Allocate first; the size per node depends on its layout. */
  ensure_vbos_allocated_grids(object, position_format(), use_flat_layout, node_mask, vbos);
  node_mask.foreach_index(GrainSize(1), [&](const int i) {
    float3 *data = vbos[i]->data<float3>().data();
    if (use_flat_layout[i]) {
      const int grid_size_1 = key.grid_size - 1;
      for (const int grid : nodes[i].grids()) {
        const Span<float3> grid_positions = positions.slice(bke::ccg::grid_range(key, grid));
        for (int y = 0; y < grid_size_1; y++) {
          for (int x = 0; x < grid_size_1; x++) {
            /* Four explicit corners per quad, counter-clockwise. */
            *data = grid_positions[CCG_grid_xy_to_index(key.grid_size, x, y)];
            data++;
            *data = grid_positions[CCG_grid_xy_to_index(key.grid_size, x + 1, y)];
            data++;
            *data = grid_positions[CCG_grid_xy_to_index(key.grid_size, x + 1, y + 1)];
            data++;
            *data = grid_positions[CCG_grid_xy_to_index(key.grid_size, x, y + 1)];
            data++;
          }
        }
      }
    }
    else {
      /* Non-flat layout: grid positions map 1:1 to VBO entries. */
      for (const int grid : nodes[i].grids()) {
        const Span<float3> grid_positions = positions.slice(bke::ccg::grid_range(key, grid));
        std::copy_n(grid_positions.data(), grid_positions.size(), data);
        data += grid_positions.size();
      }
    }
  });
}
|
||||
|
||||
/* Fill per-node vertex buffers with multires grid normals, packed as GPU shorts.
 * For sharp (flat-shaded) faces, a per-quad normal is recomputed from the quad's corner
 * positions; smooth faces reuse the cached SubdivCCG vertex normals. */
BLI_NOINLINE static void fill_normals_grids(const Object &object,
                                            const OrigMeshData &orig_mesh_data,
                                            const BitSpan use_flat_layout,
                                            const IndexMask &node_mask,
                                            const MutableSpan<gpu::VertBuf *> vbos)
{
  const bke::pbvh::Tree &pbvh = *bke::object::pbvh_get(object);
  const Span<bke::pbvh::GridsNode> nodes = pbvh.nodes<bke::pbvh::GridsNode>();
  const SubdivCCG &subdiv_ccg = *object.sculpt->subdiv_ccg;
  const Span<float3> positions = subdiv_ccg.positions;
  const Span<float3> normals = subdiv_ccg.normals;
  const CCGKey key = BKE_subdiv_ccg_key_top_level(subdiv_ccg);
  /* Sharpness is stored on the original (base) mesh faces; map each grid to its face. */
  const Span<int> grid_to_face_map = subdiv_ccg.grid_to_face_map;
  const bke::AttributeAccessor attributes = orig_mesh_data.attributes;
  const VArraySpan sharp_faces = *attributes.lookup<bool>("sharp_face", bke::AttrDomain::Face);
  ensure_vbos_allocated_grids(object, normal_format(), use_flat_layout, node_mask, vbos);
  node_mask.foreach_index(GrainSize(1), [&](const int i) {
    short4 *data = vbos[i]->data<short4>().data();

    if (use_flat_layout[i]) {
      const int grid_size_1 = key.grid_size - 1;
      for (const int grid : nodes[i].grids()) {
        const Span<float3> grid_positions = positions.slice(bke::ccg::grid_range(key, grid));
        const Span<float3> grid_normals = normals.slice(bke::ccg::grid_range(key, grid));
        if (!sharp_faces.is_empty() && sharp_faces[grid_to_face_map[grid]]) {
          /* Sharp face: derive one normal per quad from its four corner positions and
           * assign it to all four corner vertices of the flat layout. */
          for (int y = 0; y < grid_size_1; y++) {
            for (int x = 0; x < grid_size_1; x++) {
              float3 no;
              normal_quad_v3(no,
                             grid_positions[CCG_grid_xy_to_index(key.grid_size, x, y + 1)],
                             grid_positions[CCG_grid_xy_to_index(key.grid_size, x + 1, y + 1)],
                             grid_positions[CCG_grid_xy_to_index(key.grid_size, x + 1, y)],
                             grid_positions[CCG_grid_xy_to_index(key.grid_size, x, y)]);
              std::fill_n(data, 4, normal_float_to_short(no));
              data += 4;
            }
          }
        }
        else {
          /* Smooth face: replicate the cached corner-vertex normal to the quad's
           * four flat-layout vertices. */
          for (int y = 0; y < grid_size_1; y++) {
            for (int x = 0; x < grid_size_1; x++) {
              std::fill_n(
                  data,
                  4,
                  normal_float_to_short(grid_normals[CCG_grid_xy_to_index(key.grid_size, x, y)]));
              data += 4;
            }
          }
        }
      }
    }
    else {
      /* The non-flat VBO layout does not support sharp faces. */
      for (const int grid : nodes[i].grids()) {
        for (const float3 &normal : normals.slice(bke::ccg::grid_range(key, grid))) {
          *data = normal_float_to_short(normal);
          data++;
        }
      }
    }
  });
}
|
||||
|
||||
/* Fill per-node vertex buffers with sculpt mask values from the multires grids.
 * When the mesh has no mask data, buffers are filled with zero (fully unmasked). */
BLI_NOINLINE static void fill_masks_grids(const Object &object,
                                          const BitSpan use_flat_layout,
                                          const IndexMask &node_mask,
                                          const MutableSpan<gpu::VertBuf *> vbos)
{
  const bke::pbvh::Tree &pbvh = *bke::object::pbvh_get(object);
  const Span<bke::pbvh::GridsNode> nodes = pbvh.nodes<bke::pbvh::GridsNode>();
  const SubdivCCG &subdiv_ccg = *object.sculpt->subdiv_ccg;
  const CCGKey key = BKE_subdiv_ccg_key_top_level(subdiv_ccg);
  const Span<float> masks = subdiv_ccg.masks;
  ensure_vbos_allocated_grids(object, mask_format(), use_flat_layout, node_mask, vbos);
  if (!masks.is_empty()) {
    node_mask.foreach_index(GrainSize(1), [&](const int i) {
      float *data = vbos[i]->data<float>().data();
      if (use_flat_layout[i]) {
        const int grid_size_1 = key.grid_size - 1;
        for (const int grid : nodes[i].grids()) {
          const Span<float> grid_masks = masks.slice(bke::ccg::grid_range(key, grid));
          for (int y = 0; y < grid_size_1; y++) {
            for (int x = 0; x < grid_size_1; x++) {
              /* Quad corner order matches the position fill: (x,y), (x+1,y),
               * (x+1,y+1), (x,y+1). */
              *data = grid_masks[CCG_grid_xy_to_index(key.grid_size, x, y)];
              data++;
              *data = grid_masks[CCG_grid_xy_to_index(key.grid_size, x + 1, y)];
              data++;
              *data = grid_masks[CCG_grid_xy_to_index(key.grid_size, x + 1, y + 1)];
              data++;
              *data = grid_masks[CCG_grid_xy_to_index(key.grid_size, x, y + 1)];
              data++;
            }
          }
        }
      }
      else {
        /* Non-flat layout mirrors grid storage order; bulk copy per grid. */
        for (const int grid : nodes[i].grids()) {
          const Span<float> grid_masks = masks.slice(bke::ccg::grid_range(key, grid));
          std::copy_n(grid_masks.data(), grid_masks.size(), data);
          data += grid_masks.size();
        }
      }
    });
  }
  else {
    /* No mask layer: a plain fill is cheap, so use a coarser grain size. */
    node_mask.foreach_index(GrainSize(64),
                            [&](const int i) { vbos[i]->data<float>().fill(0.0f); });
  }
}
|
||||
|
||||
/* Fill per-node vertex buffers with face set overlay colors for multires grids.
 * Every vertex of a grid gets the color of the base-mesh face the grid belongs to;
 * faces in the default face set stay white (no overlay tint). */
BLI_NOINLINE static void fill_face_sets_grids(const Object &object,
                                              const OrigMeshData &orig_mesh_data,
                                              const BitSpan use_flat_layout,
                                              const IndexMask &node_mask,
                                              const MutableSpan<gpu::VertBuf *> vbos)
{
  const bke::pbvh::Tree &pbvh = *bke::object::pbvh_get(object);
  const Span<bke::pbvh::GridsNode> nodes = pbvh.nodes<bke::pbvh::GridsNode>();
  const SubdivCCG &subdiv_ccg = *object.sculpt->subdiv_ccg;
  const CCGKey key = BKE_subdiv_ccg_key_top_level(subdiv_ccg);
  const int color_default = orig_mesh_data.face_set_default;
  const int color_seed = orig_mesh_data.face_set_seed;
  const Span<int> grid_to_face_map = subdiv_ccg.grid_to_face_map;
  const bke::AttributeAccessor attributes = orig_mesh_data.attributes;
  ensure_vbos_allocated_grids(object, face_set_format(), use_flat_layout, node_mask, vbos);
  if (const VArray<int> face_sets = *attributes.lookup<int>(".sculpt_face_set",
                                                            bke::AttrDomain::Face))
  {
    const VArraySpan<int> face_sets_span(face_sets);
    node_mask.foreach_index(GrainSize(1), [&](const int i) {
      const Span<int> grids = nodes[i].grids();
      const int verts_per_grid = use_flat_layout[i] ? square_i(key.grid_size - 1) * 4 :
                                                      square_i(key.grid_size);
      uchar4 *data = vbos[i]->data<uchar4>().data();
      /* Distinct loop variable: the outer lambda already uses `i` for the node index. */
      for (const int grid_index : grids.index_range()) {
        uchar4 color{UCHAR_MAX};
        /* Read through the devirtualized span, not the virtual array, to avoid
         * per-element virtual calls in this hot loop. */
        const int fset = face_sets_span[grid_to_face_map[grids[grid_index]]];
        if (fset != color_default) {
          BKE_paint_face_set_overlay_color_get(fset, color_seed, color);
        }

        /* The whole grid shares one face, hence one color for all its vertices. */
        std::fill_n(data, verts_per_grid, color);
        data += verts_per_grid;
      }
    });
  }
  else {
    /* No face set attribute: everything belongs to the default set, draw white. */
    node_mask.foreach_index(GrainSize(1),
                            [&](const int i) { vbos[i]->data<uchar4>().fill(uchar4{UCHAR_MAX}); });
  }
}
|
||||
|
||||
/* Write triangle corner positions for every visible face of each BMesh node into
 * its vertex buffer. BMesh nodes are assumed to contain triangles only, so each
 * face contributes exactly three vertices. */
BLI_NOINLINE static void update_positions_bmesh(const Object &object,
                                                const IndexMask &node_mask,
                                                const MutableSpan<gpu::VertBuf *> vbos)
{
  const bke::pbvh::Tree &pbvh = *bke::object::pbvh_get(object);
  const Span<bke::pbvh::BMeshNode> nodes = pbvh.nodes<bke::pbvh::BMeshNode>();
  ensure_vbos_allocated_bmesh(object, position_format(), node_mask, vbos);
  node_mask.foreach_index(GrainSize(1), [&](const int i) {
    float3 *vbo_data = vbos[i]->data<float3>().data();
    for (const BMFace *face :
         BKE_pbvh_bmesh_node_faces(&const_cast<bke::pbvh::BMeshNode &>(nodes[i])))
    {
      /* Hidden faces are excluded from the buffer (matches the allocated size). */
      if (BM_elem_flag_test(face, BM_ELEM_HIDDEN)) {
        continue;
      }
      const BMLoop *loop = face->l_first;
      vbo_data[0] = loop->prev->v->co;
      vbo_data[1] = loop->v->co;
      vbo_data[2] = loop->next->v->co;
      vbo_data += 3;
    }
  });
}
|
||||
|
||||
/* Write packed triangle corner normals for every visible face of each BMesh node.
 * Smooth-shaded faces use per-vertex normals; flat-shaded faces replicate the face
 * normal to all three corners. */
BLI_NOINLINE static void update_normals_bmesh(const Object &object,
                                              const IndexMask &node_mask,
                                              const MutableSpan<gpu::VertBuf *> vbos)
{
  const bke::pbvh::Tree &pbvh = *bke::object::pbvh_get(object);
  const Span<bke::pbvh::BMeshNode> nodes = pbvh.nodes<bke::pbvh::BMeshNode>();
  ensure_vbos_allocated_bmesh(object, normal_format(), node_mask, vbos);
  node_mask.foreach_index(GrainSize(1), [&](const int i) {
    short4 *data = vbos[i]->data<short4>().data();
    for (const BMFace *face :
         BKE_pbvh_bmesh_node_faces(&const_cast<bke::pbvh::BMeshNode &>(nodes[i])))
    {
      /* Hidden faces are skipped, matching the buffer size from allocation. */
      if (BM_elem_flag_test(face, BM_ELEM_HIDDEN)) {
        continue;
      }
      if (BM_elem_flag_test(face, BM_ELEM_SMOOTH)) {
        /* Smooth shading: one normal per corner vertex, in triangle order. */
        const BMLoop *l = face->l_first;
        *data = normal_float_to_short(l->prev->v->no);
        data++;
        *data = normal_float_to_short(l->v->no);
        data++;
        *data = normal_float_to_short(l->next->v->no);
        data++;
      }
      else {
        /* Flat shading: all three corners share the face normal. */
        std::fill_n(data, 3, normal_float_to_short(face->no));
        data += 3;
      }
    }
  });
}
|
||||
|
||||
/* Write sculpt mask values for every visible triangle corner of each BMesh node.
 * The mask is stored as a float custom data layer on vertices; when the layer is
 * missing, buffers are filled with zero (fully unmasked). */
BLI_NOINLINE static void update_masks_bmesh(const Object &object,
                                            const IndexMask &node_mask,
                                            const MutableSpan<gpu::VertBuf *> vbos)
{
  const bke::pbvh::Tree &pbvh = *bke::object::pbvh_get(object);
  const Span<bke::pbvh::BMeshNode> nodes = pbvh.nodes<bke::pbvh::BMeshNode>();
  const BMesh &bm = *object.sculpt->bm;
  /* -1 means the mesh has no ".sculpt_mask" layer. */
  const int cd_offset = CustomData_get_offset_named(&bm.vdata, CD_PROP_FLOAT, ".sculpt_mask");
  ensure_vbos_allocated_bmesh(object, mask_format(), node_mask, vbos);
  if (cd_offset != -1) {
    node_mask.foreach_index(GrainSize(1), [&](const int i) {
      float *data = vbos[i]->data<float>().data();
      for (const BMFace *face :
           BKE_pbvh_bmesh_node_faces(&const_cast<bke::pbvh::BMeshNode &>(nodes[i])))
      {
        if (BM_elem_flag_test(face, BM_ELEM_HIDDEN)) {
          continue;
        }
        /* Per-corner mask values in the same triangle order as the position fill. */
        const BMLoop *l = face->l_first;
        *data = bmesh_cd_vert_get<float>(*l->prev->v, cd_offset);
        data++;
        *data = bmesh_cd_vert_get<float>(*l->v, cd_offset);
        data++;
        *data = bmesh_cd_vert_get<float>(*l->next->v, cd_offset);
        data++;
      }
    });
  }
  else {
    /* No mask layer: a plain fill is cheap, so use a coarser grain size. */
    node_mask.foreach_index(GrainSize(64),
                            [&](const int i) { vbos[i]->data<float>().fill(0.0f); });
  }
}
|
||||
|
||||
/* Write face set overlay colors for every visible triangle of each BMesh node.
 * Faces in the default face set stay white; others get a seeded overlay color.
 * When the face set layer is missing, buffers are filled with plain white. */
BLI_NOINLINE static void update_face_sets_bmesh(const Object &object,
                                                const OrigMeshData &orig_mesh_data,
                                                const IndexMask &node_mask,
                                                const MutableSpan<gpu::VertBuf *> vbos)
{
  const bke::pbvh::Tree &pbvh = *bke::object::pbvh_get(object);
  const Span<bke::pbvh::BMeshNode> nodes = pbvh.nodes<bke::pbvh::BMeshNode>();
  const BMesh &bm = *object.sculpt->bm;
  const int color_default = orig_mesh_data.face_set_default;
  const int color_seed = orig_mesh_data.face_set_seed;
  /* -1 means the mesh has no ".sculpt_face_set" layer. */
  const int offset = CustomData_get_offset_named(&bm.pdata, CD_PROP_INT32, ".sculpt_face_set");
  ensure_vbos_allocated_bmesh(object, face_set_format(), node_mask, vbos);
  if (offset != -1) {
    node_mask.foreach_index(GrainSize(1), [&](const int i) {
      uchar4 *data = vbos[i]->data<uchar4>().data();
      for (const BMFace *face :
           BKE_pbvh_bmesh_node_faces(&const_cast<bke::pbvh::BMeshNode &>(nodes[i])))
      {
        if (BM_elem_flag_test(face, BM_ELEM_HIDDEN)) {
          continue;
        }
        uchar4 color{UCHAR_MAX};
        const int fset = bmesh_cd_face_get<int>(*face, offset);
        if (fset != color_default) {
          BKE_paint_face_set_overlay_color_get(fset, color_seed, color);
        }
        /* One color for all three corners of the triangle. */
        std::fill_n(data, 3, color);
        data += 3;
      }
    });
  }
  else {
    node_mask.foreach_index(GrainSize(64),
                            [&](const int i) { vbos[i]->data<uchar4>().fill(uchar4(255)); });
  }
}
|
||||
|
||||
/* Fill the vertex buffer of each BMesh node with a generic (user-defined) attribute,
 * read from a BMesh custom data layer. The value is spread to triangle corners
 * according to its domain (point, face, or corner). */
BLI_NOINLINE static void update_generic_attribute_bmesh(const Object &object,
                                                        const OrigMeshData &orig_mesh_data,
                                                        const IndexMask &node_mask,
                                                        const GenericRequest &attr,
                                                        const MutableSpan<gpu::VertBuf *> vbos)
{
  const bke::pbvh::Tree &pbvh = *bke::object::pbvh_get(object);
  const Span<bke::pbvh::BMeshNode> nodes = pbvh.nodes<bke::pbvh::BMeshNode>();
  const BMesh &bm = *object.sculpt->bm;
  const bke::AttrDomain domain = attr.domain;
  const eCustomDataType data_type = attr.type;
  const CustomData &custom_data = *get_cdata(bm, domain);
  const int offset = CustomData_get_offset_named(&custom_data, data_type, attr.name);
  /* Allocate buffers only after the source format is known (see refactor rationale). */
  ensure_vbos_allocated_bmesh(
      object, attribute_format(orig_mesh_data, attr.name, data_type), node_mask, vbos);
  node_mask.foreach_index(GrainSize(1), [&](const int i) {
    /* Retrieve the node's face set once, outside the type dispatch, with an explicit
     * type (consistent with #ensure_vbos_allocated_bmesh). */
    const Set<BMFace *, 0> &faces = BKE_pbvh_bmesh_node_faces(
        &const_cast<bke::pbvh::BMeshNode &>(nodes[i]));
    bke::attribute_math::convert_to_static_type(data_type, [&](auto dummy) {
      using T = decltype(dummy);
      /* Types with no GPU representation (void VBO type) are skipped entirely. */
      if constexpr (!std::is_void_v<typename AttributeConverter<T>::VBOType>) {
        switch (domain) {
          case bke::AttrDomain::Point:
            extract_data_vert_bmesh<T>(faces, offset, *vbos[i]);
            break;
          case bke::AttrDomain::Face:
            extract_data_face_bmesh<T>(faces, offset, *vbos[i]);
            break;
          case bke::AttrDomain::Corner:
            extract_data_corner_bmesh<T>(faces, offset, *vbos[i]);
            break;
          default:
            BLI_assert_unreachable();
        }
      }
    });
  });
}
|
||||
|
||||
static gpu::IndexBuf *create_index_faces(const OffsetIndices<int> faces,
|
||||
@@ -1738,62 +1680,6 @@ BitSpan DrawCacheImpl::ensure_use_flat_layout(const Object &object,
|
||||
return use_flat_layout_;
|
||||
}
|
||||
|
||||
/* Make sure each masked node has a vertex buffer in the given format, sized for the
 * node's corner count. Existing buffers are reused; only their data is (re)allocated. */
BLI_NOINLINE static void ensure_vbos_allocated_mesh(const Object &object,
                                                    const GPUVertFormat &format,
                                                    const IndexMask &node_mask,
                                                    const MutableSpan<gpu::VertBuf *> vbos)
{
  const bke::pbvh::Tree &pbvh = *bke::object::pbvh_get(object);
  const Span<bke::pbvh::MeshNode> nodes = pbvh.nodes<bke::pbvh::MeshNode>();
  node_mask.foreach_index(GrainSize(64), [&](const int i) {
    gpu::VertBuf *&vbo = vbos[i];
    if (vbo == nullptr) {
      vbo = GPU_vertbuf_create_with_format(format);
    }
    GPU_vertbuf_data_alloc(*vbo, nodes[i].corners_num());
  });
}
|
||||
|
||||
/* Make sure each masked node has a vertex buffer in the given format, sized for the
 * node's grids. The flat layout stores four vertices per quad, the non-flat layout
 * one vertex per grid element. Existing buffers are reused. */
BLI_NOINLINE static void ensure_vbos_allocated_grids(const Object &object,
                                                     const GPUVertFormat &format,
                                                     const BitSpan use_flat_layout,
                                                     const IndexMask &node_mask,
                                                     const MutableSpan<gpu::VertBuf *> vbos)
{
  const SculptSession &ss = *object.sculpt;
  const bke::pbvh::Tree &pbvh = *bke::object::pbvh_get(object);
  const Span<bke::pbvh::GridsNode> nodes = pbvh.nodes<bke::pbvh::GridsNode>();
  const SubdivCCG &subdiv_ccg = *ss.subdiv_ccg;
  const CCGKey key = BKE_subdiv_ccg_key_top_level(subdiv_ccg);
  node_mask.foreach_index(GrainSize(64), [&](const int i) {
    gpu::VertBuf *&vbo = vbos[i];
    if (vbo == nullptr) {
      vbo = GPU_vertbuf_create_with_format(format);
    }
    const int verts_per_grid = use_flat_layout[i] ? square_i(key.grid_size - 1) * 4 :
                                                    square_i(key.grid_size);
    GPU_vertbuf_data_alloc(*vbo, nodes[i].grids().size() * verts_per_grid);
  });
}
|
||||
|
||||
/* Make sure each masked node has a vertex buffer in the given format, sized for the
 * node's visible triangles (three vertices each). Existing buffers are reused. */
BLI_NOINLINE static void ensure_vbos_allocated_bmesh(const Object &object,
                                                     const GPUVertFormat &format,
                                                     const IndexMask &node_mask,
                                                     const MutableSpan<gpu::VertBuf *> vbos)
{
  const bke::pbvh::Tree &pbvh = *bke::object::pbvh_get(object);
  const Span<bke::pbvh::BMeshNode> nodes = pbvh.nodes<bke::pbvh::BMeshNode>();

  node_mask.foreach_index(GrainSize(64), [&](const int i) {
    gpu::VertBuf *&vbo = vbos[i];
    if (vbo == nullptr) {
      vbo = GPU_vertbuf_create_with_format(format);
    }
    const Set<BMFace *, 0> &faces = BKE_pbvh_bmesh_node_faces(
        &const_cast<bke::pbvh::BMeshNode &>(nodes[i]));
    /* Hidden faces are excluded, matching the fill functions that skip them. */
    GPU_vertbuf_data_alloc(*vbo, count_visible_tris_bmesh(faces) * 3);
  });
}
|
||||
|
||||
BLI_NOINLINE static void flush_vbo_data(const Span<gpu::VertBuf *> vbos,
|
||||
const IndexMask &node_mask)
|
||||
{
|
||||
@@ -1826,22 +1712,83 @@ Span<gpu::VertBuf *> DrawCacheImpl::ensure_attribute_data(const Object &object,
|
||||
node_mask.slice_content(data.dirty_nodes.index_range()), data.dirty_nodes, memory);
|
||||
const IndexMask mask = IndexMask::from_union(empty_mask, dirty_mask, memory);
|
||||
|
||||
const GPUVertFormat format = format_for_request(orig_mesh_data, attr);
|
||||
|
||||
switch (pbvh.type()) {
|
||||
case bke::pbvh::Type::Mesh: {
|
||||
ensure_vbos_allocated_mesh(object, format, mask, vbos);
|
||||
fill_vbos_mesh(object, orig_mesh_data, mask, attr, vbos);
|
||||
if (const CustomRequest *request_type = std::get_if<CustomRequest>(&attr)) {
|
||||
switch (*request_type) {
|
||||
case CustomRequest::Position:
|
||||
update_positions_mesh(object, mask, vbos);
|
||||
break;
|
||||
case CustomRequest::Normal:
|
||||
update_normals_mesh(object, mask, vbos);
|
||||
break;
|
||||
case CustomRequest::Mask:
|
||||
update_masks_mesh(object, orig_mesh_data, mask, vbos);
|
||||
break;
|
||||
case CustomRequest::FaceSet:
|
||||
update_face_sets_mesh(object, orig_mesh_data, mask, vbos);
|
||||
break;
|
||||
}
|
||||
}
|
||||
else {
|
||||
update_generic_attribute_mesh(
|
||||
object, orig_mesh_data, mask, std::get<GenericRequest>(attr), vbos);
|
||||
}
|
||||
break;
|
||||
}
|
||||
case bke::pbvh::Type::Grids: {
|
||||
ensure_vbos_allocated_grids(object, format, use_flat_layout_, mask, vbos);
|
||||
fill_vbos_grids(object, orig_mesh_data, use_flat_layout_, mask, attr, vbos);
|
||||
if (const CustomRequest *request_type = std::get_if<CustomRequest>(&attr)) {
|
||||
switch (*request_type) {
|
||||
case CustomRequest::Position:
|
||||
fill_positions_grids(object, use_flat_layout_, mask, vbos);
|
||||
break;
|
||||
case CustomRequest::Normal:
|
||||
fill_normals_grids(object, orig_mesh_data, use_flat_layout_, mask, vbos);
|
||||
break;
|
||||
case CustomRequest::Mask:
|
||||
fill_masks_grids(object, use_flat_layout_, mask, vbos);
|
||||
break;
|
||||
case CustomRequest::FaceSet:
|
||||
fill_face_sets_grids(object, orig_mesh_data, use_flat_layout_, mask, vbos);
|
||||
break;
|
||||
}
|
||||
}
|
||||
else {
|
||||
const eCustomDataType type = std::get<GenericRequest>(attr).type;
|
||||
node_mask.foreach_index(GrainSize(1), [&](const int i) {
|
||||
bke::attribute_math::convert_to_static_type(type, [&](auto dummy) {
|
||||
using T = decltype(dummy);
|
||||
using Converter = AttributeConverter<T>;
|
||||
using VBOType = typename Converter::VBOType;
|
||||
if constexpr (!std::is_void_v<VBOType>) {
|
||||
vbos[i]->data<VBOType>().fill(Converter::convert(fallback_value_for_fill<T>()));
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
break;
|
||||
}
|
||||
case bke::pbvh::Type::BMesh: {
|
||||
ensure_vbos_allocated_bmesh(object, format, mask, vbos);
|
||||
fill_vbos_bmesh(object, orig_mesh_data, mask, attr, vbos);
|
||||
if (const CustomRequest *request_type = std::get_if<CustomRequest>(&attr)) {
|
||||
switch (*request_type) {
|
||||
case CustomRequest::Position:
|
||||
update_positions_bmesh(object, mask, vbos);
|
||||
break;
|
||||
case CustomRequest::Normal:
|
||||
update_normals_bmesh(object, mask, vbos);
|
||||
break;
|
||||
case CustomRequest::Mask:
|
||||
update_masks_bmesh(object, mask, vbos);
|
||||
break;
|
||||
case CustomRequest::FaceSet:
|
||||
update_face_sets_bmesh(object, orig_mesh_data, mask, vbos);
|
||||
break;
|
||||
}
|
||||
}
|
||||
else {
|
||||
update_generic_attribute_bmesh(
|
||||
object, orig_mesh_data, mask, std::get<GenericRequest>(attr), vbos);
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user