Refactor: Avoid "extractors" abstraction for mesh normals GPU data

Part of #116901.
The only non-obvious part is the change from using the `MR_DATA_LOOP_NOR`
flag as a signal to calculate normals and store them in `MeshRenderData`,
to an explicit check for whether the normals GPU buffer is requested. In the future
these dependencies will hopefully be refactored to be part of the task graph instead.
This commit is contained in:
Hans Goudey
2024-05-22 14:57:45 -04:00
parent 30ec31b550
commit a778e3ddc1
4 changed files with 71 additions and 99 deletions

View File

@@ -528,9 +528,11 @@ static void mesh_extract_render_data_node_exec(void *__restrict task_data)
const eMRIterType iter_type = update_task_data->iter_type;
const eMRDataType data_flag = update_task_data->data_flag;
const bool request_face_normals = (data_flag & (MR_DATA_POLY_NOR | MR_DATA_LOOP_NOR |
const bool request_face_normals = DRW_vbo_requested(update_task_data->cache->buff.vbo.nor) ||
(data_flag & (MR_DATA_POLY_NOR | MR_DATA_LOOP_NOR |
MR_DATA_TAN_LOOP_NOR)) != 0;
const bool request_corner_normals = (data_flag & MR_DATA_LOOP_NOR) != 0;
const bool request_corner_normals = DRW_vbo_requested(update_task_data->cache->buff.vbo.nor) ||
(data_flag & MR_DATA_LOOP_NOR) != 0;
const bool force_corner_normals = (data_flag & MR_DATA_TAN_LOOP_NOR) != 0;
if (request_face_normals) {
@@ -627,7 +629,6 @@ void mesh_buffer_cache_create_requested(TaskGraph &task_graph,
} \
} while (0)
EXTRACT_ADD_REQUESTED(vbo, nor);
EXTRACT_ADD_REQUESTED(vbo, uv);
EXTRACT_ADD_REQUESTED(vbo, tan);
EXTRACT_ADD_REQUESTED(vbo, sculpt_data);
@@ -666,7 +667,8 @@ void mesh_buffer_cache_create_requested(TaskGraph &task_graph,
if (extractors.is_empty() && !DRW_ibo_requested(buffers.ibo.lines) &&
!DRW_ibo_requested(buffers.ibo.lines_loose) && !DRW_ibo_requested(buffers.ibo.tris) &&
!DRW_ibo_requested(buffers.ibo.points) && !DRW_vbo_requested(buffers.vbo.pos))
!DRW_ibo_requested(buffers.ibo.points) && !DRW_vbo_requested(buffers.vbo.pos) &&
!DRW_vbo_requested(buffers.vbo.nor))
{
return;
}
@@ -717,6 +719,22 @@ void mesh_buffer_cache_create_requested(TaskGraph &task_graph,
[](void *task_data) { delete static_cast<TaskData *>(task_data); });
BLI_task_graph_edge_create(task_node_mesh_render_data, task_node);
}
if (DRW_vbo_requested(buffers.vbo.nor)) {
struct TaskData {
MeshRenderData &mr;
MeshBufferCache &mbc;
bool do_hq_normals;
};
TaskNode *task_node = BLI_task_graph_node_create(
&task_graph,
[](void *__restrict task_data) {
const TaskData &data = *static_cast<TaskData *>(task_data);
extract_normals(data.mr, data.do_hq_normals, *data.mbc.buff.vbo.nor);
},
new TaskData{*mr, mbc, do_hq_normals},
[](void *task_data) { delete static_cast<TaskData *>(task_data); });
BLI_task_graph_edge_create(task_node_mesh_render_data, task_node);
}
if (DRW_ibo_requested(buffers.ibo.tris)) {
struct TaskData {
MeshRenderData &mr;
@@ -858,7 +876,6 @@ void mesh_buffer_cache_create_requested_subdiv(MeshBatchCache &cache,
} \
} while (0)
EXTRACT_ADD_REQUESTED(vbo, nor);
for (int i = 0; i < GPU_MAX_ATTR; i++) {
EXTRACT_ADD_REQUESTED(vbo, attr[i]);
}
@@ -894,7 +911,7 @@ void mesh_buffer_cache_create_requested_subdiv(MeshBatchCache &cache,
if (extractors.is_empty() && !DRW_ibo_requested(buffers.ibo.lines) &&
!DRW_ibo_requested(buffers.ibo.lines_loose) && !DRW_ibo_requested(buffers.ibo.tris) &&
!DRW_ibo_requested(buffers.ibo.points) && !DRW_vbo_requested(buffers.vbo.pos) &&
!DRW_vbo_requested(buffers.vbo.orco))
!DRW_vbo_requested(buffers.vbo.orco) && !DRW_vbo_requested(buffers.vbo.nor))
{
return;
}
@@ -907,6 +924,10 @@ void mesh_buffer_cache_create_requested_subdiv(MeshBatchCache &cache,
if (DRW_vbo_requested(buffers.vbo.pos) || DRW_vbo_requested(buffers.vbo.orco)) {
extract_positions_subdiv(subdiv_cache, mr, *buffers.vbo.pos, buffers.vbo.orco);
}
if (DRW_vbo_requested(buffers.vbo.nor)) {
/* The corner normals calculation uses positions and normals stored in the `pos` VBO. */
extract_normals_subdiv(subdiv_cache, *buffers.vbo.pos, *buffers.vbo.nor);
}
if (DRW_ibo_requested(buffers.ibo.lines) || DRW_ibo_requested(buffers.ibo.lines_loose)) {
extract_lines_subdiv(
subdiv_cache, mr, buffers.ibo.lines, buffers.ibo.lines_loose, cache.no_loose_wire);

View File

@@ -52,9 +52,6 @@ eMRIterType mesh_extract_iter_type(const MeshExtract *ext)
static const MeshExtract *mesh_extract_override_hq_normals(const MeshExtract *extractor)
{
if (extractor == &extract_nor) {
return &extract_nor_hq;
}
if (extractor == &extract_tan) {
return &extract_tan_hq;
}

View File

@@ -348,6 +348,11 @@ void extract_positions_subdiv(const DRWSubdivCache &subdiv_cache,
gpu::VertBuf &vbo,
gpu::VertBuf *orco_vbo);
void extract_normals(const MeshRenderData &mr, bool use_hq, gpu::VertBuf &vbo);
void extract_normals_subdiv(const DRWSubdivCache &subdiv_cache,
gpu::VertBuf &pos_nor,
gpu::VertBuf &lnor);
void extract_tris(const MeshRenderData &mr,
const SortedFaceData &face_sorted,
MeshBatchCache &cache,
@@ -378,8 +383,6 @@ extern const MeshExtract extract_edituv_tris;
extern const MeshExtract extract_edituv_lines;
extern const MeshExtract extract_edituv_points;
extern const MeshExtract extract_edituv_fdots;
extern const MeshExtract extract_nor_hq;
extern const MeshExtract extract_nor;
extern const MeshExtract extract_uv;
extern const MeshExtract extract_tan;
extern const MeshExtract extract_tan_hq;

View File

@@ -14,10 +14,6 @@
namespace blender::draw {
/* ---------------------------------------------------------------------- */
/** \name Extract Loop Normal
* \{ */
template<typename GPUType> inline GPUType convert_normal(const float3 &src);
template<> inline GPUPackedNormal convert_normal(const float3 &src)
@@ -217,27 +213,44 @@ static void extract_normals_bm(const MeshRenderData &mr, MutableSpan<GPUType> no
}
}
static void extract_lnor_init(const MeshRenderData &mr,
MeshBatchCache & /*cache*/,
void *buf,
void * /*tls_data*/)
void extract_normals(const MeshRenderData &mr, const bool use_hq, gpu::VertBuf &vbo)
{
gpu::VertBuf *vbo = static_cast<gpu::VertBuf *>(buf);
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
GPU_vertformat_attr_add(&format, "nor", GPU_COMP_I10, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
GPU_vertformat_alias_add(&format, "lnor");
}
GPU_vertbuf_init_with_format(vbo, &format);
GPU_vertbuf_data_alloc(vbo, mr.corners_num);
MutableSpan vbo_data(static_cast<GPUPackedNormal *>(GPU_vertbuf_get_data(vbo)), mr.corners_num);
if (use_hq) {
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
GPU_vertformat_attr_add(&format, "nor", GPU_COMP_I16, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
GPU_vertformat_alias_add(&format, "lnor");
}
GPU_vertbuf_init_with_format(&vbo, &format);
GPU_vertbuf_data_alloc(&vbo, mr.corners_num);
MutableSpan vbo_data(static_cast<short4 *>(GPU_vertbuf_get_data(&vbo)), mr.corners_num);
if (mr.extract_type == MR_EXTRACT_MESH) {
extract_normals_mesh(mr, vbo_data);
extract_paint_overlay_flags(mr, vbo_data);
if (mr.extract_type == MR_EXTRACT_MESH) {
extract_normals_mesh(mr, vbo_data);
extract_paint_overlay_flags(mr, vbo_data);
}
else {
extract_normals_bm(mr, vbo_data);
}
}
else {
extract_normals_bm(mr, vbo_data);
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
GPU_vertformat_attr_add(&format, "nor", GPU_COMP_I10, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
GPU_vertformat_alias_add(&format, "lnor");
}
GPU_vertbuf_init_with_format(&vbo, &format);
GPU_vertbuf_data_alloc(&vbo, mr.corners_num);
MutableSpan vbo_data(static_cast<GPUPackedNormal *>(GPU_vertbuf_get_data(&vbo)),
mr.corners_num);
if (mr.extract_type == MR_EXTRACT_MESH) {
extract_normals_mesh(mr, vbo_data);
extract_paint_overlay_flags(mr, vbo_data);
}
else {
extract_normals_bm(mr, vbo_data);
}
}
}
@@ -252,74 +265,12 @@ static GPUVertFormat *get_subdiv_lnor_format()
return &format;
}
static void extract_lnor_init_subdiv(const DRWSubdivCache &subdiv_cache,
const MeshRenderData & /*mr*/,
MeshBatchCache &cache,
void *buffer,
void * /*data*/)
void extract_normals_subdiv(const DRWSubdivCache &subdiv_cache,
gpu::VertBuf &pos_nor,
gpu::VertBuf &lnor)
{
gpu::VertBuf *vbo = static_cast<gpu::VertBuf *>(buffer);
gpu::VertBuf *pos_nor = cache.final.buff.vbo.pos;
BLI_assert(pos_nor);
GPU_vertbuf_init_build_on_device(vbo, get_subdiv_lnor_format(), subdiv_cache.num_subdiv_loops);
draw_subdiv_build_lnor_buffer(subdiv_cache, pos_nor, vbo);
GPU_vertbuf_init_build_on_device(&lnor, get_subdiv_lnor_format(), subdiv_cache.num_subdiv_loops);
draw_subdiv_build_lnor_buffer(subdiv_cache, &pos_nor, &lnor);
}
constexpr MeshExtract create_extractor_lnor()
{
  /* Build the corner ("loop") normal extractor: it fills the `vbo.nor` buffer
   * and requires corner normals to be available in #MeshRenderData
   * (#MR_DATA_LOOP_NOR). */
  MeshExtract result = {nullptr};
  result.data_type = MR_DATA_LOOP_NOR;
  result.use_threading = true;
  result.init = extract_lnor_init;
  result.init_subdiv = extract_lnor_init_subdiv;
  /* Location of the target VBO inside #MeshBufferList. */
  result.mesh_buffer_offset = offsetof(MeshBufferList, vbo.nor);
  return result;
}
/** \} */
/* ---------------------------------------------------------------------- */
/** \name Extract HQ Loop Normal
* \{ */
/* Fill the high-quality corner normals VBO: one entry per corner
 * (`mr.corners_num`), stored as 4 x 16-bit signed integers. For Mesh data the
 * paint-overlay flags are packed into the same buffer; for BMesh extraction
 * only the normals are written. */
static void extract_lnor_hq_init(const MeshRenderData &mr,
MeshBatchCache & /*cache*/,
void *buf,
void * /*tls_data*/)
{
gpu::VertBuf *vbo = static_cast<gpu::VertBuf *>(buf);
/* Lazily-built static vertex format: "lnor" is added as an alias so shaders
 * can bind the attribute under either name. The I16 components are fetched
 * as floats (presumably unit-range, per GPU_FETCH_INT_TO_FLOAT_UNIT --
 * confirm against the GPU module docs). */
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
GPU_vertformat_attr_add(&format, "nor", GPU_COMP_I16, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
GPU_vertformat_alias_add(&format, "lnor");
}
/* Allocate CPU-side storage before mapping it as a span of `short4`. */
GPU_vertbuf_init_with_format(vbo, &format);
GPU_vertbuf_data_alloc(vbo, mr.corners_num);
MutableSpan vbo_data(static_cast<short4 *>(GPU_vertbuf_get_data(vbo)), mr.corners_num);
if (mr.extract_type == MR_EXTRACT_MESH) {
extract_normals_mesh(mr, vbo_data);
/* Overlay flags are only available/meaningful for Mesh extraction. */
extract_paint_overlay_flags(mr, vbo_data);
}
else {
extract_normals_bm(mr, vbo_data);
}
}
constexpr MeshExtract create_extractor_lnor_hq()
{
  /* Build the high-quality (16-bit component) corner normal extractor. It
   * targets the same `vbo.nor` buffer and shares the subdiv init callback;
   * only the coarse-mesh init differs. */
  MeshExtract result = {nullptr};
  result.data_type = MR_DATA_LOOP_NOR;
  result.use_threading = true;
  result.init = extract_lnor_hq_init;
  result.init_subdiv = extract_lnor_init_subdiv;
  /* Location of the target VBO inside #MeshBufferList. */
  result.mesh_buffer_offset = offsetof(MeshBufferList, vbo.nor);
  return result;
}
/** \} */
/* Public extractor instances (declared `extern` in the extraction header). */
const MeshExtract extract_nor = create_extractor_lnor();
const MeshExtract extract_nor_hq = create_extractor_lnor_hq();
} // namespace blender::draw