Draw: Refactor mesh extraction to avoid creating uninitialized buffers

The initial goal of this PR is to avoid creating vertex and index
buffers as part of the "request" phase of the drawing loop. Conflating
requesting and creating index buffers might not sound so bad, but it
ends up significantly complicating the whole process. It is also
incompatible with a future buffer cache that would allow avoiding
re-uploading mesh buffers.

Specifically, this means removing the use of `DRW_vbo_request` and
`DRW_ibo_request` from the mesh batch extraction process. Instead, a
list of buffer types is gathered based on the requested batches. Then
that list is filtered to find the buffers that haven't been created
yet. Overall I find the new process much easier to understand.

A few examples of simplifications this allows are avoiding allocating
`MeshRenderData` on the heap, and the removal of its `use_final_mesh`
member. That's just replaced by passing the necessary information
through the call stack.

Another notable difference is that for meshes, EEVEE's velocity module
now requests a batch that contains the buffer rather than just requesting
the buffer itself. This is just simpler to get working since it doesn't require
a separate code path.

The task graph argument for extraction is unused after this change. It wasn't
used effectively anyway; a simpler method of multithreading extractions is
used in this PR. I didn't remove it completely because it will probably be
repurposed in the next step of this project.

The next step in this project is to replace `MeshBufferList` with a
global cache that's keyed based on the mesh data that comprises each
batch, when possible (i.e. for non edit-mode meshes). The changes above
should be applied to other object types too.

Pull Request: https://projects.blender.org/blender/blender/pulls/135699
This commit is contained in:
Hans Goudey
2025-03-25 18:09:38 +01:00
committed by Hans Goudey
parent 5d3d0af750
commit 9b70851d91
17 changed files with 999 additions and 1685 deletions

View File

@@ -198,8 +198,8 @@ bool VelocityModule::step_object_sync(ObjectKey &object_key,
case OB_POINTCLOUD:
data.pos_buf = DRW_pointcloud_position_and_radius_buffer_get(ob);
break;
default:
data.pos_buf = DRW_cache_object_pos_vertbuf_get(ob);
case OB_MESH:
data.pos_buf = DRW_cache_mesh_surface_get(ob);
break;
}
return data;
@@ -207,7 +207,7 @@ bool VelocityModule::step_object_sync(ObjectKey &object_key,
const VelocityGeometryData &data = geometry_map.lookup_or_add_cb(vel.id, add_cb);
if (data.pos_buf == nullptr) {
if (!data.pos_buf_get()) {
has_deform = false;
}
}
@@ -248,10 +248,11 @@ void VelocityModule::geometry_steps_fill()
{
uint dst_ofs = 0;
for (VelocityGeometryData &geom : geometry_map.values()) {
if (!geom.pos_buf) {
gpu::VertBuf *pos_buf = geom.pos_buf_get();
if (!pos_buf) {
continue;
}
uint src_len = GPU_vertbuf_get_vertex_len(geom.pos_buf);
uint src_len = GPU_vertbuf_get_vertex_len(pos_buf);
geom.len = src_len;
geom.ofs = dst_ofs;
dst_ofs += src_len;
@@ -269,20 +270,21 @@ void VelocityModule::geometry_steps_fill()
copy_ps.bind_ssbo("out_buf", *geometry_steps[step_]);
for (VelocityGeometryData &geom : geometry_map.values()) {
if (!geom.pos_buf || geom.len == 0) {
gpu::VertBuf *pos_buf = geom.pos_buf_get();
if (!pos_buf || geom.len == 0) {
continue;
}
const GPUVertFormat *format = GPU_vertbuf_get_format(geom.pos_buf);
const GPUVertFormat *format = GPU_vertbuf_get_format(pos_buf);
if (format->stride == 16) {
GPU_storagebuf_copy_sub_from_vertbuf(*geometry_steps[step_],
geom.pos_buf,
pos_buf,
geom.ofs * sizeof(float4),
0,
geom.len * sizeof(float4));
}
else {
BLI_assert(format->stride % 4 == 0);
copy_ps.bind_ssbo("in_buf", geom.pos_buf);
copy_ps.bind_ssbo("in_buf", pos_buf);
copy_ps.push_constant("start_offset", geom.ofs);
copy_ps.push_constant("vertex_stride", int(format->stride / 4));
copy_ps.push_constant("vertex_count", geom.len);
@@ -382,7 +384,8 @@ void VelocityModule::end_sync()
/* Current geometry step will be copied at the end of the frame.
* Thus vel.geo.len[STEP_CURRENT] is not yet valid and the current length is manually
* retrieved. */
gpu::VertBuf *pos_buf = geometry_map.lookup_default(vel.id, VelocityGeometryData()).pos_buf;
gpu::VertBuf *pos_buf =
geometry_map.lookup_default(vel.id, VelocityGeometryData()).pos_buf_get();
vel.geo.do_deform = pos_buf != nullptr &&
(vel.geo.len[STEP_PREVIOUS] == GPU_vertbuf_get_vertex_len(pos_buf));
}

View File

@@ -13,8 +13,11 @@
#pragma once
#include <variant>
#include "BLI_map.hh"
#include "GPU_batch.hh"
#include "eevee_shader_shared.hh"
#include "eevee_sync.hh"
@@ -34,11 +37,22 @@ class VelocityModule {
};
struct VelocityGeometryData {
/** VertBuf not yet ready to be copied to the #VelocityGeometryBuf. */
gpu::VertBuf *pos_buf = nullptr;
std::variant<std::monostate, gpu::Batch *, gpu::VertBuf *> pos_buf;
/* Offset in the #VelocityGeometryBuf to the start of the data. In vertex. */
int ofs = 0;
/* Length of the vertex buffer. In vertex. */
int len = 0;
gpu::VertBuf *pos_buf_get() const
{
if (std::holds_alternative<gpu::VertBuf *>(this->pos_buf)) {
return std::get<gpu::VertBuf *>(this->pos_buf);
}
if (std::holds_alternative<gpu::Batch *>(this->pos_buf)) {
return std::get<gpu::Batch *>(this->pos_buf)->verts_(0);
}
return nullptr;
}
};
/**
* The map contains indirection indices to the obmat and geometry in each step buffer.

View File

@@ -175,27 +175,6 @@ blender::gpu::Batch *DRW_cache_object_surface_get(Object *ob)
}
}
blender::gpu::VertBuf *DRW_cache_object_pos_vertbuf_get(Object *ob)
{
using namespace blender::draw;
Mesh *mesh = BKE_object_get_evaluated_mesh_no_subsurf_unchecked(ob);
if (mesh) {
/* For drawing we want either the base mesh if GPU subdivision is enabled, or the
* tessellated mesh if GPU subdivision is disabled. */
if (!BKE_subsurf_modifier_has_gpu_subdiv(mesh)) {
mesh = BKE_mesh_wrapper_ensure_subdivision(mesh);
}
return DRW_mesh_batch_cache_pos_vertbuf_get(*mesh);
}
if (ob->type == OB_MESH) {
return DRW_mesh_batch_cache_pos_vertbuf_get(DRW_object_get_data_for_drawing<Mesh>(*ob));
}
return nullptr;
}
Span<blender::gpu::Batch *> DRW_cache_object_surface_material_get(
Object *ob, const Span<const GPUMaterial *> materials)
{

View File

@@ -47,11 +47,6 @@ blender::Span<blender::gpu::Batch *> DRW_cache_object_surface_material_get(
Object *ob, blender::Span<const GPUMaterial *> materials);
blender::gpu::Batch *DRW_cache_object_face_wireframe_get(const Scene *scene, Object *ob);
/**
* Returns the vertbuf used by shaded surface batch.
*/
blender::gpu::VertBuf *DRW_cache_object_pos_vertbuf_get(Object *ob);
/* Meshes */
blender::gpu::Batch *DRW_cache_mesh_all_verts_get(Object *ob);

View File

@@ -9,12 +9,15 @@
#pragma once
#include "BLI_array.hh"
#include "BLI_map.hh"
#include "BLI_math_matrix_types.hh"
#include "BLI_utildefines.h"
#include "DNA_view3d_enums.h"
#include "GPU_index_buffer.hh"
#include "GPU_shader.hh"
#include "GPU_vertex_buffer.hh"
#include "draw_attributes.hh"
@@ -57,61 +60,86 @@ enum {
DRW_MESH_WEIGHT_STATE_LOCK_RELATIVE = (1 << 2),
};
/**
* Vertex buffer types that can be use by batches in the mesh batch cache.
*
* \todo It would be good to change this to something like #draw::pbvh::AttributeRequest to
* separate the generic attribute requests. While there is a limit on the number of vertex buffers
* used by a single shader/batch, there is no need for that limit here; there are potentially many
* shaders requiring attributes for a particular mesh. OTOH, it may be good to use flags for the
* builtin buffer types, so that bitwise operations can be used.
*/
enum class VBOType : int8_t {
Position,
CornerNormal,
EdgeFactor,
VertexGroupWeight,
UVs,
Tangents,
SculptData,
Orco,
EditData,
EditUVData,
EditUVStretchArea,
EditUVStretchAngle,
MeshAnalysis,
FaceDotPosition,
FaceDotNormal,
FaceDotUV,
FaceDotEditUVData,
SkinRoots,
IndexVert,
IndexEdge,
IndexFace,
IndexFaceDot,
Attr0,
Attr1,
Attr2,
Attr3,
Attr5,
Attr6,
Attr7,
Attr8,
Attr9,
Attr10,
Attr11,
Attr12,
Attr13,
Attr14,
Attr15,
AttrViewer,
VertexNormal,
};
/**
* All index buffers used for mesh batches.
*
* \note "Tris per material" (#MeshBatchCache::tris_per_mat) is an exception. Since there are
* an arbitrary numbers of materials, those are handled separately (as slices of the overall
* triangles buffer).
*/
enum class IBOType : int8_t {
Tris,
Lines,
LinesLoose,
Points,
FaceDots,
LinesPaintMask,
LinesAdjacency,
EditUVTris,
EditUVLines,
EditUVPoints,
EditUVFaceDots,
};
struct MeshBufferList {
/* Every VBO below contains at least enough data for every loop in the mesh
* (except fdots and skin roots). For some VBOs, it extends to (in this exact order) :
* loops + loose_edges * 2 + loose_verts */
struct {
gpu::VertBuf *pos; /* extend */
gpu::VertBuf *nor; /* extend */
gpu::VertBuf *edge_fac; /* extend */
gpu::VertBuf *weights; /* extend */
gpu::VertBuf *uv;
gpu::VertBuf *tan;
gpu::VertBuf *sculpt_data;
gpu::VertBuf *orco;
/* Only for edit mode. */
gpu::VertBuf *edit_data; /* extend */
gpu::VertBuf *edituv_data;
gpu::VertBuf *edituv_stretch_area;
gpu::VertBuf *edituv_stretch_angle;
gpu::VertBuf *mesh_analysis;
gpu::VertBuf *fdots_pos;
gpu::VertBuf *fdots_nor;
gpu::VertBuf *fdots_uv;
// gpu::VertBuf *fdots_edit_data; /* inside fdots_nor for now. */
gpu::VertBuf *fdots_edituv_data;
gpu::VertBuf *skin_roots;
/* Selection */
gpu::VertBuf *vert_idx; /* extend */
gpu::VertBuf *edge_idx; /* extend */
gpu::VertBuf *face_idx;
gpu::VertBuf *fdot_idx;
gpu::VertBuf *attr[GPU_MAX_ATTR];
gpu::VertBuf *attr_viewer;
gpu::VertBuf *vnor;
} vbo;
/* Index Buffers:
* Only need to be updated when topology changes. */
struct {
/* Indices to vloops. Ordered per material. */
gpu::IndexBuf *tris;
/* Loose edges last. */
gpu::IndexBuf *lines;
/* Potentially a sub buffer of `lines` only containing the loose edges. */
gpu::IndexBuf *lines_loose;
gpu::IndexBuf *points;
gpu::IndexBuf *fdots;
/* 3D overlays. */
/* no loose edges. */
gpu::IndexBuf *lines_paint_mask;
gpu::IndexBuf *lines_adjacency;
/** UV overlays. (visibility can differ from 3D view). */
gpu::IndexBuf *edituv_tris;
gpu::IndexBuf *edituv_lines;
gpu::IndexBuf *edituv_points;
gpu::IndexBuf *edituv_fdots;
} ibo;
/* Though using maps here may add some overhead compared to just indexed arrays, it's a bit more
* conventient currently, because the "buffer exists" test is very clear, it's just whether the
* map contains it (e.g. compared to "buffer is allocated but not filled with data"). The
* sparseness *may* be useful for reducing memory usage when only few buffers are used. */
Map<VBOType, std::unique_ptr<gpu::VertBuf, gpu::VertBufDeleter>> vbos;
Map<IBOType, std::unique_ptr<gpu::IndexBuf, gpu::IndexBufDeleter>> ibos;
};
struct MeshBatchList {
@@ -287,21 +315,23 @@ struct MeshBatchCache {
MBC_EDITUV_EDGES | MBC_EDITUV_VERTS | MBC_EDITUV_FACEDOTS | MBC_WIRE_LOOPS_UVS)
void mesh_buffer_cache_create_requested(TaskGraph &task_graph,
const Scene &scene,
MeshBatchCache &cache,
MeshBufferCache &mbc,
Span<IBOType> ibo_requests,
Span<VBOType> vbo_requests,
Object &object,
Mesh &mesh,
bool is_editmode,
bool is_paint_mode,
const float4x4 &object_to_world,
bool do_final,
bool do_uvedit,
const Scene &scene,
const ToolSettings *ts,
bool use_hide);
void mesh_buffer_cache_create_requested_subdiv(MeshBatchCache &cache,
MeshBufferCache &mbc,
Span<IBOType> ibo_requests,
Span<VBOType> vbo_requests,
DRWSubdivCache &subdiv_cache,
MeshRenderData &mr);

View File

@@ -11,12 +11,12 @@
#include "DNA_mesh_types.h"
#include "DNA_scene_types.h"
#include "BLI_task.h"
#include "BLI_map.hh"
#include "BLI_task.hh"
#include "GPU_capabilities.hh"
#include "draw_cache_extract.hh"
#include "draw_cache_inline.hh"
#include "draw_subdivision.hh"
#include "mesh_extractors/extract_mesh.hh"
@@ -29,23 +29,17 @@
namespace blender::draw {
struct MeshRenderDataUpdateTaskData {
std::unique_ptr<MeshRenderData> mr;
MeshBufferCache &cache;
};
static void mesh_extract_render_data_node_exec(void *__restrict task_data)
static void ensure_dependency_data(MeshRenderData &mr,
Span<IBOType> ibo_requests,
Span<VBOType> vbo_requests,
MeshBufferCache &cache)
{
auto *update_task_data = static_cast<MeshRenderDataUpdateTaskData *>(task_data);
MeshRenderData &mr = *update_task_data->mr;
MeshBufferList &buffers = update_task_data->cache.buff;
const bool request_face_normals = DRW_vbo_requested(buffers.vbo.nor) ||
DRW_vbo_requested(buffers.vbo.fdots_nor) ||
DRW_vbo_requested(buffers.vbo.edge_fac) ||
DRW_vbo_requested(buffers.vbo.mesh_analysis);
const bool request_corner_normals = DRW_vbo_requested(buffers.vbo.nor);
const bool force_corner_normals = DRW_vbo_requested(buffers.vbo.tan);
const bool request_face_normals = vbo_requests.contains(VBOType::CornerNormal) ||
vbo_requests.contains(VBOType::FaceDotNormal) ||
vbo_requests.contains(VBOType::EdgeFactor) ||
vbo_requests.contains(VBOType::MeshAnalysis);
const bool request_corner_normals = vbo_requests.contains(VBOType::CornerNormal);
const bool force_corner_normals = vbo_requests.contains(VBOType::Tangents);
if (request_face_normals) {
mesh_render_data_update_face_normals(mr);
@@ -57,18 +51,18 @@ static void mesh_extract_render_data_node_exec(void *__restrict task_data)
mesh_render_data_update_corner_normals(mr);
}
const bool calc_loose_geom = DRW_ibo_requested(buffers.ibo.lines) ||
DRW_ibo_requested(buffers.ibo.lines_loose) ||
DRW_ibo_requested(buffers.ibo.points) ||
DRW_vbo_requested(buffers.vbo.pos) ||
DRW_vbo_requested(buffers.vbo.edit_data) ||
DRW_vbo_requested(buffers.vbo.vnor) ||
DRW_vbo_requested(buffers.vbo.vert_idx) ||
DRW_vbo_requested(buffers.vbo.edge_idx) ||
DRW_vbo_requested(buffers.vbo.edge_fac);
const bool calc_loose_geom = ibo_requests.contains(IBOType::Lines) ||
ibo_requests.contains(IBOType::LinesLoose) ||
ibo_requests.contains(IBOType::Points) ||
vbo_requests.contains(VBOType::Position) ||
vbo_requests.contains(VBOType::EditData) ||
vbo_requests.contains(VBOType::VertexNormal) ||
vbo_requests.contains(VBOType::IndexVert) ||
vbo_requests.contains(VBOType::IndexEdge) ||
vbo_requests.contains(VBOType::EdgeFactor);
if (calc_loose_geom) {
mesh_render_data_update_loose_geom(mr, update_task_data->cache);
mesh_render_data_update_loose_geom(mr, cache);
}
}
@@ -78,660 +72,221 @@ static void mesh_extract_render_data_node_exec(void *__restrict task_data)
/** \name Extract Loop
* \{ */
static bool any_attr_requested(const MeshBufferList &buffers)
{
for (const int i : IndexRange(ARRAY_SIZE(buffers.vbo.attr))) {
if (DRW_vbo_requested(buffers.vbo.attr[i])) {
return true;
}
}
return false;
}
void mesh_buffer_cache_create_requested(TaskGraph &task_graph,
void mesh_buffer_cache_create_requested(TaskGraph & /*task_graph*/,
const Scene &scene,
MeshBatchCache &cache,
MeshBufferCache &mbc,
const Span<IBOType> ibo_requests,
const Span<VBOType> vbo_requests,
Object &object,
Mesh &mesh,
const bool is_editmode,
const bool is_paint_mode,
const float4x4 &object_to_world,
const bool do_final,
const bool do_uvedit,
const Scene &scene,
const ToolSettings *ts,
const bool use_hide)
{
/* For each mesh where batches needs to be updated a sub-graph will be added to the task_graph.
* This sub-graph starts with an extract_render_data_node. This fills/converts the required
* data from Mesh.
*
* Small extractions and extractions that can't be multi-threaded are grouped in a single
* `extract_single_threaded_task_node`.
*
* Other extractions will create a node for each loop exceeding 8192 items. these nodes are
* linked to the `user_data_init_task_node`. the `user_data_init_task_node` prepares the
* user_data needed for the extraction based on the data extracted from the mesh.
* counters are used to check if the finalize of a task has to be called.
*
* Mesh extraction sub graph
*
* +----------------------+
* +-----> | extract_task1_loop_1 |
* | +----------------------+
* +------------------+ +----------------------+ +----------------------+
* | mesh_render_data | --> | | --> | extract_task1_loop_2 |
* +------------------+ | | +----------------------+
* | | | +----------------------+
* | | user_data_init | --> | extract_task2_loop_1 |
* v | | +----------------------+
* +------------------+ | | +----------------------+
* | single_threaded | | | --> | extract_task2_loop_2 |
* +------------------+ +----------------------+ +----------------------+
* | +----------------------+
* +-----> | extract_task2_loop_3 |
* +----------------------+
*/
const bool do_hq_normals = (scene.r.perf_flag & SCE_PERF_HQ_NORMALS) != 0 ||
GPU_use_hq_normals_workaround();
MeshBufferList &buffers = mbc.buff;
const bool attrs_requested = any_attr_requested(buffers);
if (!DRW_ibo_requested(buffers.ibo.lines) && !DRW_ibo_requested(buffers.ibo.lines_loose) &&
!DRW_ibo_requested(buffers.ibo.tris) && !DRW_ibo_requested(buffers.ibo.points) &&
!DRW_ibo_requested(buffers.ibo.fdots) && !DRW_vbo_requested(buffers.vbo.pos) &&
!DRW_vbo_requested(buffers.vbo.fdots_pos) && !DRW_vbo_requested(buffers.vbo.nor) &&
!DRW_vbo_requested(buffers.vbo.vnor) && !DRW_vbo_requested(buffers.vbo.fdots_nor) &&
!DRW_vbo_requested(buffers.vbo.edge_fac) && !DRW_vbo_requested(buffers.vbo.tan) &&
!DRW_vbo_requested(buffers.vbo.edit_data) && !DRW_vbo_requested(buffers.vbo.face_idx) &&
!DRW_vbo_requested(buffers.vbo.edge_idx) && !DRW_vbo_requested(buffers.vbo.vert_idx) &&
!DRW_vbo_requested(buffers.vbo.fdot_idx) && !DRW_vbo_requested(buffers.vbo.weights) &&
!DRW_vbo_requested(buffers.vbo.fdots_uv) &&
!DRW_vbo_requested(buffers.vbo.fdots_edituv_data) && !DRW_vbo_requested(buffers.vbo.uv) &&
!DRW_vbo_requested(buffers.vbo.edituv_stretch_area) &&
!DRW_vbo_requested(buffers.vbo.edituv_stretch_angle) &&
!DRW_vbo_requested(buffers.vbo.edituv_data) && !DRW_ibo_requested(buffers.ibo.edituv_tris) &&
!DRW_ibo_requested(buffers.ibo.edituv_lines) &&
!DRW_ibo_requested(buffers.ibo.edituv_points) &&
!DRW_ibo_requested(buffers.ibo.edituv_fdots) &&
!DRW_ibo_requested(buffers.ibo.lines_paint_mask) &&
!DRW_ibo_requested(buffers.ibo.lines_adjacency) &&
!DRW_vbo_requested(buffers.vbo.skin_roots) && !DRW_vbo_requested(buffers.vbo.sculpt_data) &&
!DRW_vbo_requested(buffers.vbo.orco) && !DRW_vbo_requested(buffers.vbo.mesh_analysis) &&
!DRW_vbo_requested(buffers.vbo.attr_viewer) && !attrs_requested)
{
if (ibo_requests.is_empty() && vbo_requests.is_empty()) {
return;
}
#ifdef DEBUG_TIME
double rdata_start = BLI_time_now_seconds();
SCOPED_TIMER(__func__);
#endif
std::unique_ptr<MeshRenderData> mr_ptr = mesh_render_data_create(object,
mesh,
is_editmode,
is_paint_mode,
object_to_world,
do_final,
do_uvedit,
use_hide,
ts);
MeshRenderData *mr = mr_ptr.get();
mr->use_subsurf_fdots = mr->mesh && !mr->mesh->runtime->subsurf_face_dot_tags.is_empty();
mr->use_final_mesh = do_final;
mr->use_simplify_normals = (scene.r.mode & R_SIMPLIFY) && (scene.r.mode & R_SIMPLIFY_NORMALS);
MeshBufferList &buffers = mbc.buff;
#ifdef DEBUG_TIME
double rdata_end = BLI_time_now_seconds();
#endif
MeshRenderData mr = mesh_render_data_create(object,
mesh,
is_editmode,
is_paint_mode,
object.object_to_world(),
do_final,
do_uvedit,
use_hide,
scene.toolsettings);
TaskNode *task_node_mesh_render_data = BLI_task_graph_node_create(
&task_graph,
mesh_extract_render_data_node_exec,
new MeshRenderDataUpdateTaskData{std::move(mr_ptr), mbc},
[](void *task_data) { delete static_cast<MeshRenderDataUpdateTaskData *>(task_data); });
ensure_dependency_data(mr, ibo_requests, vbo_requests, mbc);
if (DRW_vbo_requested(buffers.vbo.pos)) {
struct TaskData {
MeshRenderData &mr;
MeshBufferCache &mbc;
};
TaskNode *task_node = BLI_task_graph_node_create(
&task_graph,
[](void *__restrict task_data) {
const TaskData &data = *static_cast<TaskData *>(task_data);
extract_positions(data.mr, *data.mbc.buff.vbo.pos);
},
new TaskData{*mr, mbc},
[](void *task_data) { delete static_cast<TaskData *>(task_data); });
BLI_task_graph_edge_create(task_node_mesh_render_data, task_node);
mr.use_subsurf_fdots = mr.mesh && !mr.mesh->runtime->subsurf_face_dot_tags.is_empty();
mr.use_simplify_normals = (scene.r.mode & R_SIMPLIFY) && (scene.r.mode & R_SIMPLIFY_NORMALS);
bool lines = false;
bool attrs = false;
Map<IBOType, gpu::IndexBuf *> ibos_to_create;
Map<VBOType, gpu::VertBuf *> vbos_to_create;
for (const IBOType request : ibo_requests) {
buffers.ibos.lookup_or_add_cb(request, [&]() {
lines |= ELEM(request, IBOType::Lines, IBOType::LinesLoose);
gpu::IndexBuf *ibo = GPU_indexbuf_calloc();
ibos_to_create.add_new(request, ibo);
return std::unique_ptr<gpu::IndexBuf, gpu::IndexBufDeleter>(ibo);
});
}
if (DRW_vbo_requested(buffers.vbo.fdots_pos)) {
struct TaskData {
MeshRenderData &mr;
MeshBufferCache &mbc;
};
TaskNode *task_node = BLI_task_graph_node_create(
&task_graph,
[](void *__restrict task_data) {
const TaskData &data = *static_cast<TaskData *>(task_data);
extract_face_dots_position(data.mr, *data.mbc.buff.vbo.fdots_pos);
},
new TaskData{*mr, mbc},
[](void *task_data) { delete static_cast<TaskData *>(task_data); });
BLI_task_graph_edge_create(task_node_mesh_render_data, task_node);
}
if (DRW_vbo_requested(buffers.vbo.nor)) {
struct TaskData {
MeshRenderData &mr;
MeshBufferCache &mbc;
bool do_hq_normals;
};
TaskNode *task_node = BLI_task_graph_node_create(
&task_graph,
[](void *__restrict task_data) {
const TaskData &data = *static_cast<TaskData *>(task_data);
extract_normals(data.mr, data.do_hq_normals, *data.mbc.buff.vbo.nor);
},
new TaskData{*mr, mbc, do_hq_normals},
[](void *task_data) { delete static_cast<TaskData *>(task_data); });
BLI_task_graph_edge_create(task_node_mesh_render_data, task_node);
}
if (DRW_vbo_requested(buffers.vbo.vnor)) {
struct TaskData {
MeshRenderData &mr;
MeshBufferList &buffers;
};
TaskNode *task_node = BLI_task_graph_node_create(
&task_graph,
[](void *__restrict task_data) {
const TaskData &data = *static_cast<TaskData *>(task_data);
extract_vert_normals(data.mr, *data.buffers.vbo.vnor);
},
new TaskData{*mr, buffers},
[](void *task_data) { delete static_cast<TaskData *>(task_data); });
BLI_task_graph_edge_create(task_node_mesh_render_data, task_node);
}
if (DRW_vbo_requested(buffers.vbo.fdots_nor)) {
struct TaskData {
MeshRenderData &mr;
MeshBufferCache &mbc;
bool do_hq_normals;
};
TaskNode *task_node = BLI_task_graph_node_create(
&task_graph,
[](void *__restrict task_data) {
const TaskData &data = *static_cast<TaskData *>(task_data);
extract_face_dot_normals(data.mr, data.do_hq_normals, *data.mbc.buff.vbo.fdots_nor);
},
new TaskData{*mr, mbc, do_hq_normals},
[](void *task_data) { delete static_cast<TaskData *>(task_data); });
BLI_task_graph_edge_create(task_node_mesh_render_data, task_node);
}
if (DRW_vbo_requested(buffers.vbo.edge_fac)) {
struct TaskData {
MeshRenderData &mr;
MeshBufferCache &mbc;
};
TaskNode *task_node = BLI_task_graph_node_create(
&task_graph,
[](void *__restrict task_data) {
const TaskData &data = *static_cast<TaskData *>(task_data);
extract_edge_factor(data.mr, *data.mbc.buff.vbo.edge_fac);
},
new TaskData{*mr, mbc},
[](void *task_data) { delete static_cast<TaskData *>(task_data); });
BLI_task_graph_edge_create(task_node_mesh_render_data, task_node);
}
if (DRW_ibo_requested(buffers.ibo.tris)) {
struct TaskData {
MeshRenderData &mr;
MeshBufferCache &mbc;
MeshBatchCache &cache;
};
TaskNode *task_node = BLI_task_graph_node_create(
&task_graph,
[](void *__restrict task_data) {
const TaskData &data = *static_cast<TaskData *>(task_data);
const SortedFaceData &face_sorted = mesh_render_data_faces_sorted_ensure(data.mr,
data.mbc);
extract_tris(data.mr, face_sorted, data.cache, *data.mbc.buff.ibo.tris);
},
new TaskData{*mr, mbc, cache},
[](void *task_data) { delete static_cast<TaskData *>(task_data); });
BLI_task_graph_edge_create(task_node_mesh_render_data, task_node);
}
if (DRW_ibo_requested(buffers.ibo.lines) || DRW_ibo_requested(buffers.ibo.lines_loose)) {
struct TaskData {
MeshRenderData &mr;
MeshBufferList &buffers;
MeshBatchCache &cache;
};
TaskNode *task_node = BLI_task_graph_node_create(
&task_graph,
[](void *__restrict task_data) {
const TaskData &data = *static_cast<TaskData *>(task_data);
extract_lines(data.mr,
data.buffers.ibo.lines,
data.buffers.ibo.lines_loose,
data.cache.no_loose_wire);
},
new TaskData{*mr, buffers, cache},
[](void *task_data) { delete static_cast<TaskData *>(task_data); });
BLI_task_graph_edge_create(task_node_mesh_render_data, task_node);
}
if (DRW_ibo_requested(buffers.ibo.points)) {
struct TaskData {
MeshRenderData &mr;
MeshBufferList &buffers;
};
TaskNode *task_node = BLI_task_graph_node_create(
&task_graph,
[](void *__restrict task_data) {
const TaskData &data = *static_cast<TaskData *>(task_data);
extract_points(data.mr, *data.buffers.ibo.points);
},
new TaskData{*mr, buffers},
[](void *task_data) { delete static_cast<TaskData *>(task_data); });
BLI_task_graph_edge_create(task_node_mesh_render_data, task_node);
}
if (DRW_ibo_requested(buffers.ibo.fdots)) {
struct TaskData {
MeshRenderData &mr;
MeshBufferList &buffers;
};
TaskNode *task_node = BLI_task_graph_node_create(
&task_graph,
[](void *__restrict task_data) {
const TaskData &data = *static_cast<TaskData *>(task_data);
extract_face_dots(data.mr, *data.buffers.ibo.fdots);
},
new TaskData{*mr, buffers},
[](void *task_data) { delete static_cast<TaskData *>(task_data); });
BLI_task_graph_edge_create(task_node_mesh_render_data, task_node);
}
if (DRW_vbo_requested(buffers.vbo.edit_data)) {
struct TaskData {
MeshRenderData &mr;
MeshBufferList &buffers;
};
TaskNode *task_node = BLI_task_graph_node_create(
&task_graph,
[](void *__restrict task_data) {
const TaskData &data = *static_cast<TaskData *>(task_data);
extract_edit_data(data.mr, *data.buffers.vbo.edit_data);
},
new TaskData{*mr, buffers},
[](void *task_data) { delete static_cast<TaskData *>(task_data); });
BLI_task_graph_edge_create(task_node_mesh_render_data, task_node);
}
if (DRW_vbo_requested(buffers.vbo.tan)) {
struct TaskData {
MeshRenderData &mr;
MeshBufferList &buffers;
MeshBatchCache &cache;
bool do_hq_normals;
};
TaskNode *task_node = BLI_task_graph_node_create(
&task_graph,
[](void *__restrict task_data) {
const TaskData &data = *static_cast<TaskData *>(task_data);
extract_tangents(data.mr, data.cache, data.do_hq_normals, *data.buffers.vbo.tan);
},
new TaskData{*mr, buffers, cache, do_hq_normals},
[](void *task_data) { delete static_cast<TaskData *>(task_data); });
BLI_task_graph_edge_create(task_node_mesh_render_data, task_node);
}
if (DRW_vbo_requested(buffers.vbo.face_idx) || DRW_vbo_requested(buffers.vbo.edge_idx) ||
DRW_vbo_requested(buffers.vbo.vert_idx) || DRW_vbo_requested(buffers.vbo.fdot_idx))
{
struct TaskData {
MeshRenderData &mr;
MeshBufferList &buffers;
};
TaskNode *task_node = BLI_task_graph_node_create(
&task_graph,
[](void *__restrict task_data) {
const TaskData &data = *static_cast<TaskData *>(task_data);
if (DRW_vbo_requested(data.buffers.vbo.vert_idx)) {
extract_vert_index(data.mr, *data.buffers.vbo.vert_idx);
}
if (DRW_vbo_requested(data.buffers.vbo.edge_idx)) {
extract_edge_index(data.mr, *data.buffers.vbo.edge_idx);
}
if (DRW_vbo_requested(data.buffers.vbo.face_idx)) {
extract_face_index(data.mr, *data.buffers.vbo.face_idx);
}
if (DRW_vbo_requested(data.buffers.vbo.fdot_idx)) {
extract_face_dot_index(data.mr, *data.buffers.vbo.fdot_idx);
}
},
new TaskData{*mr, buffers},
[](void *task_data) { delete static_cast<TaskData *>(task_data); });
BLI_task_graph_edge_create(task_node_mesh_render_data, task_node);
}
if (DRW_vbo_requested(buffers.vbo.weights)) {
struct TaskData {
MeshRenderData &mr;
MeshBufferList &buffers;
MeshBatchCache &cache;
};
TaskNode *task_node = BLI_task_graph_node_create(
&task_graph,
[](void *__restrict task_data) {
const TaskData &data = *static_cast<TaskData *>(task_data);
extract_weights(data.mr, data.cache, *data.buffers.vbo.weights);
},
new TaskData{*mr, buffers, cache},
[](void *task_data) { delete static_cast<TaskData *>(task_data); });
BLI_task_graph_edge_create(task_node_mesh_render_data, task_node);
}
if (DRW_vbo_requested(buffers.vbo.fdots_uv)) {
struct TaskData {
MeshRenderData &mr;
MeshBufferList &buffers;
};
TaskNode *task_node = BLI_task_graph_node_create(
&task_graph,
[](void *__restrict task_data) {
const TaskData &data = *static_cast<TaskData *>(task_data);
extract_face_dots_uv(data.mr, *data.buffers.vbo.fdots_uv);
},
new TaskData{*mr, buffers},
[](void *task_data) { delete static_cast<TaskData *>(task_data); });
BLI_task_graph_edge_create(task_node_mesh_render_data, task_node);
}
if (DRW_vbo_requested(buffers.vbo.fdots_edituv_data)) {
struct TaskData {
MeshRenderData &mr;
MeshBufferList &buffers;
};
TaskNode *task_node = BLI_task_graph_node_create(
&task_graph,
[](void *__restrict task_data) {
const TaskData &data = *static_cast<TaskData *>(task_data);
extract_face_dots_edituv_data(data.mr, *data.buffers.vbo.fdots_edituv_data);
},
new TaskData{*mr, buffers},
[](void *task_data) { delete static_cast<TaskData *>(task_data); });
BLI_task_graph_edge_create(task_node_mesh_render_data, task_node);
}
if (DRW_vbo_requested(buffers.vbo.uv)) {
struct TaskData {
MeshRenderData &mr;
MeshBufferList &buffers;
MeshBatchCache &cache;
};
TaskNode *task_node = BLI_task_graph_node_create(
&task_graph,
[](void *__restrict task_data) {
const TaskData &data = *static_cast<TaskData *>(task_data);
extract_uv_maps(data.mr, data.cache, *data.buffers.vbo.uv);
},
new TaskData{*mr, buffers, cache},
[](void *task_data) { delete static_cast<TaskData *>(task_data); });
BLI_task_graph_edge_create(task_node_mesh_render_data, task_node);
}
if (DRW_vbo_requested(buffers.vbo.edituv_stretch_area)) {
struct TaskData {
MeshRenderData &mr;
MeshBufferList &buffers;
MeshBatchCache &cache;
};
TaskNode *task_node = BLI_task_graph_node_create(
&task_graph,
[](void *__restrict task_data) {
const TaskData &data = *static_cast<TaskData *>(task_data);
extract_edituv_stretch_area(data.mr,
*data.buffers.vbo.edituv_stretch_area,
data.cache.tot_area,
data.cache.tot_uv_area);
},
new TaskData{*mr, buffers, cache},
[](void *task_data) { delete static_cast<TaskData *>(task_data); });
BLI_task_graph_edge_create(task_node_mesh_render_data, task_node);
}
if (DRW_vbo_requested(buffers.vbo.edituv_stretch_angle)) {
struct TaskData {
MeshRenderData &mr;
MeshBufferList &buffers;
};
TaskNode *task_node = BLI_task_graph_node_create(
&task_graph,
[](void *__restrict task_data) {
const TaskData &data = *static_cast<TaskData *>(task_data);
extract_edituv_stretch_angle(data.mr, *data.buffers.vbo.edituv_stretch_angle);
},
new TaskData{*mr, buffers},
[](void *task_data) { delete static_cast<TaskData *>(task_data); });
BLI_task_graph_edge_create(task_node_mesh_render_data, task_node);
}
if (DRW_vbo_requested(buffers.vbo.edituv_data)) {
struct TaskData {
MeshRenderData &mr;
MeshBufferList &buffers;
};
TaskNode *task_node = BLI_task_graph_node_create(
&task_graph,
[](void *__restrict task_data) {
const TaskData &data = *static_cast<TaskData *>(task_data);
extract_edituv_data(data.mr, *data.buffers.vbo.edituv_data);
},
new TaskData{*mr, buffers},
[](void *task_data) { delete static_cast<TaskData *>(task_data); });
BLI_task_graph_edge_create(task_node_mesh_render_data, task_node);
}
if (DRW_ibo_requested(buffers.ibo.edituv_tris)) {
struct TaskData {
MeshRenderData &mr;
MeshBufferList &buffers;
};
TaskNode *task_node = BLI_task_graph_node_create(
&task_graph,
[](void *__restrict task_data) {
const TaskData &data = *static_cast<TaskData *>(task_data);
extract_edituv_tris(data.mr, *data.buffers.ibo.edituv_tris);
},
new TaskData{*mr, buffers},
[](void *task_data) { delete static_cast<TaskData *>(task_data); });
BLI_task_graph_edge_create(task_node_mesh_render_data, task_node);
}
if (DRW_ibo_requested(buffers.ibo.edituv_lines)) {
struct TaskData {
MeshRenderData &mr;
MeshBufferList &buffers;
};
TaskNode *task_node = BLI_task_graph_node_create(
&task_graph,
[](void *__restrict task_data) {
const TaskData &data = *static_cast<TaskData *>(task_data);
extract_edituv_lines(data.mr, *data.buffers.ibo.edituv_lines);
},
new TaskData{*mr, buffers},
[](void *task_data) { delete static_cast<TaskData *>(task_data); });
BLI_task_graph_edge_create(task_node_mesh_render_data, task_node);
}
if (DRW_ibo_requested(buffers.ibo.edituv_points)) {
struct TaskData {
MeshRenderData &mr;
MeshBufferList &buffers;
};
TaskNode *task_node = BLI_task_graph_node_create(
&task_graph,
[](void *__restrict task_data) {
const TaskData &data = *static_cast<TaskData *>(task_data);
extract_edituv_points(data.mr, *data.buffers.ibo.edituv_points);
},
new TaskData{*mr, buffers},
[](void *task_data) { delete static_cast<TaskData *>(task_data); });
BLI_task_graph_edge_create(task_node_mesh_render_data, task_node);
}
if (DRW_ibo_requested(buffers.ibo.edituv_fdots)) {
struct TaskData {
MeshRenderData &mr;
MeshBufferList &buffers;
};
TaskNode *task_node = BLI_task_graph_node_create(
&task_graph,
[](void *__restrict task_data) {
const TaskData &data = *static_cast<TaskData *>(task_data);
extract_edituv_face_dots(data.mr, *data.buffers.ibo.edituv_fdots);
},
new TaskData{*mr, buffers},
[](void *task_data) { delete static_cast<TaskData *>(task_data); });
BLI_task_graph_edge_create(task_node_mesh_render_data, task_node);
}
if (DRW_ibo_requested(buffers.ibo.lines_paint_mask)) {
struct TaskData {
MeshRenderData &mr;
MeshBufferList &buffers;
};
TaskNode *task_node = BLI_task_graph_node_create(
&task_graph,
[](void *__restrict task_data) {
const TaskData &data = *static_cast<TaskData *>(task_data);
extract_lines_paint_mask(data.mr, *data.buffers.ibo.lines_paint_mask);
},
new TaskData{*mr, buffers},
[](void *task_data) { delete static_cast<TaskData *>(task_data); });
BLI_task_graph_edge_create(task_node_mesh_render_data, task_node);
}
if (DRW_ibo_requested(buffers.ibo.lines_adjacency)) {
struct TaskData {
MeshRenderData &mr;
MeshBufferList &buffers;
MeshBatchCache &cache;
};
TaskNode *task_node = BLI_task_graph_node_create(
&task_graph,
[](void *__restrict task_data) {
const TaskData &data = *static_cast<TaskData *>(task_data);
extract_lines_adjacency(
data.mr, *data.buffers.ibo.lines_adjacency, data.cache.is_manifold);
},
new TaskData{*mr, buffers, cache},
[](void *task_data) { delete static_cast<TaskData *>(task_data); });
BLI_task_graph_edge_create(task_node_mesh_render_data, task_node);
}
if (DRW_vbo_requested(buffers.vbo.skin_roots)) {
struct TaskData {
MeshRenderData &mr;
MeshBufferList &buffers;
};
TaskNode *task_node = BLI_task_graph_node_create(
&task_graph,
[](void *__restrict task_data) {
const TaskData &data = *static_cast<TaskData *>(task_data);
extract_skin_roots(data.mr, *data.buffers.vbo.skin_roots);
},
new TaskData{*mr, buffers},
[](void *task_data) { delete static_cast<TaskData *>(task_data); });
BLI_task_graph_edge_create(task_node_mesh_render_data, task_node);
}
if (DRW_vbo_requested(buffers.vbo.sculpt_data)) {
struct TaskData {
MeshRenderData &mr;
MeshBufferList &buffers;
};
TaskNode *task_node = BLI_task_graph_node_create(
&task_graph,
[](void *__restrict task_data) {
const TaskData &data = *static_cast<TaskData *>(task_data);
extract_sculpt_data(data.mr, *data.buffers.vbo.sculpt_data);
},
new TaskData{*mr, buffers},
[](void *task_data) { delete static_cast<TaskData *>(task_data); });
BLI_task_graph_edge_create(task_node_mesh_render_data, task_node);
}
if (DRW_vbo_requested(buffers.vbo.orco)) {
struct TaskData {
MeshRenderData &mr;
MeshBufferList &buffers;
};
TaskNode *task_node = BLI_task_graph_node_create(
&task_graph,
[](void *__restrict task_data) {
const TaskData &data = *static_cast<TaskData *>(task_data);
extract_orco(data.mr, *data.buffers.vbo.orco);
},
new TaskData{*mr, buffers},
[](void *task_data) { delete static_cast<TaskData *>(task_data); });
BLI_task_graph_edge_create(task_node_mesh_render_data, task_node);
}
if (DRW_vbo_requested(buffers.vbo.mesh_analysis)) {
struct TaskData {
MeshRenderData &mr;
MeshBufferList &buffers;
};
TaskNode *task_node = BLI_task_graph_node_create(
&task_graph,
[](void *__restrict task_data) {
const TaskData &data = *static_cast<TaskData *>(task_data);
extract_mesh_analysis(data.mr, *data.buffers.vbo.mesh_analysis);
},
new TaskData{*mr, buffers},
[](void *task_data) { delete static_cast<TaskData *>(task_data); });
BLI_task_graph_edge_create(task_node_mesh_render_data, task_node);
}
if (attrs_requested) {
struct TaskData {
MeshRenderData &mr;
MeshBufferList &buffers;
MeshBatchCache &cache;
};
TaskNode *task_node = BLI_task_graph_node_create(
&task_graph,
[](void *__restrict task_data) {
const TaskData &data = *static_cast<TaskData *>(task_data);
extract_attributes(data.mr,
{data.cache.attr_used.requests, GPU_MAX_ATTR},
{data.buffers.vbo.attr, GPU_MAX_ATTR});
},
new TaskData{*mr, buffers, cache},
[](void *task_data) { delete static_cast<TaskData *>(task_data); });
BLI_task_graph_edge_create(task_node_mesh_render_data, task_node);
}
if (DRW_vbo_requested(buffers.vbo.attr_viewer)) {
struct TaskData {
MeshRenderData &mr;
MeshBufferList &buffers;
};
TaskNode *task_node = BLI_task_graph_node_create(
&task_graph,
[](void *__restrict task_data) {
const TaskData &data = *static_cast<TaskData *>(task_data);
extract_attr_viewer(data.mr, *data.buffers.vbo.attr_viewer);
},
new TaskData{*mr, buffers},
[](void *task_data) { delete static_cast<TaskData *>(task_data); });
BLI_task_graph_edge_create(task_node_mesh_render_data, task_node);
for (const VBOType request : vbo_requests) {
buffers.vbos.lookup_or_add_cb(request, [&]() {
attrs |= int8_t(request) >= int8_t(VBOType::Attr0) &&
int8_t(request) <= int8_t(VBOType::Attr15);
gpu::VertBuf *vbo = GPU_vertbuf_calloc();
vbos_to_create.add_new(request, vbo);
return std::unique_ptr<gpu::VertBuf, gpu::VertBufDeleter>(vbo);
});
}
/* Trigger the sub-graph for this mesh. */
BLI_task_graph_node_push_work(task_node_mesh_render_data);
#ifdef DEBUG_TIME
BLI_task_graph_work_and_wait(task_graph);
double end = BLI_time_now_seconds();
static double avg = 0;
static double avg_fps = 0;
static double avg_rdata = 0;
static double end_prev = 0;
if (end_prev == 0) {
end_prev = end;
/* Because lines and loose lines are stored in the same buffer, they're handled separately rather
* than from potentially multiple threads in the parallel_for_each loop below. */
if (lines) {
extract_lines(mr,
ibos_to_create.lookup_default(IBOType::Lines, nullptr),
ibos_to_create.lookup_default(IBOType::LinesLoose, nullptr),
cache.no_loose_wire);
}
avg = avg * 0.95 + (end - rdata_end) * 0.05;
avg_fps = avg_fps * 0.95 + (end - end_prev) * 0.05;
avg_rdata = avg_rdata * 0.95 + (rdata_end - rdata_start) * 0.05;
threading::parallel_for_each(ibos_to_create.items(), [&](const auto item) {
switch (item.key) {
case IBOType::Tris:
extract_tris(mr, mesh_render_data_faces_sorted_ensure(mr, mbc), *item.value);
break;
case IBOType::Lines:
case IBOType::LinesLoose:
/* Handled as a special case above. */
break;
case IBOType::Points:
extract_points(mr, *item.value);
break;
case IBOType::FaceDots:
extract_face_dots(mr, *item.value);
break;
case IBOType::LinesPaintMask:
extract_lines_paint_mask(mr, *item.value);
break;
case IBOType::LinesAdjacency:
extract_lines_adjacency(mr, *item.value, cache.is_manifold);
break;
case IBOType::EditUVTris:
extract_edituv_tris(mr, *item.value);
break;
case IBOType::EditUVLines:
extract_edituv_lines(mr, *item.value);
break;
case IBOType::EditUVPoints:
extract_edituv_points(mr, *item.value);
break;
case IBOType::EditUVFaceDots:
extract_edituv_face_dots(mr, *item.value);
break;
}
});
printf(
"rdata %.0fms iter %.0fms (frame %.0fms)\n", avg_rdata * 1000, avg * 1000, avg_fps * 1000);
/* It's simpler to handle all the generic attribute requests in the same place too. This is
* multithreaded and just memory bound anyway. Running them in parallel with other buffer
* creation tasks is probably not useful. */
if (attrs) {
for (const int8_t i : IndexRange(GPU_MAX_ATTR)) {
const VBOType vbo_type = VBOType(int8_t(VBOType::Attr0) + i);
if (gpu::VertBuf *vbo = vbos_to_create.lookup_default(vbo_type, nullptr)) {
extract_attribute(mr, cache.attr_used.requests[i], *vbo);
}
}
}
end_prev = end;
#endif
const bool do_hq_normals = (scene.r.perf_flag & SCE_PERF_HQ_NORMALS) != 0 ||
GPU_use_hq_normals_workaround();
threading::parallel_for_each(vbos_to_create.items(), [&](const auto item) {
switch (item.key) {
case VBOType::Position:
extract_positions(mr, *item.value);
break;
case VBOType::CornerNormal:
extract_normals(mr, do_hq_normals, *item.value);
break;
case VBOType::EdgeFactor:
extract_edge_factor(mr, *item.value);
break;
case VBOType::VertexGroupWeight:
extract_weights(mr, cache, *item.value);
break;
case VBOType::UVs:
extract_uv_maps(mr, cache, *item.value);
break;
case VBOType::Tangents:
extract_tangents(mr, cache, do_hq_normals, *item.value);
break;
case VBOType::SculptData:
extract_sculpt_data(mr, *item.value);
break;
case VBOType::Orco:
extract_orco(mr, *item.value);
break;
case VBOType::EditData:
extract_edit_data(mr, *item.value);
break;
case VBOType::EditUVData:
extract_edituv_data(mr, *item.value);
break;
case VBOType::EditUVStretchArea:
extract_edituv_stretch_area(mr, *item.value, cache.tot_area, cache.tot_uv_area);
break;
case VBOType::EditUVStretchAngle:
extract_edituv_stretch_angle(mr, *item.value);
break;
case VBOType::MeshAnalysis:
extract_mesh_analysis(mr, *item.value);
break;
case VBOType::FaceDotPosition:
extract_face_dots_position(mr, *item.value);
break;
case VBOType::FaceDotNormal:
extract_face_dot_normals(mr, do_hq_normals, *item.value);
break;
case VBOType::FaceDotUV:
extract_face_dots_uv(mr, *item.value);
break;
case VBOType::FaceDotEditUVData:
extract_face_dots_edituv_data(mr, *item.value);
break;
case VBOType::SkinRoots:
extract_skin_roots(mr, *item.value);
break;
case VBOType::IndexVert:
extract_vert_index(mr, *item.value);
break;
case VBOType::IndexEdge:
extract_edge_index(mr, *item.value);
break;
case VBOType::IndexFace:
extract_face_index(mr, *item.value);
break;
case VBOType::IndexFaceDot:
extract_face_dot_index(mr, *item.value);
break;
case VBOType::Attr0:
case VBOType::Attr1:
case VBOType::Attr2:
case VBOType::Attr3:
case VBOType::Attr5:
case VBOType::Attr6:
case VBOType::Attr7:
case VBOType::Attr8:
case VBOType::Attr9:
case VBOType::Attr10:
case VBOType::Attr11:
case VBOType::Attr12:
case VBOType::Attr13:
case VBOType::Attr14:
case VBOType::Attr15:
/* Handled as a special case above. */
break;
case VBOType::AttrViewer:
extract_attr_viewer(mr, *item.value);
break;
case VBOType::VertexNormal:
extract_vert_normals(mr, *item.value);
break;
}
});
}
/** \} */
@@ -742,119 +297,135 @@ void mesh_buffer_cache_create_requested(TaskGraph &task_graph,
void mesh_buffer_cache_create_requested_subdiv(MeshBatchCache &cache,
MeshBufferCache &mbc,
const Span<IBOType> ibo_requests,
const Span<VBOType> vbo_requests,
DRWSubdivCache &subdiv_cache,
MeshRenderData &mr)
{
MeshBufferList &buffers = mbc.buff;
const bool attrs_requested = any_attr_requested(buffers);
if (!DRW_ibo_requested(buffers.ibo.lines) && !DRW_ibo_requested(buffers.ibo.lines_loose) &&
!DRW_ibo_requested(buffers.ibo.tris) && !DRW_ibo_requested(buffers.ibo.points) &&
!DRW_vbo_requested(buffers.vbo.pos) && !DRW_vbo_requested(buffers.vbo.orco) &&
!DRW_vbo_requested(buffers.vbo.nor) && !DRW_vbo_requested(buffers.vbo.edge_fac) &&
!DRW_vbo_requested(buffers.vbo.tan) && !DRW_vbo_requested(buffers.vbo.edit_data) &&
!DRW_vbo_requested(buffers.vbo.face_idx) && !DRW_vbo_requested(buffers.vbo.edge_idx) &&
!DRW_vbo_requested(buffers.vbo.vert_idx) && !DRW_vbo_requested(buffers.vbo.weights) &&
!DRW_vbo_requested(buffers.vbo.fdots_nor) && !DRW_vbo_requested(buffers.vbo.fdots_pos) &&
!DRW_ibo_requested(buffers.ibo.fdots) && !DRW_vbo_requested(buffers.vbo.uv) &&
!DRW_vbo_requested(buffers.vbo.edituv_stretch_area) &&
!DRW_vbo_requested(buffers.vbo.edituv_stretch_angle) &&
!DRW_vbo_requested(buffers.vbo.edituv_data) && !DRW_ibo_requested(buffers.ibo.edituv_tris) &&
!DRW_ibo_requested(buffers.ibo.edituv_lines) &&
!DRW_ibo_requested(buffers.ibo.edituv_points) &&
!DRW_ibo_requested(buffers.ibo.lines_paint_mask) &&
!DRW_ibo_requested(buffers.ibo.lines_adjacency) &&
!DRW_vbo_requested(buffers.vbo.sculpt_data) && !attrs_requested)
{
if (ibo_requests.is_empty() && vbo_requests.is_empty()) {
return;
}
MeshBufferList &buffers = mbc.buff;
mesh_render_data_update_corner_normals(mr);
mesh_render_data_update_loose_geom(mr, mbc);
DRW_subdivide_loose_geom(subdiv_cache, mbc);
if (DRW_vbo_requested(buffers.vbo.pos) || DRW_vbo_requested(buffers.vbo.orco)) {
extract_positions_subdiv(subdiv_cache, mr, *buffers.vbo.pos, buffers.vbo.orco);
bool lines = false;
bool attrs = false;
Map<IBOType, gpu::IndexBuf *> ibos_to_create;
Map<VBOType, gpu::VertBuf *> vbos_to_create;
for (const IBOType request : ibo_requests) {
buffers.ibos.lookup_or_add_cb(request, [&]() {
lines |= ELEM(request, IBOType::Lines, IBOType::LinesLoose);
gpu::IndexBuf *ibo = GPU_indexbuf_calloc();
ibos_to_create.add_new(request, ibo);
return std::unique_ptr<gpu::IndexBuf, gpu::IndexBufDeleter>(ibo);
});
}
if (DRW_vbo_requested(buffers.vbo.nor)) {
for (const VBOType request : vbo_requests) {
buffers.vbos.lookup_or_add_cb(request, [&]() {
attrs |= int8_t(request) >= int8_t(VBOType::Attr0) &&
int8_t(request) <= int8_t(VBOType::Attr15);
gpu::VertBuf *vbo = GPU_vertbuf_calloc();
vbos_to_create.add_new(request, vbo);
return std::unique_ptr<gpu::VertBuf, gpu::VertBufDeleter>(vbo);
});
}
if (vbos_to_create.contains(VBOType::Position) || vbos_to_create.contains(VBOType::Orco)) {
extract_positions_subdiv(subdiv_cache,
mr,
*vbos_to_create.lookup(VBOType::Position),
vbos_to_create.lookup_default(VBOType::Orco, nullptr));
}
if (gpu::VertBuf *vbo = vbos_to_create.lookup_default(VBOType::CornerNormal, nullptr)) {
/* The corner normals calculation uses positions and normals stored in the `pos` VBO. */
extract_normals_subdiv(mr, subdiv_cache, *buffers.vbo.pos, *buffers.vbo.nor);
extract_normals_subdiv(mr, subdiv_cache, *buffers.vbos.lookup(VBOType::Position), *vbo);
}
if (DRW_vbo_requested(buffers.vbo.edge_fac)) {
extract_edge_factor_subdiv(subdiv_cache, mr, *buffers.vbo.pos, *buffers.vbo.edge_fac);
if (gpu::VertBuf *vbo = vbos_to_create.lookup_default(VBOType::EdgeFactor, nullptr)) {
extract_edge_factor_subdiv(subdiv_cache, mr, *buffers.vbos.lookup(VBOType::Position), *vbo);
}
if (DRW_ibo_requested(buffers.ibo.lines) || DRW_ibo_requested(buffers.ibo.lines_loose)) {
extract_lines_subdiv(
subdiv_cache, mr, buffers.ibo.lines, buffers.ibo.lines_loose, cache.no_loose_wire);
if (ibos_to_create.contains(IBOType::Lines) || ibos_to_create.contains(IBOType::LinesLoose)) {
extract_lines_subdiv(subdiv_cache,
mr,
ibos_to_create.lookup_default(IBOType::Lines, nullptr),
ibos_to_create.lookup_default(IBOType::LinesLoose, nullptr),
cache.no_loose_wire);
}
if (DRW_ibo_requested(buffers.ibo.tris)) {
extract_tris_subdiv(subdiv_cache, cache, *buffers.ibo.tris);
if (gpu::IndexBuf *ibo = ibos_to_create.lookup_default(IBOType::Tris, nullptr)) {
extract_tris_subdiv(subdiv_cache, cache, *ibo);
}
if (DRW_ibo_requested(buffers.ibo.points)) {
extract_points_subdiv(mr, subdiv_cache, *buffers.ibo.points);
if (gpu::IndexBuf *ibo = ibos_to_create.lookup_default(IBOType::Points, nullptr)) {
extract_points_subdiv(mr, subdiv_cache, *ibo);
}
if (DRW_vbo_requested(buffers.vbo.edit_data)) {
extract_edit_data_subdiv(mr, subdiv_cache, *buffers.vbo.edit_data);
if (gpu::VertBuf *vbo = vbos_to_create.lookup_default(VBOType::EditData, nullptr)) {
extract_edit_data_subdiv(mr, subdiv_cache, *vbo);
}
if (DRW_vbo_requested(buffers.vbo.tan)) {
extract_tangents_subdiv(mr, subdiv_cache, cache, *buffers.vbo.tan);
if (gpu::VertBuf *vbo = vbos_to_create.lookup_default(VBOType::Tangents, nullptr)) {
extract_tangents_subdiv(mr, subdiv_cache, cache, *vbo);
}
if (DRW_vbo_requested(buffers.vbo.vert_idx)) {
extract_vert_index_subdiv(subdiv_cache, mr, *buffers.vbo.vert_idx);
if (gpu::VertBuf *vbo = vbos_to_create.lookup_default(VBOType::IndexVert, nullptr)) {
extract_vert_index_subdiv(subdiv_cache, mr, *vbo);
}
if (DRW_vbo_requested(buffers.vbo.edge_idx)) {
extract_edge_index_subdiv(subdiv_cache, mr, *buffers.vbo.edge_idx);
if (gpu::VertBuf *vbo = vbos_to_create.lookup_default(VBOType::IndexEdge, nullptr)) {
extract_edge_index_subdiv(subdiv_cache, mr, *vbo);
}
if (DRW_vbo_requested(buffers.vbo.face_idx)) {
extract_face_index_subdiv(subdiv_cache, mr, *buffers.vbo.face_idx);
if (gpu::VertBuf *vbo = vbos_to_create.lookup_default(VBOType::IndexFace, nullptr)) {
extract_face_index_subdiv(subdiv_cache, mr, *vbo);
}
if (DRW_vbo_requested(buffers.vbo.weights)) {
extract_weights_subdiv(mr, subdiv_cache, cache, *buffers.vbo.weights);
if (gpu::VertBuf *vbo = vbos_to_create.lookup_default(VBOType::VertexGroupWeight, nullptr)) {
extract_weights_subdiv(mr, subdiv_cache, cache, *vbo);
}
if (DRW_vbo_requested(buffers.vbo.fdots_nor) || DRW_vbo_requested(buffers.vbo.fdots_pos) ||
DRW_ibo_requested(buffers.ibo.fdots))
if (vbos_to_create.contains(VBOType::FaceDotNormal) ||
vbos_to_create.contains(VBOType::FaceDotPosition) ||
ibos_to_create.contains(IBOType::FaceDots))
{
/* We use only one extractor for face dots, as the work is done in a single compute shader. */
extract_face_dots_subdiv(
subdiv_cache, *buffers.vbo.fdots_pos, buffers.vbo.fdots_nor, *buffers.ibo.fdots);
extract_face_dots_subdiv(subdiv_cache,
*vbos_to_create.lookup_default(VBOType::FaceDotPosition, nullptr),
vbos_to_create.lookup_default(VBOType::FaceDotNormal, nullptr),
*ibos_to_create.lookup_default(IBOType::FaceDots, nullptr));
}
if (DRW_ibo_requested(buffers.ibo.lines_paint_mask)) {
extract_lines_paint_mask_subdiv(mr, subdiv_cache, *buffers.ibo.lines_paint_mask);
if (gpu::IndexBuf *ibo = ibos_to_create.lookup_default(IBOType::LinesPaintMask, nullptr)) {
extract_lines_paint_mask_subdiv(mr, subdiv_cache, *ibo);
}
if (DRW_ibo_requested(buffers.ibo.lines_adjacency)) {
extract_lines_adjacency_subdiv(subdiv_cache, *buffers.ibo.lines_adjacency, cache.is_manifold);
if (gpu::IndexBuf *ibo = ibos_to_create.lookup_default(IBOType::LinesAdjacency, nullptr)) {
extract_lines_adjacency_subdiv(subdiv_cache, *ibo, cache.is_manifold);
}
if (DRW_vbo_requested(buffers.vbo.sculpt_data)) {
extract_sculpt_data_subdiv(mr, subdiv_cache, *buffers.vbo.sculpt_data);
if (gpu::VertBuf *vbo = vbos_to_create.lookup_default(VBOType::SculptData, nullptr)) {
extract_sculpt_data_subdiv(mr, subdiv_cache, *vbo);
}
if (DRW_vbo_requested(buffers.vbo.uv)) {
if (gpu::VertBuf *vbo = vbos_to_create.lookup_default(VBOType::UVs, nullptr)) {
/* Make sure UVs are computed before edituv stuffs. */
extract_uv_maps_subdiv(subdiv_cache, cache, *buffers.vbo.uv);
extract_uv_maps_subdiv(subdiv_cache, cache, *vbo);
}
if (DRW_vbo_requested(buffers.vbo.edituv_stretch_area)) {
extract_edituv_stretch_area_subdiv(
mr, subdiv_cache, *buffers.vbo.edituv_stretch_area, cache.tot_area, cache.tot_uv_area);
if (gpu::VertBuf *vbo = vbos_to_create.lookup_default(VBOType::EditUVStretchArea, nullptr)) {
extract_edituv_stretch_area_subdiv(mr, subdiv_cache, *vbo, cache.tot_area, cache.tot_uv_area);
}
if (DRW_vbo_requested(buffers.vbo.edituv_stretch_area)) {
extract_edituv_stretch_angle_subdiv(
mr, subdiv_cache, cache, *buffers.vbo.edituv_stretch_angle);
if (gpu::VertBuf *vbo = vbos_to_create.lookup_default(VBOType::EditUVStretchAngle, nullptr)) {
extract_edituv_stretch_angle_subdiv(mr, subdiv_cache, cache, *vbo);
}
if (DRW_vbo_requested(buffers.vbo.edituv_data)) {
extract_edituv_data_subdiv(mr, subdiv_cache, *buffers.vbo.edituv_data);
if (gpu::VertBuf *vbo = vbos_to_create.lookup_default(VBOType::EditUVData, nullptr)) {
extract_edituv_data_subdiv(mr, subdiv_cache, *vbo);
}
if (DRW_ibo_requested(buffers.ibo.edituv_tris)) {
extract_edituv_tris_subdiv(mr, subdiv_cache, *buffers.ibo.edituv_tris);
if (gpu::IndexBuf *ibo = ibos_to_create.lookup_default(IBOType::EditUVTris, nullptr)) {
extract_edituv_tris_subdiv(mr, subdiv_cache, *ibo);
}
if (DRW_ibo_requested(buffers.ibo.edituv_lines)) {
extract_edituv_lines_subdiv(mr, subdiv_cache, *buffers.ibo.edituv_lines);
if (gpu::IndexBuf *ibo = ibos_to_create.lookup_default(IBOType::EditUVLines, nullptr)) {
extract_edituv_lines_subdiv(mr, subdiv_cache, *ibo);
}
if (DRW_ibo_requested(buffers.ibo.edituv_points)) {
extract_edituv_points_subdiv(mr, subdiv_cache, *buffers.ibo.edituv_points);
if (gpu::IndexBuf *ibo = ibos_to_create.lookup_default(IBOType::EditUVPoints, nullptr)) {
extract_edituv_points_subdiv(mr, subdiv_cache, *ibo);
}
if (attrs_requested) {
extract_attributes_subdiv(mr,
subdiv_cache,
{cache.attr_used.requests, GPU_MAX_ATTR},
{buffers.vbo.attr, GPU_MAX_ATTR});
if (attrs) {
for (const int8_t i : IndexRange(GPU_MAX_ATTR)) {
const VBOType vbo_type = VBOType(int8_t(VBOType::Attr0) + i);
if (gpu::VertBuf *vbo = vbos_to_create.lookup_default(vbo_type, nullptr)) {
extract_attribute_subdiv(mr, subdiv_cache, cache.attr_used.requests[i], *vbo);
}
}
}
}

View File

@@ -522,18 +522,17 @@ static void retrieve_active_attribute_names(MeshRenderData &mr,
mr.default_color_name = mesh_final.default_color_attribute;
}
std::unique_ptr<MeshRenderData> mesh_render_data_create(Object &object,
Mesh &mesh,
const bool is_editmode,
const bool is_paint_mode,
const float4x4 &object_to_world,
const bool do_final,
const bool do_uvedit,
const bool use_hide,
const ToolSettings *ts)
MeshRenderData mesh_render_data_create(Object &object,
Mesh &mesh,
const bool is_editmode,
const bool is_paint_mode,
const float4x4 &object_to_world,
const bool do_final,
const bool do_uvedit,
const bool use_hide,
const ToolSettings *ts)
{
std::unique_ptr<MeshRenderData> mr_ptr = std::make_unique<MeshRenderData>();
MeshRenderData &mr = *mr_ptr;
MeshRenderData mr{};
mr.toolsettings = ts;
mr.materials_num = BKE_object_material_used_with_fallback_eval(object);
@@ -683,7 +682,7 @@ std::unique_ptr<MeshRenderData> mesh_render_data_create(Object &object,
retrieve_active_attribute_names(mr, object, *mr.mesh);
return mr_ptr;
return mr;
}
/** \} */

View File

@@ -274,8 +274,6 @@ blender::gpu::Batch *DRW_mesh_batch_cache_get_edit_mesh_analysis(Mesh &mesh);
/** \name For Direct Data Access
* \{ */
gpu::VertBuf *DRW_mesh_batch_cache_pos_vertbuf_get(Mesh &mesh);
/* Edit mesh bit-flags (is this the right place?). */
enum {
VFLAG_VERT_ACTIVE = 1 << 0,

View File

@@ -8,6 +8,7 @@
* \brief Mesh API for render engines
*/
#include <array>
#include <optional>
#include "MEM_guardedalloc.h"
@@ -16,7 +17,6 @@
#include "BLI_listbase.h"
#include "BLI_span.hh"
#include "BLI_string_ref.hh"
#include "BLI_task.h"
#include "DNA_mesh_types.h"
#include "DNA_object_types.h"
@@ -56,149 +56,71 @@ namespace blender::draw {
/** \name Dependencies between buffer and batch
* \{ */
/* clang-format off */
#define BUFFER_INDEX(buff_name) ((offsetof(MeshBufferList, buff_name) - offsetof(MeshBufferList, vbo)) / sizeof(void *))
#define BUFFER_LEN (sizeof(MeshBufferList) / sizeof(void *))
#define _BATCH_MAP1(a) batches_that_use_buffer(BUFFER_INDEX(a))
#define _BATCH_MAP2(a, b) _BATCH_MAP1(a) | _BATCH_MAP1(b)
#define _BATCH_MAP3(a, b, c) _BATCH_MAP2(a, b) | _BATCH_MAP1(c)
#define _BATCH_MAP4(a, b, c, d) _BATCH_MAP3(a, b, c) | _BATCH_MAP1(d)
#define _BATCH_MAP5(a, b, c, d, e) _BATCH_MAP4(a, b, c, d) | _BATCH_MAP1(e)
#define _BATCH_MAP6(a, b, c, d, e, f) _BATCH_MAP5(a, b, c, d, e) | _BATCH_MAP1(f)
#define _BATCH_MAP7(a, b, c, d, e, f, g) _BATCH_MAP6(a, b, c, d, e, f) | _BATCH_MAP1(g)
#define _BATCH_MAP8(a, b, c, d, e, f, g, h) _BATCH_MAP7(a, b, c, d, e, f, g) | _BATCH_MAP1(h)
#define _BATCH_MAP9(a, b, c, d, e, f, g, h, i) _BATCH_MAP8(a, b, c, d, e, f, g, h) | _BATCH_MAP1(i)
#define _BATCH_MAP10(a, b, c, d, e, f, g, h, i, j) _BATCH_MAP9(a, b, c, d, e, f, g, h, i) | _BATCH_MAP1(j)
#define BATCH_MAP(...) VA_NARGS_CALL_OVERLOAD(_BATCH_MAP, __VA_ARGS__)
/* clang-format on */
#define TRIS_PER_MAT_INDEX BUFFER_LEN
static constexpr DRWBatchFlag batches_that_use_buffer(const int buffer_index)
{
switch (buffer_index) {
case BUFFER_INDEX(vbo.pos):
return MBC_SURFACE | MBC_SURFACE_WEIGHTS | MBC_EDIT_TRIANGLES | MBC_EDIT_VERTICES |
MBC_EDIT_EDGES | MBC_EDIT_VNOR | MBC_EDIT_LNOR | MBC_EDIT_MESH_ANALYSIS |
MBC_EDIT_SELECTION_VERTS | MBC_EDIT_SELECTION_EDGES | MBC_EDIT_SELECTION_FACES |
MBC_ALL_VERTS | MBC_ALL_EDGES | MBC_LOOSE_EDGES | MBC_EDGE_DETECTION |
MBC_WIRE_EDGES | MBC_WIRE_LOOPS | MBC_SCULPT_OVERLAYS | MBC_VIEWER_ATTRIBUTE_OVERLAY |
MBC_SURFACE_PER_MAT;
case BUFFER_INDEX(vbo.nor):
return MBC_SURFACE | MBC_EDIT_LNOR | MBC_WIRE_EDGES | MBC_WIRE_LOOPS | MBC_SURFACE_PER_MAT |
MBC_ALL_VERTS;
case BUFFER_INDEX(vbo.edge_fac):
return MBC_WIRE_EDGES;
case BUFFER_INDEX(vbo.weights):
return MBC_SURFACE_WEIGHTS;
case BUFFER_INDEX(vbo.uv):
return MBC_SURFACE | MBC_EDITUV_FACES_STRETCH_AREA | MBC_EDITUV_FACES_STRETCH_ANGLE |
MBC_EDITUV_FACES | MBC_EDITUV_EDGES | MBC_EDITUV_VERTS | MBC_WIRE_LOOPS_UVS |
MBC_SURFACE_PER_MAT;
case BUFFER_INDEX(vbo.tan):
return MBC_SURFACE_PER_MAT;
case BUFFER_INDEX(vbo.sculpt_data):
return MBC_SCULPT_OVERLAYS;
case BUFFER_INDEX(vbo.orco):
return MBC_SURFACE_PER_MAT;
case BUFFER_INDEX(vbo.edit_data):
return MBC_EDIT_TRIANGLES | MBC_EDIT_EDGES | MBC_EDIT_VERTICES;
case BUFFER_INDEX(vbo.edituv_data):
return MBC_EDITUV_FACES | MBC_EDITUV_FACES_STRETCH_AREA | MBC_EDITUV_FACES_STRETCH_ANGLE |
MBC_EDITUV_EDGES | MBC_EDITUV_VERTS;
case BUFFER_INDEX(vbo.edituv_stretch_area):
return MBC_EDITUV_FACES_STRETCH_AREA;
case BUFFER_INDEX(vbo.edituv_stretch_angle):
return MBC_EDITUV_FACES_STRETCH_ANGLE;
case BUFFER_INDEX(vbo.mesh_analysis):
return MBC_EDIT_MESH_ANALYSIS;
case BUFFER_INDEX(vbo.fdots_pos):
return MBC_EDIT_FACEDOTS | MBC_EDIT_SELECTION_FACEDOTS;
case BUFFER_INDEX(vbo.fdots_nor):
return MBC_EDIT_FACEDOTS;
case BUFFER_INDEX(vbo.fdots_uv):
return MBC_EDITUV_FACEDOTS;
case BUFFER_INDEX(vbo.fdots_edituv_data):
return MBC_EDITUV_FACEDOTS;
case BUFFER_INDEX(vbo.skin_roots):
return MBC_SKIN_ROOTS;
case BUFFER_INDEX(vbo.vert_idx):
return MBC_EDIT_SELECTION_VERTS;
case BUFFER_INDEX(vbo.edge_idx):
return MBC_EDIT_SELECTION_EDGES;
case BUFFER_INDEX(vbo.face_idx):
return MBC_EDIT_SELECTION_FACES;
case BUFFER_INDEX(vbo.fdot_idx):
return MBC_EDIT_SELECTION_FACEDOTS;
case BUFFER_INDEX(vbo.attr[0]):
case BUFFER_INDEX(vbo.attr[1]):
case BUFFER_INDEX(vbo.attr[2]):
case BUFFER_INDEX(vbo.attr[3]):
case BUFFER_INDEX(vbo.attr[4]):
case BUFFER_INDEX(vbo.attr[5]):
case BUFFER_INDEX(vbo.attr[6]):
case BUFFER_INDEX(vbo.attr[7]):
case BUFFER_INDEX(vbo.attr[8]):
case BUFFER_INDEX(vbo.attr[9]):
case BUFFER_INDEX(vbo.attr[10]):
case BUFFER_INDEX(vbo.attr[11]):
case BUFFER_INDEX(vbo.attr[12]):
case BUFFER_INDEX(vbo.attr[13]):
case BUFFER_INDEX(vbo.attr[14]):
return MBC_SURFACE | MBC_SURFACE_PER_MAT;
case BUFFER_INDEX(vbo.attr_viewer):
return MBC_VIEWER_ATTRIBUTE_OVERLAY;
case BUFFER_INDEX(vbo.vnor):
return MBC_EDIT_VNOR;
case BUFFER_INDEX(ibo.tris):
return MBC_SURFACE | MBC_SURFACE_WEIGHTS | MBC_EDIT_TRIANGLES | MBC_EDIT_LNOR |
MBC_EDIT_MESH_ANALYSIS | MBC_EDIT_SELECTION_FACES | MBC_SCULPT_OVERLAYS |
MBC_VIEWER_ATTRIBUTE_OVERLAY;
case BUFFER_INDEX(ibo.lines):
return MBC_EDIT_EDGES | MBC_EDIT_SELECTION_EDGES | MBC_ALL_EDGES | MBC_WIRE_EDGES;
case BUFFER_INDEX(ibo.lines_loose):
return MBC_LOOSE_EDGES;
case BUFFER_INDEX(ibo.points):
return MBC_EDIT_VNOR | MBC_EDIT_VERTICES | MBC_EDIT_SELECTION_VERTS;
case BUFFER_INDEX(ibo.fdots):
return MBC_EDIT_FACEDOTS | MBC_EDIT_SELECTION_FACEDOTS;
case BUFFER_INDEX(ibo.lines_paint_mask):
return MBC_WIRE_LOOPS;
case BUFFER_INDEX(ibo.lines_adjacency):
return MBC_EDGE_DETECTION;
case BUFFER_INDEX(ibo.edituv_tris):
return MBC_EDITUV_FACES | MBC_EDITUV_FACES_STRETCH_AREA | MBC_EDITUV_FACES_STRETCH_ANGLE;
case BUFFER_INDEX(ibo.edituv_lines):
return MBC_EDITUV_EDGES | MBC_WIRE_LOOPS_UVS;
case BUFFER_INDEX(ibo.edituv_points):
return MBC_EDITUV_VERTS;
case BUFFER_INDEX(ibo.edituv_fdots):
return MBC_EDITUV_FACEDOTS;
case TRIS_PER_MAT_INDEX:
return MBC_SURFACE_PER_MAT;
}
return (DRWBatchFlag)0;
}
static void mesh_batch_cache_discard_surface_batches(MeshBatchCache &cache);
static void mesh_batch_cache_clear(MeshBatchCache &cache);
static void mesh_batch_cache_discard_batch(MeshBatchCache &cache, const DRWBatchFlag batch_map)
static void discard_buffers(MeshBatchCache &cache,
const Span<VBOType> vbos,
const Span<IBOType> ibos)
{
for (int i = 0; i < MBC_BATCH_LEN; i++) {
DRWBatchFlag batch_requested = (DRWBatchFlag)(1u << i);
if (batch_map & batch_requested) {
GPU_BATCH_DISCARD_SAFE(((gpu::Batch **)&cache.batch)[i]);
cache.batch_ready &= ~batch_requested;
Set<const void *, 16> buffer_ptrs;
buffer_ptrs.reserve(vbos.size() + ibos.size());
FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
for (const VBOType vbo : vbos) {
if (const auto *buffer = mbc->buff.vbos.lookup_ptr(vbo)) {
buffer_ptrs.add(buffer->get());
}
}
}
FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
for (const IBOType ibo : ibos) {
if (const auto *buffer = mbc->buff.ibos.lookup_ptr(ibo)) {
buffer_ptrs.add(buffer->get());
}
}
}
if (batch_map & MBC_SURFACE_PER_MAT) {
mesh_batch_cache_discard_surface_batches(cache);
const auto batch_contains_data = [&](gpu::Batch &batch) {
if (buffer_ptrs.contains(batch.elem)) {
return true;
}
if (std::any_of(batch.verts, batch.verts + ARRAY_SIZE(batch.verts), [&](gpu::VertBuf *vbo) {
return vbo && buffer_ptrs.contains(vbo);
}))
{
return true;
}
return false;
};
for (const int i : IndexRange(MBC_BATCH_LEN)) {
gpu::Batch *batch = ((gpu::Batch **)&cache.batch)[i];
if (batch && batch_contains_data(*batch)) {
GPU_BATCH_DISCARD_SAFE(((gpu::Batch **)&cache.batch)[i]);
cache.batch_ready &= ~DRWBatchFlag(1u << i);
}
}
if (!cache.surface_per_mat.is_empty()) {
if (cache.surface_per_mat.first() && batch_contains_data(*cache.surface_per_mat.first())) {
/* The format for all `surface_per_mat` batches is the same, discard them all. */
for (const int i : cache.surface_per_mat.index_range()) {
GPU_BATCH_DISCARD_SAFE(cache.surface_per_mat[i]);
}
cache.batch_ready &= ~(MBC_SURFACE | MBC_SURFACE_PER_MAT);
}
}
for (const VBOType vbo : vbos) {
cache.final.buff.vbos.remove(vbo);
cache.cage.buff.vbos.remove(vbo);
cache.uv_cage.buff.vbos.remove(vbo);
}
for (const IBOType ibo : ibos) {
cache.final.buff.ibos.remove(ibo);
cache.cage.buff.ibos.remove(ibo);
cache.uv_cage.buff.ibos.remove(ibo);
}
}
@@ -606,7 +528,7 @@ static void mesh_batch_cache_check_vertex_group(MeshBatchCache &cache,
{
if (!drw_mesh_weight_state_compare(&cache.weight_state, wstate)) {
FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.weights);
mbc->buff.vbos.remove(VBOType::VertexGroupWeight);
}
GPU_BATCH_CLEAR_SAFE(cache.batch.surface_weights);
@@ -618,68 +540,33 @@ static void mesh_batch_cache_check_vertex_group(MeshBatchCache &cache,
static void mesh_batch_cache_request_surface_batches(MeshBatchCache &cache)
{
mesh_batch_cache_add_request(cache, MBC_SURFACE);
mesh_batch_cache_add_request(cache, MBC_SURFACE | MBC_SURFACE_PER_MAT);
DRW_batch_request(&cache.batch.surface);
for (int i = 0; i < cache.mat_len; i++) {
DRW_batch_request(&cache.surface_per_mat[i]);
}
}
/* Free batches with material-mapped corner_tris.
* NOTE: The updating of the indices buffers (#tris_per_mat) is handled in the extractors.
* No need to discard they here. */
static void mesh_batch_cache_discard_surface_batches(MeshBatchCache &cache)
{
GPU_BATCH_DISCARD_SAFE(cache.batch.surface);
for (int i = 0; i < cache.mat_len; i++) {
GPU_BATCH_DISCARD_SAFE(cache.surface_per_mat[i]);
}
cache.batch_ready &= ~MBC_SURFACE;
}
static void mesh_batch_cache_discard_shaded_tri(MeshBatchCache &cache)
{
FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.uv);
GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.tan);
GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.orco);
}
DRWBatchFlag batch_map = BATCH_MAP(vbo.uv, vbo.tan, vbo.orco);
mesh_batch_cache_discard_batch(cache, batch_map);
mesh_cd_layers_type_clear(&cache.cd_used);
discard_buffers(cache, {VBOType::UVs, VBOType::Tangents, VBOType::Orco}, {});
}
static void mesh_batch_cache_discard_uvedit(MeshBatchCache &cache)
{
FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.edituv_stretch_angle);
GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.edituv_stretch_area);
GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.uv);
GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.edituv_data);
GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.fdots_uv);
GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.fdots_edituv_data);
GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_tris);
GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_lines);
GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_points);
GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_fdots);
}
DRWBatchFlag batch_map = BATCH_MAP(vbo.edituv_stretch_angle,
vbo.edituv_stretch_area,
vbo.uv,
vbo.edituv_data,
vbo.fdots_uv,
vbo.fdots_edituv_data,
ibo.edituv_tris,
ibo.edituv_lines,
ibo.edituv_points,
ibo.edituv_fdots);
mesh_batch_cache_discard_batch(cache, batch_map);
discard_buffers(
cache,
{VBOType::EditUVStretchAngle,
VBOType::EditUVStretchArea,
VBOType::UVs,
VBOType::EditUVData,
VBOType::FaceDotUV,
VBOType::FaceDotEditUVData},
{IBOType::EditUVTris, IBOType::EditUVLines, IBOType::EditUVPoints, IBOType::EditUVFaceDots});
cache.tot_area = 0.0f;
cache.tot_uv_area = 0.0f;
cache.batch_ready &= ~MBC_EDITUV;
/* We discarded the vbo.uv so we need to reset the cd_used flag. */
cache.cd_used.uv = 0;
cache.cd_used.edit_uv = 0;
@@ -687,21 +574,10 @@ static void mesh_batch_cache_discard_uvedit(MeshBatchCache &cache)
static void mesh_batch_cache_discard_uvedit_select(MeshBatchCache &cache)
{
FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.edituv_data);
GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.fdots_edituv_data);
GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_tris);
GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_lines);
GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_points);
GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_fdots);
}
DRWBatchFlag batch_map = BATCH_MAP(vbo.edituv_data,
vbo.fdots_edituv_data,
ibo.edituv_tris,
ibo.edituv_lines,
ibo.edituv_points,
ibo.edituv_fdots);
mesh_batch_cache_discard_batch(cache, batch_map);
discard_buffers(
cache,
{VBOType::EditUVData, VBOType::FaceDotEditUVData},
{IBOType::EditUVTris, IBOType::EditUVLines, IBOType::EditUVPoints, IBOType::EditUVFaceDots});
}
void DRW_mesh_batch_cache_dirty_tag(Mesh *mesh, eMeshBatchDirtyMode mode)
@@ -710,15 +586,9 @@ void DRW_mesh_batch_cache_dirty_tag(Mesh *mesh, eMeshBatchDirtyMode mode)
return;
}
MeshBatchCache &cache = *static_cast<MeshBatchCache *>(mesh->runtime->batch_cache);
DRWBatchFlag batch_map;
switch (mode) {
case BKE_MESH_BATCH_DIRTY_SELECT:
FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.edit_data);
GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.fdots_nor);
}
batch_map = BATCH_MAP(vbo.edit_data, vbo.fdots_nor);
mesh_batch_cache_discard_batch(cache, batch_map);
discard_buffers(cache, {VBOType::EditData, VBOType::FaceDotNormal}, {});
/* Because visible UVs depends on edit mode selection, discard topology. */
mesh_batch_cache_discard_uvedit_select(cache);
@@ -726,13 +596,7 @@ void DRW_mesh_batch_cache_dirty_tag(Mesh *mesh, eMeshBatchDirtyMode mode)
case BKE_MESH_BATCH_DIRTY_SELECT_PAINT:
/* Paint mode selection flag is packed inside the nor attribute.
* Note that it can be slow if auto smooth is enabled. (see #63946) */
FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.lines_paint_mask);
GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.pos);
GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.nor);
}
batch_map = BATCH_MAP(ibo.lines_paint_mask, vbo.pos, vbo.nor);
mesh_batch_cache_discard_batch(cache, batch_map);
discard_buffers(cache, {VBOType::CornerNormal}, {IBOType::LinesPaintMask});
break;
case BKE_MESH_BATCH_DIRTY_ALL:
cache.is_dirty = true;
@@ -745,33 +609,17 @@ void DRW_mesh_batch_cache_dirty_tag(Mesh *mesh, eMeshBatchDirtyMode mode)
mesh_batch_cache_discard_uvedit(cache);
break;
case BKE_MESH_BATCH_DIRTY_UVEDIT_SELECT:
FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.edituv_data);
GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.fdots_edituv_data);
}
batch_map = BATCH_MAP(vbo.edituv_data, vbo.fdots_edituv_data);
mesh_batch_cache_discard_batch(cache, batch_map);
discard_buffers(cache, {VBOType::EditData, VBOType::FaceDotEditUVData}, {});
break;
default:
BLI_assert(0);
}
}
static void mesh_buffer_list_clear(MeshBufferList *mbuflist)
{
gpu::VertBuf **vbos = (gpu::VertBuf **)&mbuflist->vbo;
gpu::IndexBuf **ibos = (gpu::IndexBuf **)&mbuflist->ibo;
for (int i = 0; i < sizeof(mbuflist->vbo) / sizeof(void *); i++) {
GPU_VERTBUF_DISCARD_SAFE(vbos[i]);
}
for (int i = 0; i < sizeof(mbuflist->ibo) / sizeof(void *); i++) {
GPU_INDEXBUF_DISCARD_SAFE(ibos[i]);
}
}
static void mesh_buffer_cache_clear(MeshBufferCache *mbc)
{
mesh_buffer_list_clear(&mbc->buff);
mbc->buff.ibos.clear();
mbc->buff.vbos.clear();
mbc->loose_geom = {};
mbc->face_sorted = {};
@@ -801,6 +649,9 @@ static void mesh_batch_cache_clear(MeshBatchCache &cache)
gpu::Batch **batch = (gpu::Batch **)&cache.batch;
GPU_BATCH_DISCARD_SAFE(batch[i]);
}
for (const int i : cache.surface_per_mat.index_range()) {
GPU_BATCH_DISCARD_SAFE(cache.surface_per_mat[i]);
}
mesh_batch_cache_discard_shaded_tri(cache);
mesh_batch_cache_discard_uvedit(cache);
@@ -889,11 +740,10 @@ gpu::Batch *DRW_mesh_batch_cache_get_surface(Mesh &mesh)
gpu::Batch *DRW_mesh_batch_cache_get_loose_edges(Mesh &mesh)
{
MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
mesh_batch_cache_add_request(cache, MBC_LOOSE_EDGES);
if (cache.no_loose_wire) {
return nullptr;
}
mesh_batch_cache_add_request(cache, MBC_LOOSE_EDGES);
return DRW_batch_request(&cache.batch.loose_edges);
}
@@ -1030,22 +880,6 @@ gpu::Batch *DRW_mesh_batch_cache_get_surface_viewer_attribute(Mesh &mesh)
/** \} */
/* ---------------------------------------------------------------------- */
/** \name Edit Mode API
* \{ */
gpu::VertBuf *DRW_mesh_batch_cache_pos_vertbuf_get(Mesh &mesh)
{
MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
/* Request surface to trigger the vbo filling. Otherwise it may do nothing. */
mesh_batch_cache_request_surface_batches(cache);
DRW_vbo_request(nullptr, &cache.final.buff.vbo.pos);
return cache.final.buff.vbo.pos;
}
/** \} */
/* ---------------------------------------------------------------------- */
/** \name Edit Mode API
* \{ */
@@ -1256,49 +1090,6 @@ void DRW_mesh_batch_cache_free_old(Mesh *mesh, int ctime)
drw_attributes_clear(&cache->attr_used_over_time);
}
static void drw_add_attributes_vbo(gpu::Batch *batch,
MeshBufferList *mbuflist,
DRW_Attributes *attr_used)
{
for (int i = 0; i < attr_used->num_requests; i++) {
DRW_vbo_request(batch, &mbuflist->vbo.attr[i]);
}
}
#ifndef NDEBUG
/* Sanity check function to test if all requested batches are available. */
static void drw_mesh_batch_cache_check_available(TaskGraph &task_graph, Mesh &mesh)
{
MeshBatchCache *cache = mesh_batch_cache_get(mesh);
/* Make sure all requested batches have been setup. */
/* NOTE: The next line creates a different scheduling than during release builds what can lead to
* some issues (See #77867 where we needed to disable this function in order to debug what was
* happening in release builds). */
BLI_task_graph_work_and_wait(&task_graph);
for (int i = 0; i < MBC_BATCH_LEN; i++) {
BLI_assert(!DRW_batch_requested(((gpu::Batch **)&cache->batch)[i], (GPUPrimType)0));
}
for (int i = 0; i < MBC_VBO_LEN; i++) {
BLI_assert(!DRW_vbo_requested(((gpu::VertBuf **)&cache->final.buff.vbo)[i]));
}
for (int i = 0; i < MBC_IBO_LEN; i++) {
BLI_assert(!DRW_ibo_requested(((gpu::IndexBuf **)&cache->final.buff.ibo)[i]));
}
for (int i = 0; i < MBC_VBO_LEN; i++) {
BLI_assert(!DRW_vbo_requested(((gpu::VertBuf **)&cache->cage.buff.vbo)[i]));
}
for (int i = 0; i < MBC_IBO_LEN; i++) {
BLI_assert(!DRW_ibo_requested(((gpu::IndexBuf **)&cache->cage.buff.ibo)[i]));
}
for (int i = 0; i < MBC_VBO_LEN; i++) {
BLI_assert(!DRW_vbo_requested(((gpu::VertBuf **)&cache->uv_cage.buff.vbo)[i]));
}
for (int i = 0; i < MBC_IBO_LEN; i++) {
BLI_assert(!DRW_ibo_requested(((gpu::IndexBuf **)&cache->uv_cage.buff.ibo)[i]));
}
}
#endif
static void init_empty_dummy_batch(gpu::Batch &batch)
{
/* The dummy batch is only used in cases with invalid edit mode mapping, so the overhead of
@@ -1327,30 +1118,9 @@ void DRW_mesh_batch_cache_create_requested(TaskGraph &task_graph,
/* Early out */
if (cache.batch_requested == 0) {
#ifndef NDEBUG
drw_mesh_batch_cache_check_available(task_graph, mesh);
#endif
return;
}
#ifndef NDEBUG
/* Map the index of a buffer to a flag containing all batches that use it. */
Map<int, DRWBatchFlag> batches_that_use_buffer_local;
auto assert_deps_valid = [&](DRWBatchFlag batch_flag, Span<int> used_buffer_indices) {
for (const int buffer_index : used_buffer_indices) {
batches_that_use_buffer_local.add_or_modify(
buffer_index,
[&](DRWBatchFlag *value) { *value = batch_flag; },
[&](DRWBatchFlag *value) { *value |= batch_flag; });
BLI_assert(batches_that_use_buffer(buffer_index) & batch_flag);
}
};
#else
auto assert_deps_valid = [&](DRWBatchFlag /*batch_flag*/, Span<int> /*used_buffer_indices*/) {};
#endif
/* Sanity check. */
if ((mesh.runtime->edit_mesh != nullptr) && (ob.mode & OB_MODE_EDIT)) {
BLI_assert(BKE_object_get_editmesh_eval_final(&ob) != nullptr);
@@ -1374,7 +1144,7 @@ void DRW_mesh_batch_cache_create_requested(TaskGraph &task_graph,
}
if (batch_requested &
(MBC_SURFACE | MBC_WIRE_LOOPS_UVS | MBC_EDITUV_FACES_STRETCH_AREA |
(MBC_SURFACE | MBC_SURFACE_PER_MAT | MBC_WIRE_LOOPS_UVS | MBC_EDITUV_FACES_STRETCH_AREA |
MBC_EDITUV_FACES_STRETCH_ANGLE | MBC_EDITUV_FACES | MBC_EDITUV_EDGES | MBC_EDITUV_VERTS))
{
/* Modifiers will only generate an orco layer if the mesh is deformed. */
@@ -1397,23 +1167,23 @@ void DRW_mesh_batch_cache_create_requested(TaskGraph &task_graph,
if (cd_overlap == false || attr_overlap == false) {
FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
if ((cache.cd_used.uv & cache.cd_needed.uv) != cache.cd_needed.uv) {
GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.uv);
mbc->buff.vbos.remove(VBOType::UVs);
cd_uv_update = true;
}
if ((cache.cd_used.tan & cache.cd_needed.tan) != cache.cd_needed.tan ||
cache.cd_used.tan_orco != cache.cd_needed.tan_orco)
{
GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.tan);
mbc->buff.vbos.remove(VBOType::Tangents);
}
if (cache.cd_used.orco != cache.cd_needed.orco) {
GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.orco);
mbc->buff.vbos.remove(VBOType::Orco);
}
if (cache.cd_used.sculpt_overlays != cache.cd_needed.sculpt_overlays) {
GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.sculpt_data);
mbc->buff.vbos.remove(VBOType::SculptData);
}
if (!drw_attributes_overlap(&cache.attr_used, &cache.attr_needed)) {
for (int i = 0; i < GPU_MAX_ATTR; i++) {
GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.attr[i]);
mbc->buff.vbos.remove(VBOType(int8_t(VBOType::Attr0) + i));
}
}
}
@@ -1423,7 +1193,7 @@ void DRW_mesh_batch_cache_create_requested(TaskGraph &task_graph,
GPU_BATCH_CLEAR_SAFE(cache.surface_per_mat[i]);
}
GPU_BATCH_CLEAR_SAFE(cache.batch.surface);
cache.batch_ready &= ~(MBC_SURFACE);
cache.batch_ready &= ~(MBC_SURFACE | MBC_SURFACE_PER_MAT);
mesh_cd_layers_type_merge(&cache.cd_used, cache.cd_needed);
drw_attributes_merge(&cache.attr_used, &cache.attr_needed, mesh.runtime->render_mutex);
@@ -1442,13 +1212,13 @@ void DRW_mesh_batch_cache_create_requested(TaskGraph &task_graph,
if (cd_uv_update || (cache.is_uvsyncsel != is_uvsyncsel)) {
cache.is_uvsyncsel = is_uvsyncsel;
FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.edituv_data);
GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.fdots_uv);
GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.fdots_edituv_data);
GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_tris);
GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_lines);
GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_points);
GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_fdots);
mbc->buff.vbos.remove(VBOType::EditUVData);
mbc->buff.vbos.remove(VBOType::FaceDotUV);
mbc->buff.vbos.remove(VBOType::FaceDotEditUVData);
mbc->buff.ibos.remove(IBOType::EditUVTris);
mbc->buff.ibos.remove(IBOType::EditUVLines);
mbc->buff.ibos.remove(IBOType::EditUVPoints);
mbc->buff.ibos.remove(IBOType::EditUVFaceDots);
}
/* We only clear the batches as they may already have been
* referenced. */
@@ -1465,9 +1235,6 @@ void DRW_mesh_batch_cache_create_requested(TaskGraph &task_graph,
/* Second chance to early out */
if ((batch_requested & ~cache.batch_ready) == 0) {
#ifndef NDEBUG
drw_mesh_batch_cache_check_available(task_graph, mesh);
#endif
return;
}
@@ -1480,8 +1247,6 @@ void DRW_mesh_batch_cache_create_requested(TaskGraph &task_graph,
bke::pbvh::update_normals_from_eval(ob, *pbvh);
}
cache.batch_ready |= batch_requested;
/* This is the mesh before modifier evaluation, used to test how the mesh changed during
* evaluation to decide which data is valid to extract. */
const Mesh *orig_edit_mesh = is_editmode ? BKE_object_get_pre_modified_mesh(&ob) : nullptr;
@@ -1510,155 +1275,113 @@ void DRW_mesh_batch_cache_create_requested(TaskGraph &task_graph,
mesh.runtime->wrapper_type == ME_WRAPPER_TYPE_BMESH);
}
const DRWBatchFlag batches_to_create = batch_requested & ~cache.batch_ready;
const bool do_subdivision = BKE_subsurf_modifier_has_gpu_subdiv(&mesh);
MeshBufferList *mbuflist = &cache.final.buff;
enum class BufferList { Final, Cage, UVCage };
/* Initialize batches and request VBO's & IBO's. */
assert_deps_valid(MBC_SURFACE,
{BUFFER_INDEX(ibo.tris),
BUFFER_INDEX(vbo.nor),
BUFFER_INDEX(vbo.pos),
BUFFER_INDEX(vbo.uv),
BUFFER_INDEX(vbo.attr[0]),
BUFFER_INDEX(vbo.attr[1]),
BUFFER_INDEX(vbo.attr[2]),
BUFFER_INDEX(vbo.attr[3]),
BUFFER_INDEX(vbo.attr[4]),
BUFFER_INDEX(vbo.attr[5]),
BUFFER_INDEX(vbo.attr[6]),
BUFFER_INDEX(vbo.attr[7]),
BUFFER_INDEX(vbo.attr[8]),
BUFFER_INDEX(vbo.attr[9]),
BUFFER_INDEX(vbo.attr[10]),
BUFFER_INDEX(vbo.attr[11]),
BUFFER_INDEX(vbo.attr[12]),
BUFFER_INDEX(vbo.attr[13]),
BUFFER_INDEX(vbo.attr[14])});
if (DRW_batch_requested(cache.batch.surface, GPU_PRIM_TRIS)) {
DRW_ibo_request(cache.batch.surface, &mbuflist->ibo.tris);
/* Order matters. First ones override latest VBO's attributes. */
DRW_vbo_request(cache.batch.surface, &mbuflist->vbo.nor);
DRW_vbo_request(cache.batch.surface, &mbuflist->vbo.pos);
if (cache.cd_used.uv != 0) {
DRW_vbo_request(cache.batch.surface, &mbuflist->vbo.uv);
}
drw_add_attributes_vbo(cache.batch.surface, mbuflist, &cache.attr_used);
}
assert_deps_valid(
MBC_VIEWER_ATTRIBUTE_OVERLAY,
{BUFFER_INDEX(ibo.tris), BUFFER_INDEX(vbo.pos), BUFFER_INDEX(vbo.attr_viewer)});
if (DRW_batch_requested(cache.batch.surface_viewer_attribute, GPU_PRIM_TRIS)) {
DRW_ibo_request(cache.batch.surface_viewer_attribute, &mbuflist->ibo.tris);
DRW_vbo_request(cache.batch.surface_viewer_attribute, &mbuflist->vbo.pos);
DRW_vbo_request(cache.batch.surface_viewer_attribute, &mbuflist->vbo.attr_viewer);
}
assert_deps_valid(MBC_ALL_VERTS, {BUFFER_INDEX(vbo.pos), BUFFER_INDEX(vbo.nor)});
if (DRW_batch_requested(cache.batch.all_verts, GPU_PRIM_POINTS)) {
DRW_vbo_request(cache.batch.all_verts, &mbuflist->vbo.pos);
DRW_vbo_request(cache.batch.all_verts, &mbuflist->vbo.nor);
}
assert_deps_valid(
MBC_SCULPT_OVERLAYS,
{BUFFER_INDEX(ibo.tris), BUFFER_INDEX(vbo.pos), BUFFER_INDEX(vbo.sculpt_data)});
if (DRW_batch_requested(cache.batch.sculpt_overlays, GPU_PRIM_TRIS)) {
DRW_ibo_request(cache.batch.sculpt_overlays, &mbuflist->ibo.tris);
DRW_vbo_request(cache.batch.sculpt_overlays, &mbuflist->vbo.pos);
DRW_vbo_request(cache.batch.sculpt_overlays, &mbuflist->vbo.sculpt_data);
}
assert_deps_valid(MBC_ALL_EDGES, {BUFFER_INDEX(ibo.lines), BUFFER_INDEX(vbo.pos)});
if (DRW_batch_requested(cache.batch.all_edges, GPU_PRIM_LINES)) {
DRW_ibo_request(cache.batch.all_edges, &mbuflist->ibo.lines);
DRW_vbo_request(cache.batch.all_edges, &mbuflist->vbo.pos);
}
assert_deps_valid(MBC_LOOSE_EDGES, {BUFFER_INDEX(ibo.lines_loose), BUFFER_INDEX(vbo.pos)});
if (DRW_batch_requested(cache.batch.loose_edges, GPU_PRIM_LINES)) {
DRW_ibo_request(cache.batch.loose_edges, &mbuflist->ibo.lines_loose);
DRW_vbo_request(cache.batch.loose_edges, &mbuflist->vbo.pos);
}
assert_deps_valid(MBC_EDGE_DETECTION,
{BUFFER_INDEX(ibo.lines_adjacency), BUFFER_INDEX(vbo.pos)});
if (DRW_batch_requested(cache.batch.edge_detection, GPU_PRIM_LINES_ADJ)) {
DRW_ibo_request(cache.batch.edge_detection, &mbuflist->ibo.lines_adjacency);
DRW_vbo_request(cache.batch.edge_detection, &mbuflist->vbo.pos);
}
assert_deps_valid(MBC_SURFACE_WEIGHTS,
{BUFFER_INDEX(ibo.tris), BUFFER_INDEX(vbo.pos), BUFFER_INDEX(vbo.weights)});
if (DRW_batch_requested(cache.batch.surface_weights, GPU_PRIM_TRIS)) {
DRW_ibo_request(cache.batch.surface_weights, &mbuflist->ibo.tris);
DRW_vbo_request(cache.batch.surface_weights, &mbuflist->vbo.pos);
DRW_vbo_request(cache.batch.surface_weights, &mbuflist->vbo.nor);
DRW_vbo_request(cache.batch.surface_weights, &mbuflist->vbo.weights);
}
assert_deps_valid(
MBC_WIRE_LOOPS,
{BUFFER_INDEX(ibo.lines_paint_mask), BUFFER_INDEX(vbo.nor), BUFFER_INDEX(vbo.pos)});
if (DRW_batch_requested(cache.batch.wire_loops, GPU_PRIM_LINES)) {
DRW_ibo_request(cache.batch.wire_loops, &mbuflist->ibo.lines_paint_mask);
/* Order matters. First ones override latest VBO's attributes. */
DRW_vbo_request(cache.batch.wire_loops, &mbuflist->vbo.nor);
DRW_vbo_request(cache.batch.wire_loops, &mbuflist->vbo.pos);
}
assert_deps_valid(MBC_WIRE_EDGES,
{BUFFER_INDEX(ibo.lines),
BUFFER_INDEX(vbo.nor),
BUFFER_INDEX(vbo.pos),
BUFFER_INDEX(vbo.edge_fac)});
if (DRW_batch_requested(cache.batch.wire_edges, GPU_PRIM_LINES)) {
DRW_ibo_request(cache.batch.wire_edges, &mbuflist->ibo.lines);
DRW_vbo_request(cache.batch.wire_edges, &mbuflist->vbo.nor);
DRW_vbo_request(cache.batch.wire_edges, &mbuflist->vbo.pos);
DRW_vbo_request(cache.batch.wire_edges, &mbuflist->vbo.edge_fac);
}
assert_deps_valid(MBC_WIRE_LOOPS_UVS, {BUFFER_INDEX(ibo.edituv_lines), BUFFER_INDEX(vbo.uv)});
if (DRW_batch_requested(cache.batch.wire_loops_uvs, GPU_PRIM_LINES)) {
DRW_ibo_request(cache.batch.wire_loops_uvs, &mbuflist->ibo.edituv_lines);
/* For paint overlay. Active layer should have been queried. */
if (cache.cd_used.uv != 0) {
DRW_vbo_request(cache.batch.wire_loops_uvs, &mbuflist->vbo.uv);
}
}
assert_deps_valid(
MBC_EDIT_MESH_ANALYSIS,
{BUFFER_INDEX(ibo.tris), BUFFER_INDEX(vbo.pos), BUFFER_INDEX(vbo.mesh_analysis)});
if (DRW_batch_requested(cache.batch.edit_mesh_analysis, GPU_PRIM_TRIS)) {
DRW_ibo_request(cache.batch.edit_mesh_analysis, &mbuflist->ibo.tris);
DRW_vbo_request(cache.batch.edit_mesh_analysis, &mbuflist->vbo.pos);
DRW_vbo_request(cache.batch.edit_mesh_analysis, &mbuflist->vbo.mesh_analysis);
}
struct BatchCreateData {
gpu::Batch &batch;
GPUPrimType prim_type;
BufferList list;
std::optional<IBOType> ibo;
Vector<VBOType> vbos;
};
Vector<BatchCreateData> batch_info;
/* Per Material */
assert_deps_valid(
MBC_SURFACE_PER_MAT,
{BUFFER_INDEX(vbo.nor), BUFFER_INDEX(vbo.pos), BUFFER_INDEX(vbo.uv),
BUFFER_INDEX(vbo.tan), BUFFER_INDEX(vbo.orco), BUFFER_INDEX(vbo.attr[0]),
BUFFER_INDEX(vbo.attr[1]), BUFFER_INDEX(vbo.attr[2]), BUFFER_INDEX(vbo.attr[3]),
BUFFER_INDEX(vbo.attr[4]), BUFFER_INDEX(vbo.attr[5]), BUFFER_INDEX(vbo.attr[6]),
BUFFER_INDEX(vbo.attr[7]), BUFFER_INDEX(vbo.attr[8]), BUFFER_INDEX(vbo.attr[9]),
BUFFER_INDEX(vbo.attr[10]), BUFFER_INDEX(vbo.attr[11]), BUFFER_INDEX(vbo.attr[12]),
BUFFER_INDEX(vbo.attr[13]), BUFFER_INDEX(vbo.attr[14])});
assert_deps_valid(MBC_SURFACE_PER_MAT, {TRIS_PER_MAT_INDEX});
for (int i = 0; i < cache.mat_len; i++) {
if (DRW_batch_requested(cache.surface_per_mat[i], GPU_PRIM_TRIS)) {
DRW_ibo_request(cache.surface_per_mat[i], &cache.tris_per_mat[i]);
/* Order matters. First ones override latest VBO's attributes. */
DRW_vbo_request(cache.surface_per_mat[i], &mbuflist->vbo.nor);
DRW_vbo_request(cache.surface_per_mat[i], &mbuflist->vbo.pos);
{
const BufferList list = BufferList::Final;
if (batches_to_create & MBC_SURFACE) {
BatchCreateData batch{*cache.batch.surface,
GPU_PRIM_TRIS,
list,
IBOType::Tris,
{VBOType::Position, VBOType::CornerNormal}};
if (cache.cd_used.uv != 0) {
DRW_vbo_request(cache.surface_per_mat[i], &mbuflist->vbo.uv);
batch.vbos.append(VBOType::UVs);
}
if ((cache.cd_used.tan != 0) || (cache.cd_used.tan_orco != 0)) {
DRW_vbo_request(cache.surface_per_mat[i], &mbuflist->vbo.tan);
for (const int i : IndexRange(cache.attr_used.num_requests)) {
batch.vbos.append(VBOType(int8_t(VBOType::Attr0) + i));
}
if (cache.cd_used.orco != 0) {
DRW_vbo_request(cache.surface_per_mat[i], &mbuflist->vbo.orco);
batch_info.append(std::move(batch));
}
if (batches_to_create & MBC_VIEWER_ATTRIBUTE_OVERLAY) {
batch_info.append({*cache.batch.surface_viewer_attribute,
GPU_PRIM_TRIS,
list,
IBOType::Tris,
{VBOType::Position, VBOType::AttrViewer}});
}
if (batches_to_create & MBC_ALL_VERTS) {
batch_info.append({*cache.batch.all_verts,
GPU_PRIM_POINTS,
list,
std::nullopt,
{VBOType::Position, VBOType::CornerNormal}});
}
if (batches_to_create & MBC_SCULPT_OVERLAYS) {
batch_info.append({*cache.batch.sculpt_overlays,
GPU_PRIM_TRIS,
list,
IBOType::Tris,
{VBOType::Position, VBOType::SculptData}});
}
if (batches_to_create & MBC_ALL_EDGES) {
batch_info.append(
{*cache.batch.all_edges, GPU_PRIM_LINES, list, IBOType::Lines, {VBOType::Position}});
}
if (batches_to_create & MBC_LOOSE_EDGES) {
batch_info.append({*cache.batch.loose_edges,
GPU_PRIM_LINES,
list,
IBOType::LinesLoose,
{VBOType::Position}});
}
if (batches_to_create & MBC_EDGE_DETECTION) {
batch_info.append({*cache.batch.edge_detection,
GPU_PRIM_LINES_ADJ,
list,
IBOType::LinesAdjacency,
{VBOType::Position}});
}
if (batches_to_create & MBC_SURFACE_WEIGHTS) {
batch_info.append({*cache.batch.surface_weights,
GPU_PRIM_TRIS,
list,
IBOType::Tris,
{VBOType::Position, VBOType::CornerNormal, VBOType::VertexGroupWeight}});
}
if (batches_to_create & MBC_WIRE_LOOPS) {
batch_info.append({*cache.batch.wire_loops,
GPU_PRIM_LINES,
list,
IBOType::LinesPaintMask,
{VBOType::Position, VBOType::CornerNormal}});
}
if (batches_to_create & MBC_WIRE_EDGES) {
batch_info.append({*cache.batch.wire_edges,
GPU_PRIM_LINES,
list,
IBOType::Lines,
{VBOType::Position, VBOType::CornerNormal, VBOType::EdgeFactor}});
}
if (batches_to_create & MBC_WIRE_LOOPS_UVS) {
BatchCreateData batch{
*cache.batch.wire_loops_uvs, GPU_PRIM_LINES, list, IBOType::EditUVLines, {}};
if (cache.cd_used.uv != 0) {
batch.vbos.append(VBOType::UVs);
}
drw_add_attributes_vbo(cache.surface_per_mat[i], mbuflist, &cache.attr_used);
}
if (batches_to_create & MBC_EDIT_MESH_ANALYSIS) {
batch_info.append({*cache.batch.edit_mesh_analysis,
GPU_PRIM_TRIS,
list,
IBOType::Tris,
{VBOType::Position, VBOType::MeshAnalysis}});
}
}
mbuflist = (do_cage) ? &cache.cage.buff : &cache.final.buff;
/* When the mesh doesn't correspond to the object's original mesh (i.e. the mesh was replaced by
* another with the object info node during evaluation), don't extract edit mode data for it.
* That data can be invalid because any original indices (#CD_ORIGINDEX) on the evaluated mesh
@@ -1666,324 +1389,288 @@ void DRW_mesh_batch_cache_create_requested(TaskGraph &task_graph,
const bool edit_mapping_valid = is_editmode && BKE_editmesh_eval_orig_map_available(
*edit_data_mesh, orig_edit_mesh);
/* Edit Mesh */
assert_deps_valid(MBC_EDIT_TRIANGLES,
{BUFFER_INDEX(ibo.tris), BUFFER_INDEX(vbo.pos), BUFFER_INDEX(vbo.edit_data)});
if (DRW_batch_requested(cache.batch.edit_triangles, GPU_PRIM_TRIS)) {
if (edit_mapping_valid) {
DRW_ibo_request(cache.batch.edit_triangles, &mbuflist->ibo.tris);
DRW_vbo_request(cache.batch.edit_triangles, &mbuflist->vbo.pos);
DRW_vbo_request(cache.batch.edit_triangles, &mbuflist->vbo.edit_data);
}
else {
init_empty_dummy_batch(*cache.batch.edit_triangles);
}
}
assert_deps_valid(
MBC_EDIT_VERTICES,
{BUFFER_INDEX(ibo.points), BUFFER_INDEX(vbo.pos), BUFFER_INDEX(vbo.edit_data)});
if (DRW_batch_requested(cache.batch.edit_vertices, GPU_PRIM_POINTS)) {
if (edit_mapping_valid) {
DRW_ibo_request(cache.batch.edit_vertices, &mbuflist->ibo.points);
DRW_vbo_request(cache.batch.edit_vertices, &mbuflist->vbo.pos);
DRW_vbo_request(cache.batch.edit_vertices, &mbuflist->vbo.edit_data);
if (!do_subdivision || do_cage) {
/* For GPU subdivision, vertex normals are included in the `pos` VBO. */
DRW_vbo_request(cache.batch.edit_vertices, &mbuflist->vbo.vnor);
{
const BufferList list = do_cage ? BufferList::Cage : BufferList::Final;
if (batches_to_create & MBC_EDIT_TRIANGLES) {
if (edit_mapping_valid) {
batch_info.append({*cache.batch.edit_triangles,
GPU_PRIM_TRIS,
list,
IBOType::Tris,
{VBOType::Position, VBOType::EditData}});
}
else {
init_empty_dummy_batch(*cache.batch.edit_triangles);
}
}
else {
init_empty_dummy_batch(*cache.batch.edit_vertices);
}
}
assert_deps_valid(MBC_EDIT_EDGES,
{BUFFER_INDEX(ibo.lines), BUFFER_INDEX(vbo.pos), BUFFER_INDEX(vbo.edit_data)});
if (DRW_batch_requested(cache.batch.edit_edges, GPU_PRIM_LINES)) {
if (edit_mapping_valid) {
DRW_ibo_request(cache.batch.edit_edges, &mbuflist->ibo.lines);
DRW_vbo_request(cache.batch.edit_edges, &mbuflist->vbo.pos);
DRW_vbo_request(cache.batch.edit_edges, &mbuflist->vbo.edit_data);
if (!do_subdivision || do_cage) {
/* For GPU subdivision, vertex normals are included in the `pos` VBO. */
DRW_vbo_request(cache.batch.edit_edges, &mbuflist->vbo.vnor);
if (batches_to_create & MBC_EDIT_VERTICES) {
if (edit_mapping_valid) {
BatchCreateData batch{*cache.batch.edit_vertices,
GPU_PRIM_POINTS,
list,
IBOType::Points,
{VBOType::Position, VBOType::EditData}};
if (!do_subdivision || do_cage) {
batch.vbos.append(VBOType::CornerNormal);
}
batch_info.append(std::move(batch));
}
else {
init_empty_dummy_batch(*cache.batch.edit_vertices);
}
}
else {
init_empty_dummy_batch(*cache.batch.edit_edges);
}
}
assert_deps_valid(MBC_EDIT_VNOR,
{BUFFER_INDEX(ibo.points), BUFFER_INDEX(vbo.pos), BUFFER_INDEX(vbo.vnor)});
if (DRW_batch_requested(cache.batch.edit_vnor, GPU_PRIM_POINTS)) {
if (edit_mapping_valid) {
DRW_ibo_request(cache.batch.edit_vnor, &mbuflist->ibo.points);
DRW_vbo_request(cache.batch.edit_vnor, &mbuflist->vbo.pos);
if (!do_subdivision || do_cage) {
/* For GPU subdivision, vertex normals are included in the `pos` VBO. */
DRW_vbo_request(cache.batch.edit_vnor, &mbuflist->vbo.vnor);
if (batches_to_create & MBC_EDIT_EDGES) {
if (edit_mapping_valid) {
BatchCreateData batch{*cache.batch.edit_edges,
GPU_PRIM_LINES,
list,
IBOType::Lines,
{VBOType::Position, VBOType::EditData}};
if (!do_subdivision || do_cage) {
batch.vbos.append(VBOType::VertexNormal);
}
batch_info.append(std::move(batch));
}
else {
init_empty_dummy_batch(*cache.batch.edit_edges);
}
}
else {
init_empty_dummy_batch(*cache.batch.edit_vnor);
if (batches_to_create & MBC_EDIT_VNOR) {
if (edit_mapping_valid) {
batch_info.append({*cache.batch.edit_vnor,
GPU_PRIM_POINTS,
list,
IBOType::Points,
{VBOType::Position, VBOType::VertexNormal}});
}
else {
init_empty_dummy_batch(*cache.batch.edit_vnor);
}
}
}
assert_deps_valid(MBC_EDIT_LNOR,
{BUFFER_INDEX(ibo.tris), BUFFER_INDEX(vbo.pos), BUFFER_INDEX(vbo.nor)});
if (DRW_batch_requested(cache.batch.edit_lnor, GPU_PRIM_POINTS)) {
if (edit_mapping_valid) {
DRW_ibo_request(cache.batch.edit_lnor, &mbuflist->ibo.tris);
DRW_vbo_request(cache.batch.edit_lnor, &mbuflist->vbo.pos);
DRW_vbo_request(cache.batch.edit_lnor, &mbuflist->vbo.nor);
if (batches_to_create & MBC_EDIT_LNOR) {
if (edit_mapping_valid) {
batch_info.append({*cache.batch.edit_lnor,
GPU_PRIM_POINTS,
list,
IBOType::Tris,
{VBOType::Position, VBOType::CornerNormal}});
}
else {
init_empty_dummy_batch(*cache.batch.edit_lnor);
}
}
else {
init_empty_dummy_batch(*cache.batch.edit_lnor);
if (batches_to_create & MBC_EDIT_FACEDOTS) {
if (edit_mapping_valid) {
batch_info.append({*cache.batch.edit_fdots,
GPU_PRIM_POINTS,
list,
IBOType::FaceDots,
{VBOType::FaceDotPosition, VBOType::FaceDotNormal}});
}
else {
init_empty_dummy_batch(*cache.batch.edit_fdots);
}
}
}
assert_deps_valid(
MBC_EDIT_FACEDOTS,
{BUFFER_INDEX(ibo.fdots), BUFFER_INDEX(vbo.fdots_pos), BUFFER_INDEX(vbo.fdots_nor)});
if (DRW_batch_requested(cache.batch.edit_fdots, GPU_PRIM_POINTS)) {
if (edit_mapping_valid) {
DRW_ibo_request(cache.batch.edit_fdots, &mbuflist->ibo.fdots);
DRW_vbo_request(cache.batch.edit_fdots, &mbuflist->vbo.fdots_pos);
DRW_vbo_request(cache.batch.edit_fdots, &mbuflist->vbo.fdots_nor);
if (batches_to_create & MBC_SKIN_ROOTS) {
if (edit_mapping_valid) {
batch_info.append({*cache.batch.edit_skin_roots,
GPU_PRIM_POINTS,
list,
std::nullopt,
{VBOType::SkinRoots}});
}
else {
init_empty_dummy_batch(*cache.batch.edit_skin_roots);
}
}
else {
init_empty_dummy_batch(*cache.batch.edit_fdots);
if (batches_to_create & MBC_EDIT_SELECTION_VERTS) {
if (is_editmode && !edit_mapping_valid) {
init_empty_dummy_batch(*cache.batch.edit_selection_verts);
}
else {
batch_info.append({*cache.batch.edit_selection_verts,
GPU_PRIM_POINTS,
list,
IBOType::Points,
{VBOType::Position, VBOType::IndexVert}});
}
}
}
assert_deps_valid(MBC_SKIN_ROOTS, {BUFFER_INDEX(vbo.skin_roots)});
if (DRW_batch_requested(cache.batch.edit_skin_roots, GPU_PRIM_POINTS)) {
if (edit_mapping_valid) {
DRW_vbo_request(cache.batch.edit_skin_roots, &mbuflist->vbo.skin_roots);
if (batches_to_create & MBC_EDIT_SELECTION_EDGES) {
if (is_editmode && !edit_mapping_valid) {
init_empty_dummy_batch(*cache.batch.edit_selection_edges);
}
else {
batch_info.append({*cache.batch.edit_selection_edges,
GPU_PRIM_LINES,
list,
IBOType::Lines,
{VBOType::Position, VBOType::IndexEdge}});
}
}
else {
init_empty_dummy_batch(*cache.batch.edit_skin_roots);
if (batches_to_create & MBC_EDIT_SELECTION_FACES) {
if (is_editmode && !edit_mapping_valid) {
init_empty_dummy_batch(*cache.batch.edit_selection_faces);
}
else {
batch_info.append({*cache.batch.edit_selection_faces,
GPU_PRIM_TRIS,
list,
IBOType::Tris,
{VBOType::Position, VBOType::IndexFace}});
}
}
if (batches_to_create & MBC_EDIT_SELECTION_FACEDOTS) {
if (is_editmode && !edit_mapping_valid) {
init_empty_dummy_batch(*cache.batch.edit_selection_fdots);
}
else {
batch_info.append({*cache.batch.edit_selection_fdots,
GPU_PRIM_POINTS,
list,
IBOType::FaceDots,
{VBOType::FaceDotPosition, VBOType::IndexFaceDot}});
}
}
}
/* Selection */
assert_deps_valid(MBC_EDIT_SELECTION_VERTS,
{BUFFER_INDEX(ibo.points), BUFFER_INDEX(vbo.pos), BUFFER_INDEX(vbo.vert_idx)});
if (DRW_batch_requested(cache.batch.edit_selection_verts, GPU_PRIM_POINTS)) {
if (is_editmode && !edit_mapping_valid) {
init_empty_dummy_batch(*cache.batch.edit_selection_verts);
{
/**
* TODO: The code and data structure is ready to support modified UV display
* but the selection code for UVs needs to support it first. So for now, only
* display the cage in all cases.
*/
const BufferList list = do_uvcage ? BufferList::UVCage : BufferList::Final;
if (batches_to_create & MBC_EDITUV_FACES) {
if (edit_mapping_valid) {
batch_info.append({*cache.batch.edituv_faces,
GPU_PRIM_TRIS,
list,
IBOType::EditUVTris,
{VBOType::UVs, VBOType::EditUVData}});
}
else {
init_empty_dummy_batch(*cache.batch.edituv_faces);
}
}
else {
DRW_ibo_request(cache.batch.edit_selection_verts, &mbuflist->ibo.points);
DRW_vbo_request(cache.batch.edit_selection_verts, &mbuflist->vbo.pos);
DRW_vbo_request(cache.batch.edit_selection_verts, &mbuflist->vbo.vert_idx);
if (batches_to_create & MBC_EDITUV_FACES_STRETCH_AREA) {
if (edit_mapping_valid) {
batch_info.append({*cache.batch.edituv_faces_stretch_area,
GPU_PRIM_TRIS,
list,
IBOType::EditUVTris,
{VBOType::UVs, VBOType::EditUVData, VBOType::EditUVStretchArea}});
}
else {
init_empty_dummy_batch(*cache.batch.edituv_faces_stretch_area);
}
}
}
assert_deps_valid(MBC_EDIT_SELECTION_EDGES,
{BUFFER_INDEX(ibo.lines), BUFFER_INDEX(vbo.pos), BUFFER_INDEX(vbo.edge_idx)});
if (DRW_batch_requested(cache.batch.edit_selection_edges, GPU_PRIM_LINES)) {
if (is_editmode && !edit_mapping_valid) {
init_empty_dummy_batch(*cache.batch.edit_selection_edges);
if (batches_to_create & MBC_EDITUV_FACES_STRETCH_ANGLE) {
if (edit_mapping_valid) {
batch_info.append({*cache.batch.edituv_faces_stretch_angle,
GPU_PRIM_TRIS,
list,
IBOType::EditUVTris,
{VBOType::UVs, VBOType::EditUVData, VBOType::EditUVStretchAngle}});
}
else {
init_empty_dummy_batch(*cache.batch.edituv_faces_stretch_angle);
}
}
else {
DRW_ibo_request(cache.batch.edit_selection_edges, &mbuflist->ibo.lines);
DRW_vbo_request(cache.batch.edit_selection_edges, &mbuflist->vbo.pos);
DRW_vbo_request(cache.batch.edit_selection_edges, &mbuflist->vbo.edge_idx);
if (batches_to_create & MBC_EDITUV_EDGES) {
if (edit_mapping_valid) {
batch_info.append({*cache.batch.edituv_edges,
GPU_PRIM_LINES,
list,
IBOType::EditUVLines,
{VBOType::UVs, VBOType::EditUVData}});
}
else {
init_empty_dummy_batch(*cache.batch.edituv_edges);
}
}
}
assert_deps_valid(MBC_EDIT_SELECTION_FACES,
{BUFFER_INDEX(ibo.tris), BUFFER_INDEX(vbo.pos), BUFFER_INDEX(vbo.face_idx)});
if (DRW_batch_requested(cache.batch.edit_selection_faces, GPU_PRIM_TRIS)) {
if (is_editmode && !edit_mapping_valid) {
init_empty_dummy_batch(*cache.batch.edit_selection_faces);
if (batches_to_create & MBC_EDITUV_VERTS) {
if (edit_mapping_valid) {
batch_info.append({*cache.batch.edituv_verts,
GPU_PRIM_POINTS,
list,
IBOType::EditUVPoints,
{VBOType::UVs, VBOType::EditUVData}});
}
else {
init_empty_dummy_batch(*cache.batch.edituv_verts);
}
}
else {
DRW_ibo_request(cache.batch.edit_selection_faces, &mbuflist->ibo.tris);
DRW_vbo_request(cache.batch.edit_selection_faces, &mbuflist->vbo.pos);
DRW_vbo_request(cache.batch.edit_selection_faces, &mbuflist->vbo.face_idx);
}
}
assert_deps_valid(
MBC_EDIT_SELECTION_FACEDOTS,
{BUFFER_INDEX(ibo.fdots), BUFFER_INDEX(vbo.fdots_pos), BUFFER_INDEX(vbo.fdot_idx)});
if (DRW_batch_requested(cache.batch.edit_selection_fdots, GPU_PRIM_POINTS)) {
if (is_editmode && !edit_mapping_valid) {
init_empty_dummy_batch(*cache.batch.edit_selection_fdots);
}
else {
DRW_ibo_request(cache.batch.edit_selection_fdots, &mbuflist->ibo.fdots);
DRW_vbo_request(cache.batch.edit_selection_fdots, &mbuflist->vbo.fdots_pos);
DRW_vbo_request(cache.batch.edit_selection_fdots, &mbuflist->vbo.fdot_idx);
if (batches_to_create & MBC_EDITUV_FACEDOTS) {
if (edit_mapping_valid) {
batch_info.append({*cache.batch.edituv_fdots,
GPU_PRIM_POINTS,
list,
IBOType::EditUVFaceDots,
{VBOType::FaceDotUV, VBOType::FaceDotEditUVData}});
}
else {
init_empty_dummy_batch(*cache.batch.edituv_fdots);
}
}
}
/**
* TODO: The code and data structure is ready to support modified UV display
* but the selection code for UVs needs to support it first. So for now, only
* display the cage in all cases.
*/
mbuflist = (do_uvcage) ? &cache.uv_cage.buff : &cache.final.buff;
/* Edit UV */
assert_deps_valid(
MBC_EDITUV_FACES,
{BUFFER_INDEX(ibo.edituv_tris), BUFFER_INDEX(vbo.uv), BUFFER_INDEX(vbo.edituv_data)});
if (DRW_batch_requested(cache.batch.edituv_faces, GPU_PRIM_TRIS)) {
if (edit_mapping_valid) {
DRW_ibo_request(cache.batch.edituv_faces, &mbuflist->ibo.edituv_tris);
DRW_vbo_request(cache.batch.edituv_faces, &mbuflist->vbo.uv);
DRW_vbo_request(cache.batch.edituv_faces, &mbuflist->vbo.edituv_data);
}
else {
init_empty_dummy_batch(*cache.batch.edituv_faces);
}
}
assert_deps_valid(MBC_EDITUV_FACES_STRETCH_AREA,
{BUFFER_INDEX(ibo.edituv_tris),
BUFFER_INDEX(vbo.uv),
BUFFER_INDEX(vbo.edituv_data),
BUFFER_INDEX(vbo.edituv_stretch_area)});
if (DRW_batch_requested(cache.batch.edituv_faces_stretch_area, GPU_PRIM_TRIS)) {
if (edit_mapping_valid) {
DRW_ibo_request(cache.batch.edituv_faces_stretch_area, &mbuflist->ibo.edituv_tris);
DRW_vbo_request(cache.batch.edituv_faces_stretch_area, &mbuflist->vbo.uv);
DRW_vbo_request(cache.batch.edituv_faces_stretch_area, &mbuflist->vbo.edituv_data);
DRW_vbo_request(cache.batch.edituv_faces_stretch_area, &mbuflist->vbo.edituv_stretch_area);
}
else {
init_empty_dummy_batch(*cache.batch.edituv_faces_stretch_area);
}
}
assert_deps_valid(MBC_EDITUV_FACES_STRETCH_ANGLE,
{BUFFER_INDEX(ibo.edituv_tris),
BUFFER_INDEX(vbo.uv),
BUFFER_INDEX(vbo.edituv_data),
BUFFER_INDEX(vbo.edituv_stretch_angle)});
if (DRW_batch_requested(cache.batch.edituv_faces_stretch_angle, GPU_PRIM_TRIS)) {
if (edit_mapping_valid) {
DRW_ibo_request(cache.batch.edituv_faces_stretch_angle, &mbuflist->ibo.edituv_tris);
DRW_vbo_request(cache.batch.edituv_faces_stretch_angle, &mbuflist->vbo.uv);
DRW_vbo_request(cache.batch.edituv_faces_stretch_angle, &mbuflist->vbo.edituv_data);
DRW_vbo_request(cache.batch.edituv_faces_stretch_angle, &mbuflist->vbo.edituv_stretch_angle);
}
else {
init_empty_dummy_batch(*cache.batch.edituv_faces_stretch_angle);
}
}
assert_deps_valid(
MBC_EDITUV_EDGES,
{BUFFER_INDEX(ibo.edituv_lines), BUFFER_INDEX(vbo.uv), BUFFER_INDEX(vbo.edituv_data)});
if (DRW_batch_requested(cache.batch.edituv_edges, GPU_PRIM_LINES)) {
if (edit_mapping_valid) {
DRW_ibo_request(cache.batch.edituv_edges, &mbuflist->ibo.edituv_lines);
DRW_vbo_request(cache.batch.edituv_edges, &mbuflist->vbo.uv);
DRW_vbo_request(cache.batch.edituv_edges, &mbuflist->vbo.edituv_data);
}
else {
init_empty_dummy_batch(*cache.batch.edituv_edges);
}
}
assert_deps_valid(
MBC_EDITUV_VERTS,
{BUFFER_INDEX(ibo.edituv_points), BUFFER_INDEX(vbo.uv), BUFFER_INDEX(vbo.edituv_data)});
if (DRW_batch_requested(cache.batch.edituv_verts, GPU_PRIM_POINTS)) {
if (edit_mapping_valid) {
DRW_ibo_request(cache.batch.edituv_verts, &mbuflist->ibo.edituv_points);
DRW_vbo_request(cache.batch.edituv_verts, &mbuflist->vbo.uv);
DRW_vbo_request(cache.batch.edituv_verts, &mbuflist->vbo.edituv_data);
}
else {
init_empty_dummy_batch(*cache.batch.edituv_verts);
}
}
assert_deps_valid(MBC_EDITUV_FACEDOTS,
{BUFFER_INDEX(ibo.edituv_fdots),
BUFFER_INDEX(vbo.fdots_uv),
BUFFER_INDEX(vbo.fdots_edituv_data)});
if (DRW_batch_requested(cache.batch.edituv_fdots, GPU_PRIM_POINTS)) {
if (edit_mapping_valid) {
DRW_ibo_request(cache.batch.edituv_fdots, &mbuflist->ibo.edituv_fdots);
DRW_vbo_request(cache.batch.edituv_fdots, &mbuflist->vbo.fdots_uv);
DRW_vbo_request(cache.batch.edituv_fdots, &mbuflist->vbo.fdots_edituv_data);
}
else {
init_empty_dummy_batch(*cache.batch.edituv_fdots);
std::array<VectorSet<IBOType>, 3> ibo_requests;
std::array<VectorSet<VBOType>, 3> vbo_requests;
for (const BatchCreateData &batch : batch_info) {
if (batch.ibo) {
ibo_requests[int(batch.list)].add(*batch.ibo);
}
vbo_requests[int(batch.list)].add_multiple(batch.vbos);
}
#ifndef NDEBUG
auto assert_final_deps_valid = [&](const int buffer_index) {
BLI_assert(batches_that_use_buffer(buffer_index) ==
batches_that_use_buffer_local.lookup(buffer_index));
};
assert_final_deps_valid(BUFFER_INDEX(vbo.nor));
assert_final_deps_valid(BUFFER_INDEX(vbo.pos));
assert_final_deps_valid(BUFFER_INDEX(vbo.uv));
assert_final_deps_valid(BUFFER_INDEX(vbo.sculpt_data));
assert_final_deps_valid(BUFFER_INDEX(vbo.weights));
assert_final_deps_valid(BUFFER_INDEX(vbo.edge_fac));
assert_final_deps_valid(BUFFER_INDEX(vbo.mesh_analysis));
assert_final_deps_valid(BUFFER_INDEX(vbo.tan));
assert_final_deps_valid(BUFFER_INDEX(vbo.orco));
assert_final_deps_valid(BUFFER_INDEX(vbo.edit_data));
assert_final_deps_valid(BUFFER_INDEX(vbo.fdots_pos));
assert_final_deps_valid(BUFFER_INDEX(vbo.fdots_nor));
assert_final_deps_valid(BUFFER_INDEX(vbo.skin_roots));
assert_final_deps_valid(BUFFER_INDEX(vbo.vert_idx));
assert_final_deps_valid(BUFFER_INDEX(vbo.edge_idx));
assert_final_deps_valid(BUFFER_INDEX(vbo.face_idx));
assert_final_deps_valid(BUFFER_INDEX(vbo.fdot_idx));
assert_final_deps_valid(BUFFER_INDEX(vbo.edituv_data));
assert_final_deps_valid(BUFFER_INDEX(vbo.edituv_stretch_area));
assert_final_deps_valid(BUFFER_INDEX(vbo.edituv_stretch_angle));
assert_final_deps_valid(BUFFER_INDEX(vbo.fdots_uv));
assert_final_deps_valid(BUFFER_INDEX(vbo.fdots_edituv_data));
for (const int i : IndexRange(GPU_MAX_ATTR)) {
assert_final_deps_valid(BUFFER_INDEX(vbo.attr[i]));
if (batches_to_create & MBC_SURFACE_PER_MAT) {
ibo_requests[int(BufferList::Final)].add(IBOType::Tris);
vbo_requests[int(BufferList::Final)].add(VBOType::CornerNormal);
vbo_requests[int(BufferList::Final)].add(VBOType::Position);
for (const int i : IndexRange(cache.attr_used.num_requests)) {
vbo_requests[int(BufferList::Final)].add(VBOType(int8_t(VBOType::Attr0) + i));
}
if (cache.cd_used.uv != 0) {
vbo_requests[int(BufferList::Final)].add(VBOType::UVs);
}
if ((cache.cd_used.tan != 0) || (cache.cd_used.tan_orco != 0)) {
vbo_requests[int(BufferList::Final)].add(VBOType::Tangents);
}
if (cache.cd_used.orco != 0) {
vbo_requests[int(BufferList::Final)].add(VBOType::Orco);
}
}
assert_final_deps_valid(BUFFER_INDEX(vbo.attr_viewer));
assert_final_deps_valid(BUFFER_INDEX(vbo.vnor));
assert_final_deps_valid(BUFFER_INDEX(ibo.tris));
assert_final_deps_valid(BUFFER_INDEX(ibo.lines));
assert_final_deps_valid(BUFFER_INDEX(ibo.lines_loose));
assert_final_deps_valid(BUFFER_INDEX(ibo.lines_adjacency));
assert_final_deps_valid(BUFFER_INDEX(ibo.lines_paint_mask));
assert_final_deps_valid(BUFFER_INDEX(ibo.points));
assert_final_deps_valid(BUFFER_INDEX(ibo.fdots));
assert_final_deps_valid(BUFFER_INDEX(ibo.edituv_tris));
assert_final_deps_valid(BUFFER_INDEX(ibo.edituv_lines));
assert_final_deps_valid(BUFFER_INDEX(ibo.edituv_points));
assert_final_deps_valid(BUFFER_INDEX(ibo.edituv_fdots));
assert_final_deps_valid(TRIS_PER_MAT_INDEX);
#endif
if (do_uvcage) {
mesh_buffer_cache_create_requested(task_graph,
scene,
cache,
cache.uv_cage,
ibo_requests[int(BufferList::UVCage)],
vbo_requests[int(BufferList::UVCage)],
ob,
mesh,
is_editmode,
is_paint_mode,
ob.object_to_world(),
false,
true,
scene,
ts,
true);
}
if (do_cage) {
mesh_buffer_cache_create_requested(task_graph,
scene,
cache,
cache.cage,
ibo_requests[int(BufferList::Cage)],
vbo_requests[int(BufferList::Cage)],
ob,
mesh,
is_editmode,
is_paint_mode,
ob.object_to_world(),
false,
false,
scene,
ts,
true);
}
@@ -1992,6 +1679,8 @@ void DRW_mesh_batch_cache_create_requested(TaskGraph &task_graph,
mesh,
cache,
cache.final,
ibo_requests[int(BufferList::Final)],
vbo_requests[int(BufferList::Final)],
is_editmode,
is_paint_mode,
ob.object_to_world(),
@@ -2008,30 +1697,57 @@ void DRW_mesh_batch_cache_create_requested(TaskGraph &task_graph,
}
mesh_buffer_cache_create_requested(task_graph,
scene,
cache,
cache.final,
ibo_requests[int(BufferList::Final)],
vbo_requests[int(BufferList::Final)],
ob,
mesh,
is_editmode,
is_paint_mode,
ob.object_to_world(),
true,
false,
scene,
ts,
use_hide);
/* Ensure that all requested batches have finished.
* Ideally we want to remove this sync, but there are cases where this doesn't work.
* See #79038 for example.
*
* An idea to improve this is to separate the Object mode from the edit mode draw caches. And
* based on the mode the correct one will be updated. Other option is to look into using
* drw_batch_cache_generate_requested_delayed. */
BLI_task_graph_work_and_wait(&task_graph);
#ifndef NDEBUG
drw_mesh_batch_cache_check_available(task_graph, mesh);
#endif
std::array<MeshBufferCache *, 3> caches{&cache.final, &cache.cage, &cache.uv_cage};
for (const BatchCreateData &batch : batch_info) {
MeshBufferCache &cache_for_batch = *caches[int(batch.list)];
gpu::IndexBuf *ibo = batch.ibo ? caches[int(batch.list)]->buff.ibos.lookup(*batch.ibo).get() :
nullptr;
GPU_batch_init(&batch.batch, batch.prim_type, nullptr, ibo);
for (const VBOType vbo_request : batch.vbos) {
GPU_batch_vertbuf_add(
&batch.batch, cache_for_batch.buff.vbos.lookup(vbo_request).get(), false);
}
}
if (batches_to_create & MBC_SURFACE_PER_MAT) {
MeshBufferList &buffers = cache.final.buff;
gpu::IndexBuf &tris_ibo = *buffers.ibos.lookup(IBOType::Tris);
create_material_subranges(cache.final.face_sorted, tris_ibo, cache.tris_per_mat);
for (const int material : IndexRange(cache.mat_len)) {
gpu::Batch *batch = cache.surface_per_mat[material];
GPU_batch_init(batch, GPU_PRIM_TRIS, nullptr, cache.tris_per_mat[material]);
GPU_batch_vertbuf_add(batch, buffers.vbos.lookup(VBOType::CornerNormal).get(), false);
GPU_batch_vertbuf_add(batch, buffers.vbos.lookup(VBOType::Position).get(), false);
if (cache.cd_used.uv != 0) {
GPU_batch_vertbuf_add(batch, buffers.vbos.lookup(VBOType::UVs).get(), false);
}
if ((cache.cd_used.tan != 0) || (cache.cd_used.tan_orco != 0)) {
GPU_batch_vertbuf_add(batch, buffers.vbos.lookup(VBOType::Tangents).get(), false);
}
if (cache.cd_used.orco != 0) {
GPU_batch_vertbuf_add(batch, buffers.vbos.lookup(VBOType::Orco).get(), false);
}
for (const int i : IndexRange(cache.attr_used.num_requests)) {
GPU_batch_vertbuf_add(
batch, buffers.vbos.lookup(VBOType(int8_t(VBOType::Attr0) + i)).get(), false);
}
}
}
cache.batch_ready |= batch_requested;
}
/** \} */

View File

@@ -1589,6 +1589,8 @@ static bool draw_subdiv_create_requested_buffers(Object &ob,
Mesh &mesh,
MeshBatchCache &batch_cache,
MeshBufferCache &mbc,
const Span<IBOType> ibo_requests,
const Span<VBOType> vbo_requests,
const bool is_editmode,
const bool is_paint_mode,
const float4x4 &object_to_world,
@@ -1680,22 +1682,23 @@ static bool draw_subdiv_create_requested_buffers(Object &ob,
draw_cache.use_custom_loop_normals = (runtime_data->use_loop_normals) &&
mesh_eval->attributes().contains("custom_normal");
if (DRW_ibo_requested(mbc.buff.ibo.tris)) {
if (ibo_requests.contains(IBOType::Tris)) {
draw_subdiv_cache_ensure_mat_offsets(draw_cache, mesh_eval, batch_cache.mat_len);
}
std::unique_ptr<MeshRenderData> mr = mesh_render_data_create(
MeshRenderData mr = mesh_render_data_create(
ob, mesh, is_editmode, is_paint_mode, object_to_world, do_final, do_uvedit, use_hide, ts);
draw_cache.use_hide = use_hide;
/* Used for setting loop normals flags. Mapped extraction is only used during edit mode.
* See comments in #extract_lnor_iter_face_mesh.
*/
draw_cache.is_edit_mode = mr->edit_bmesh != nullptr;
draw_cache.is_edit_mode = mr.edit_bmesh != nullptr;
draw_subdiv_cache_update_extra_coarse_face_data(draw_cache, mesh_eval, *mr);
draw_subdiv_cache_update_extra_coarse_face_data(draw_cache, mesh_eval, mr);
mesh_buffer_cache_create_requested_subdiv(batch_cache, mbc, draw_cache, *mr);
mesh_buffer_cache_create_requested_subdiv(
batch_cache, mbc, ibo_requests, vbo_requests, draw_cache, mr);
maybe_increment_cache_ref(subdiv);
return true;
@@ -1761,6 +1764,8 @@ void DRW_create_subdivision(Object &ob,
Mesh &mesh,
MeshBatchCache &batch_cache,
MeshBufferCache &mbc,
const Span<IBOType> ibo_requests,
const Span<VBOType> vbo_requests,
const bool is_editmode,
const bool is_paint_mode,
const float4x4 &object_to_world,
@@ -1781,6 +1786,8 @@ void DRW_create_subdivision(Object &ob,
mesh,
batch_cache,
mbc,
ibo_requests,
vbo_requests,
is_editmode,
is_paint_mode,
object_to_world,

View File

@@ -155,6 +155,8 @@ void DRW_create_subdivision(Object &ob,
Mesh &mesh,
MeshBatchCache &batch_cache,
MeshBufferCache &mbc,
Span<IBOType> ibo_requests,
Span<VBOType> vbo_requests,
bool is_editmode,
bool is_paint_mode,
const float4x4 &object_to_world,

View File

@@ -62,7 +62,6 @@ struct MeshRenderData {
bool use_hide;
bool use_subsurf_fdots;
bool use_final_mesh;
bool hide_unmapped_edges;
bool use_simplify_normals;
@@ -185,15 +184,15 @@ BLI_INLINE const float *bm_face_no_get(const MeshRenderData &mr, const BMFace *e
* \param edit_mode_active: When true, use the modifiers from the edit-data,
* otherwise don't use modifiers as they are not from this object.
*/
std::unique_ptr<MeshRenderData> mesh_render_data_create(Object &object,
Mesh &mesh,
bool is_editmode,
bool is_paint_mode,
const float4x4 &object_to_world,
bool do_final,
bool do_uvedit,
bool use_hide,
const ToolSettings *ts);
MeshRenderData mesh_render_data_create(Object &object,
Mesh &mesh,
bool is_editmode,
bool is_paint_mode,
const float4x4 &object_to_world,
bool do_final,
bool do_uvedit,
bool use_hide,
const ToolSettings *ts);
void mesh_render_data_update_corner_normals(MeshRenderData &mr);
void mesh_render_data_update_face_normals(MeshRenderData &mr);
void mesh_render_data_update_loose_geom(MeshRenderData &mr, MeshBufferCache &cache);
@@ -272,10 +271,10 @@ void extract_edge_factor_subdiv(const DRWSubdivCache &subdiv_cache,
gpu::VertBuf &pos_nor,
gpu::VertBuf &vbo);
void extract_tris(const MeshRenderData &mr,
const SortedFaceData &face_sorted,
MeshBatchCache &cache,
gpu::IndexBuf &ibo);
void extract_tris(const MeshRenderData &mr, const SortedFaceData &face_sorted, gpu::IndexBuf &ibo);
void create_material_subranges(const SortedFaceData &face_sorted,
gpu::IndexBuf &tris_ibo,
MutableSpan<gpu::IndexBuf *> ibos);
void extract_tris_subdiv(const DRWSubdivCache &subdiv_cache,
MeshBatchCache &cache,
gpu::IndexBuf &ibo);
@@ -392,13 +391,13 @@ void extract_orco(const MeshRenderData &mr, gpu::VertBuf &vbo);
void extract_mesh_analysis(const MeshRenderData &mr, gpu::VertBuf &vbo);
void extract_attributes(const MeshRenderData &mr,
const Span<DRW_AttributeRequest> requests,
const Span<gpu::VertBuf *> vbos);
void extract_attributes_subdiv(const MeshRenderData &mr,
const DRWSubdivCache &subdiv_cache,
const Span<DRW_AttributeRequest> requests,
const Span<gpu::VertBuf *> vbos);
void extract_attribute(const MeshRenderData &mr,
const DRW_AttributeRequest &request,
gpu::VertBuf &vbo);
void extract_attribute_subdiv(const MeshRenderData &mr,
const DRWSubdivCache &subdiv_cache,
const DRW_AttributeRequest &request,
gpu::VertBuf &vbo);
void extract_attr_viewer(const MeshRenderData &mr, gpu::VertBuf &vbo);
} // namespace blender::draw

View File

@@ -92,33 +92,29 @@ static void extract_tris_bmesh(const MeshRenderData &mr,
GPU_indexbuf_build_in_place_ex(&builder, 0, bm.totloop, false, &ibo);
}
static void create_material_subranges(const MeshRenderData &mr,
const SortedFaceData &face_sorted,
MeshBatchCache &cache,
gpu::IndexBuf &ibo)
void create_material_subranges(const SortedFaceData &face_sorted,
gpu::IndexBuf &tris_ibo,
MutableSpan<gpu::IndexBuf *> ibos)
{
/* Create ibo sub-ranges. Always do this to avoid error when the standard surface batch
* is created before the surfaces-per-material. */
int mat_start = 0;
for (int i = 0; i < mr.materials_num; i++) {
for (const int i : face_sorted.tris_num_by_material.index_range()) {
/* These IBOs have not been queried yet but we create them just in case they are needed
* later since they are not tracked by mesh_buffer_cache_create_requested(). */
if (cache.tris_per_mat[i] == nullptr) {
cache.tris_per_mat[i] = GPU_indexbuf_calloc();
if (ibos[i] == nullptr) {
ibos[i] = GPU_indexbuf_calloc();
}
const int mat_tri_len = face_sorted.tris_num_by_material[i];
/* Multiply by 3 because these are triangle indices. */
const int start = mat_start * 3;
const int len = mat_tri_len * 3;
GPU_indexbuf_create_subrange_in_place(cache.tris_per_mat[i], &ibo, start, len);
GPU_indexbuf_create_subrange_in_place(ibos[i], &tris_ibo, start, len);
mat_start += mat_tri_len;
}
}
void extract_tris(const MeshRenderData &mr,
const SortedFaceData &face_sorted,
MeshBatchCache &cache,
gpu::IndexBuf &ibo)
void extract_tris(const MeshRenderData &mr, const SortedFaceData &face_sorted, gpu::IndexBuf &ibo)
{
if (mr.extract_type == MeshExtractType::Mesh) {
extract_tris_mesh(mr, face_sorted, ibo);
@@ -126,10 +122,6 @@ void extract_tris(const MeshRenderData &mr,
else {
extract_tris_bmesh(mr, face_sorted, ibo);
}
if (mr.use_final_mesh && !cache.tris_per_mat.is_empty()) {
create_material_subranges(mr, face_sorted, cache, ibo);
}
}
void extract_tris_subdiv(const DRWSubdivCache &subdiv_cache,

View File

@@ -186,9 +186,9 @@ static const CustomData *get_custom_data_for_domain(const BMesh &bm, bke::AttrDo
}
}
static void extract_attribute(const MeshRenderData &mr,
const DRW_AttributeRequest &request,
gpu::VertBuf &vbo)
static void extract_attribute_no_init(const MeshRenderData &mr,
const DRW_AttributeRequest &request,
gpu::VertBuf &vbo)
{
if (mr.extract_type == MeshExtractType::BMesh) {
const CustomData &custom_data = *get_custom_data_for_domain(*mr.bm, request.domain);
@@ -246,59 +246,48 @@ static void extract_attribute(const MeshRenderData &mr,
});
}
}
void extract_attributes(const MeshRenderData &mr,
const Span<DRW_AttributeRequest> requests,
const Span<gpu::VertBuf *> vbos)
void extract_attribute(const MeshRenderData &mr,
const DRW_AttributeRequest &request,
gpu::VertBuf &vbo)
{
for (const int i : vbos.index_range()) {
if (DRW_vbo_requested(vbos[i])) {
init_vbo_for_attribute(mr, *vbos[i], requests[i], false, uint32_t(mr.corners_num));
extract_attribute(mr, requests[i], *vbos[i]);
}
}
init_vbo_for_attribute(mr, vbo, request, false, uint32_t(mr.corners_num));
extract_attribute_no_init(mr, request, vbo);
}
void extract_attributes_subdiv(const MeshRenderData &mr,
const DRWSubdivCache &subdiv_cache,
const Span<DRW_AttributeRequest> requests,
const Span<gpu::VertBuf *> vbos)
void extract_attribute_subdiv(const MeshRenderData &mr,
const DRWSubdivCache &subdiv_cache,
const DRW_AttributeRequest &request,
gpu::VertBuf &vbo)
{
for (const int i : vbos.index_range()) {
if (DRW_vbo_requested(vbos[i])) {
const DRW_AttributeRequest &request = requests[i];
const Mesh *coarse_mesh = subdiv_cache.mesh;
const Mesh *coarse_mesh = subdiv_cache.mesh;
/* Prepare VBO for coarse data. The compute shader only expects floats. */
gpu::VertBuf *src_data = GPU_vertbuf_calloc();
GPUVertFormat coarse_format = draw::init_format_for_attribute(request.cd_type, "data");
GPU_vertbuf_init_with_format_ex(*src_data, coarse_format, GPU_USAGE_STATIC);
GPU_vertbuf_data_alloc(*src_data, uint32_t(coarse_mesh->corners_num));
/* Prepare VBO for coarse data. The compute shader only expects floats. */
gpu::VertBuf *src_data = GPU_vertbuf_calloc();
GPUVertFormat coarse_format = draw::init_format_for_attribute(request.cd_type, "data");
GPU_vertbuf_init_with_format_ex(*src_data, coarse_format, GPU_USAGE_STATIC);
GPU_vertbuf_data_alloc(*src_data, uint32_t(coarse_mesh->corners_num));
extract_attribute(mr, request, *src_data);
extract_attribute_no_init(mr, request, *src_data);
gpu::VertBuf &dst_buffer = *vbos[i];
init_vbo_for_attribute(mr, dst_buffer, request, true, subdiv_cache.num_subdiv_loops);
init_vbo_for_attribute(mr, vbo, request, true, subdiv_cache.num_subdiv_loops);
/* Ensure data is uploaded properly. */
GPU_vertbuf_tag_dirty(src_data);
bke::attribute_math::convert_to_static_type(request.cd_type, [&](auto dummy) {
using T = decltype(dummy);
using Converter = AttributeConverter<T>;
if constexpr (!std::is_void_v<typename Converter::VBOType>) {
draw_subdiv_interp_custom_data(subdiv_cache,
*src_data,
dst_buffer,
Converter::gpu_component_type,
Converter::gpu_component_len,
0);
}
});
GPU_vertbuf_discard(src_data);
/* Ensure data is uploaded properly. */
GPU_vertbuf_tag_dirty(src_data);
bke::attribute_math::convert_to_static_type(request.cd_type, [&](auto dummy) {
using T = decltype(dummy);
using Converter = AttributeConverter<T>;
if constexpr (!std::is_void_v<typename Converter::VBOType>) {
draw_subdiv_interp_custom_data(subdiv_cache,
*src_data,
vbo,
Converter::gpu_component_type,
Converter::gpu_component_len,
0);
}
}
});
GPU_vertbuf_discard(src_data);
}
void extract_attr_viewer(const MeshRenderData &mr, gpu::VertBuf &vbo)

View File

@@ -236,8 +236,8 @@ void extract_edituv_stretch_angle_subdiv(const MeshRenderData &mr,
GPU_vertbuf_init_build_on_device(
vbo, get_edituv_stretch_angle_format_subdiv(), subdiv_cache.num_subdiv_loops);
gpu::VertBuf *pos_nor = cache.final.buff.vbo.pos;
gpu::VertBuf *uvs = cache.final.buff.vbo.uv;
gpu::VertBuf *pos_nor = cache.final.buff.vbos.lookup(VBOType::Position).get();
gpu::VertBuf *uvs = cache.final.buff.vbos.lookup(VBOType::UVs).get();
/* It may happen that the data for the UV editor is requested before (as a separate draw update)
* the data for the mesh when switching to the `UV Editing` workspace, and therefore the position
@@ -281,10 +281,6 @@ void extract_edituv_stretch_angle_subdiv(const MeshRenderData &mr,
uvs_offset *= subdiv_cache.num_subdiv_loops * 2;
draw_subdiv_build_edituv_stretch_angle_buffer(subdiv_cache, pos_nor, uvs, uvs_offset, &vbo);
if (!cache.final.buff.vbo.pos) {
GPU_vertbuf_discard(pos_nor);
}
}
} // namespace blender::draw

View File

@@ -271,3 +271,15 @@ int GPU_indexbuf_primitive_len(GPUPrimType prim_type);
elem = nullptr; \
} \
} while (0)
namespace blender::gpu {
class IndexBufDeleter {
public:
void operator()(IndexBuf *ibo)
{
GPU_indexbuf_discard(ibo);
}
};
} // namespace blender::gpu

View File

@@ -295,3 +295,15 @@ uint GPU_vertbuf_get_memory_usage();
verts = nullptr; \
} \
} while (0)
namespace blender::gpu {
class VertBufDeleter {
public:
void operator()(VertBuf *vbo)
{
GPU_vertbuf_discard(vbo);
}
};
} // namespace blender::gpu