DRW: Make vertex format threadsafe

Wrap the vertex format initialization into lambda expressions to make
them threadsafe.

Pull Request: https://projects.blender.org/blender/blender/pulls/134685
Author: Clément Foucault
Date: 2025-02-17 18:24:31 +01:00
Committed by: Clément Foucault
Parent: dd03a322fe
Commit: a6364eae75
33 changed files with 311 additions and 326 deletions
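
The change relies on the C++11 guarantee that a block-scope static is initialized exactly once, even when several threads reach the declaration concurrently ("magic statics"). Building the format inside an immediately-invoked lambda and assigning the result to a static const therefore replaces the old lazy-init guard, which raced when two threads both observed attr_len == 0. A minimal sketch of the before/after pattern (the "pos" attribute is illustrative, taken from the touched files):

  /* Before: lazy initialization guarded by a member check. Two threads can
   * both see attr_len == 0 and build the format concurrently (data race). */
  static GPUVertFormat format = {0};
  if (format.attr_len == 0) {
    GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
  }

  /* After: the format is built inside an immediately-invoked lambda whose
   * result initializes a function-local static. The compiler serializes the
   * one-time initialization, so no explicit guard is needed. */
  static const GPUVertFormat format = []() {
    GPUVertFormat format{};
    GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    return format;
  }();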

View File

@@ -71,11 +71,12 @@ namespace blender::draw {
void DRW_vertbuf_create_wiredata(blender::gpu::VertBuf *vbo, const int vert_len)
{
static GPUVertFormat format = {0};
static struct {
uint wd;
} attr_id;
if (format.attr_len == 0) {
static const GPUVertFormat format = [&]() {
GPUVertFormat format{};
/* initialize vertex format */
if (!GPU_crappy_amd_driver()) {
/* Some AMD drivers strangely crash with a vbo with this format. */
@@ -85,7 +86,8 @@ void DRW_vertbuf_create_wiredata(blender::gpu::VertBuf *vbo, const int vert_len)
else {
attr_id.wd = GPU_vertformat_attr_add(&format, "wd", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
}
}
return format;
}();
GPU_vertbuf_init_with_format(*vbo, format);
GPU_vertbuf_data_alloc(*vbo, vert_len);

View File

@@ -457,13 +457,15 @@ static void curve_create_curves_pos(CurveRenderData *rdata, gpu::VertBuf *vbo_cu
return;
}
static GPUVertFormat format = {0};
static struct {
uint pos;
} attr_id;
if (format.attr_len == 0) {
static const GPUVertFormat format = [&]() {
GPUVertFormat format{};
attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
}
return format;
}();
const int vert_len = curve_render_data_wire_verts_len_get(rdata);
GPU_vertbuf_init_with_format(*vbo_curves_pos, format);
@@ -480,10 +482,11 @@ static void curve_create_attribute(CurveRenderData *rdata, gpu::VertBuf &vbo_att
return;
}
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
static const GPUVertFormat format = []() {
GPUVertFormat format{};
GPU_vertformat_attr_add(&format, "attribute_value", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
}
return format;
}();
const int vert_len = curve_render_data_wire_verts_len_get(rdata);
GPU_vertbuf_init_with_format(vbo_attr, format);
@@ -536,28 +539,32 @@ static void curve_create_edit_curves_nor(CurveRenderData *rdata,
const bool do_hq_normals = (scene->r.perf_flag & SCE_PERF_HQ_NORMALS) != 0 ||
GPU_use_hq_normals_workaround();
static GPUVertFormat format = {0};
static GPUVertFormat format_hq = {0};
static struct {
uint pos, nor, tan, rad;
uint pos_hq, nor_hq, tan_hq, rad_hq;
} attr_id;
if (format.attr_len == 0) {
/* initialize vertex formats */
static const GPUVertFormat format = [&]() {
GPUVertFormat format{};
attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
attr_id.rad = GPU_vertformat_attr_add(&format, "rad", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
attr_id.nor = GPU_vertformat_attr_add(
&format, "nor", GPU_COMP_I10, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
attr_id.tan = GPU_vertformat_attr_add(
&format, "tan", GPU_COMP_I10, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
return format;
}();
attr_id.pos_hq = GPU_vertformat_attr_add(&format_hq, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
attr_id.rad_hq = GPU_vertformat_attr_add(&format_hq, "rad", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
static const GPUVertFormat format_hq = [&]() {
GPUVertFormat format{};
attr_id.pos_hq = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
attr_id.rad_hq = GPU_vertformat_attr_add(&format, "rad", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
attr_id.nor_hq = GPU_vertformat_attr_add(
&format_hq, "nor", GPU_COMP_I16, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
&format, "nor", GPU_COMP_I16, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
attr_id.tan_hq = GPU_vertformat_attr_add(
&format_hq, "tan", GPU_COMP_I16, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
}
&format, "tan", GPU_COMP_I16, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
return format;
}();
const GPUVertFormat &format_ptr = do_hq_normals ? format_hq : format;
@@ -650,16 +657,21 @@ static void curve_create_edit_data_and_handles(CurveRenderData *rdata,
gpu::IndexBuf *ibo_edit_verts_points,
gpu::IndexBuf *ibo_edit_lines)
{
static GPUVertFormat format_pos = {0};
static GPUVertFormat format_data = {0};
static struct {
uint pos, data;
} attr_id;
if (format_pos.attr_len == 0) {
/* initialize vertex formats */
attr_id.pos = GPU_vertformat_attr_add(&format_pos, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
attr_id.data = GPU_vertformat_attr_add(&format_data, "data", GPU_COMP_U32, 1, GPU_FETCH_INT);
}
static const GPUVertFormat format_pos = [&]() {
GPUVertFormat format{};
attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
return format;
}();
static const GPUVertFormat format_data = [&]() {
GPUVertFormat format{};
attr_id.data = GPU_vertformat_attr_add(&format, "data", GPU_COMP_U32, 1, GPU_FETCH_INT);
return format;
}();
int verts_len_capacity = curve_render_data_overlay_verts_len_get(rdata);
int edges_len_capacity = curve_render_data_overlay_edges_len_get(rdata) * 2;

View File

@@ -103,19 +103,6 @@ struct CurvesBatchCache {
std::mutex render_mutex;
};
static uint DUMMY_ID;
static GPUVertFormat single_attr_vbo_format(const char *name,
const GPUVertCompType comp_type,
const uint comp_len,
const GPUVertFetchMode fetch_mode,
uint &attr_id = DUMMY_ID)
{
GPUVertFormat format{};
attr_id = GPU_vertformat_attr_add(&format, name, comp_type, comp_len, fetch_mode);
return format;
}
static bool batch_cache_is_dirty(const Curves &curves)
{
const CurvesBatchCache *cache = static_cast<CurvesBatchCache *>(curves.batch_cache);
@@ -284,11 +271,11 @@ static void create_edit_points_position_and_data(
const bke::crazyspace::GeometryDeformation deformation,
CurvesBatchCache &cache)
{
static GPUVertFormat format_pos = single_attr_vbo_format(
static const GPUVertFormat format_pos = GPU_vertformat_from_attribute(
"pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
/* GPU_COMP_U32 is used instead of GPU_COMP_U8 because depending on running hardware stride might
* still be 4. Thus adding complexity to the code and still sparing no memory. */
static GPUVertFormat format_data = single_attr_vbo_format(
static const GPUVertFormat format_data = GPU_vertformat_from_attribute(
"data", GPU_COMP_U32, 1, GPU_FETCH_INT);
Span<float3> deformed_positions = deformation.positions;
@@ -380,7 +367,7 @@ static void create_edit_points_selection(const bke::CurvesGeometry &curves,
const OffsetIndices<int> bezier_dst_offsets,
CurvesBatchCache &cache)
{
static GPUVertFormat format_data = single_attr_vbo_format(
static const GPUVertFormat format_data = GPU_vertformat_from_attribute(
"selection", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
const int bezier_point_count = bezier_dst_offsets.total_size();
@@ -679,11 +666,8 @@ static void calc_final_indices(const bke::CurvesGeometry &curves,
verts_per_curve = (cache.final.resolution - 1) * verts_per_segment;
}
static GPUVertFormat format = {0};
GPU_vertformat_clear(&format);
/* initialize vertex format */
GPU_vertformat_attr_add(&format, "dummy", GPU_COMP_U8, 1, GPU_FETCH_INT_TO_FLOAT_UNIT);
static const GPUVertFormat format = GPU_vertformat_from_attribute(
"dummy", GPU_COMP_U32, 1, GPU_FETCH_INT_TO_FLOAT_UNIT);
gpu::VertBuf *vbo = GPU_vertbuf_create_with_format(format);
GPU_vertbuf_data_alloc(*vbo, 1);
@@ -1048,9 +1032,8 @@ static void create_edit_points_position_vbo(
const bke::crazyspace::GeometryDeformation & /*deformation*/,
CurvesBatchCache &cache)
{
static uint attr_id;
static GPUVertFormat format = single_attr_vbo_format(
"pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT, attr_id);
static const GPUVertFormat format = GPU_vertformat_from_attribute(
"pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
/* TODO: Deform curves using deformations. */
const Span<float3> positions = curves.evaluated_positions();

View File

@@ -84,14 +84,15 @@ struct GreasePencilStrokeVert {
float uv_fill[2], u_stroke, opacity;
};
static GPUVertFormat *grease_pencil_stroke_format()
static const GPUVertFormat *grease_pencil_stroke_format()
{
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
static const GPUVertFormat format = []() {
GPUVertFormat format{};
GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
GPU_vertformat_attr_add(&format, "ma", GPU_COMP_I32, 4, GPU_FETCH_INT);
GPU_vertformat_attr_add(&format, "uv", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
}
return format;
}();
return &format;
}
@@ -101,13 +102,14 @@ struct GreasePencilColorVert {
float fcol[4]; /* Fill color */
};
static GPUVertFormat *grease_pencil_color_format()
static const GPUVertFormat *grease_pencil_color_format()
{
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
static const GPUVertFormat format = []() {
GPUVertFormat format{};
GPU_vertformat_attr_add(&format, "col", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
GPU_vertformat_attr_add(&format, "fcol", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
}
return format;
}();
return &format;
}
@@ -265,15 +267,11 @@ static void grease_pencil_weight_batch_ensure(Object &object,
const Span<const Layer *> layers = grease_pencil.layers();
static GPUVertFormat format_points_pos = {0};
if (format_points_pos.attr_len == 0) {
GPU_vertformat_attr_add(&format_points_pos, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
}
static const GPUVertFormat format_points_pos = GPU_vertformat_from_attribute(
"pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
static GPUVertFormat format_points_weight = {0};
if (format_points_weight.attr_len == 0) {
GPU_vertformat_attr_add(&format_points_weight, "selection", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
}
static const GPUVertFormat format_points_weight = GPU_vertformat_from_attribute(
"selection", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
GPUUsageType vbo_flag = GPU_USAGE_STATIC | GPU_USAGE_FLAG_BUFFER_TEXTURE_ONLY;
cache->edit_points_pos = GPU_vertbuf_create_with_format_ex(format_points_pos, vbo_flag);
@@ -699,32 +697,20 @@ static void grease_pencil_edit_batch_ensure(Object &object,
const Span<const Layer *> layers = grease_pencil.layers();
static GPUVertFormat format_edit_points_pos = {0};
if (format_edit_points_pos.attr_len == 0) {
GPU_vertformat_attr_add(&format_edit_points_pos, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
}
static const GPUVertFormat format_edit_points_pos = GPU_vertformat_from_attribute(
"pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
static GPUVertFormat format_edit_line_pos = {0};
if (format_edit_line_pos.attr_len == 0) {
GPU_vertformat_attr_add(&format_edit_line_pos, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
}
static const GPUVertFormat format_edit_line_pos = GPU_vertformat_from_attribute(
"pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
static GPUVertFormat format_edit_points_selection = {0};
if (format_edit_points_selection.attr_len == 0) {
GPU_vertformat_attr_add(
&format_edit_points_selection, "selection", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
}
static const GPUVertFormat format_edit_points_selection = GPU_vertformat_from_attribute(
"selection", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
static GPUVertFormat format_edit_points_vflag = {0};
if (format_edit_points_vflag.attr_len == 0) {
GPU_vertformat_attr_add(&format_edit_points_vflag, "vflag", GPU_COMP_U32, 1, GPU_FETCH_INT);
}
static const GPUVertFormat format_edit_points_vflag = GPU_vertformat_from_attribute(
"vflag", GPU_COMP_U32, 1, GPU_FETCH_INT);
static GPUVertFormat format_edit_line_selection = {0};
if (format_edit_line_selection.attr_len == 0) {
GPU_vertformat_attr_add(
&format_edit_line_selection, "selection", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
}
static const GPUVertFormat format_edit_line_selection = GPU_vertformat_from_attribute(
"selection", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
GPUUsageType vbo_flag = GPU_USAGE_STATIC | GPU_USAGE_FLAG_BUFFER_TEXTURE_ONLY;
cache->edit_points_pos = GPU_vertbuf_create_with_format_ex(format_edit_points_pos, vbo_flag);
@@ -1138,8 +1124,8 @@ static void grease_pencil_geom_batch_ensure(Object &object,
GPUUsageType vbo_flag = GPU_USAGE_STATIC | GPU_USAGE_FLAG_BUFFER_TEXTURE_ONLY;
/* Create VBOs. */
GPUVertFormat *format = grease_pencil_stroke_format();
GPUVertFormat *format_col = grease_pencil_color_format();
const GPUVertFormat *format = grease_pencil_stroke_format();
const GPUVertFormat *format_col = grease_pencil_color_format();
cache->vbo = GPU_vertbuf_create_with_format_ex(*format, vbo_flag);
cache->vbo_col = GPU_vertbuf_create_with_format_ex(*format_col, vbo_flag);
/* Add extra space at the end of the buffer because of quad load. */

View File

@@ -420,15 +420,15 @@ static void lattice_batch_cache_create_overlay_batches(Lattice *lt)
LatticeRenderData *rdata = lattice_render_data_create(lt, options);
if (cache->overlay_verts == nullptr) {
static GPUVertFormat format = {0};
static struct {
uint pos, data;
} attr_id;
if (format.attr_len == 0) {
/* initialize vertex format */
static const GPUVertFormat format = [&]() {
GPUVertFormat format{};
attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
attr_id.data = GPU_vertformat_attr_add(&format, "data", GPU_COMP_U8, 1, GPU_FETCH_INT);
}
return format;
}();
const int vert_len = lattice_render_data_verts_len_get(rdata);

View File

@@ -94,16 +94,15 @@ struct EditStrandData {
float selection;
};
static GPUVertFormat *edit_points_vert_format_get(uint *r_pos_id, uint *r_selection_id)
static const GPUVertFormat *edit_points_vert_format_get(uint *r_pos_id, uint *r_selection_id)
{
static GPUVertFormat edit_point_format = {0};
static uint pos_id, selection_id;
if (edit_point_format.attr_len == 0) {
/* Keep in sync with EditStrandData */
pos_id = GPU_vertformat_attr_add(&edit_point_format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
selection_id = GPU_vertformat_attr_add(
&edit_point_format, "selection", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
}
static const GPUVertFormat edit_point_format = [&]() {
GPUVertFormat format{};
pos_id = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
selection_id = GPU_vertformat_attr_add(&format, "selection", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
return format;
}();
*r_pos_id = pos_id;
*r_selection_id = selection_id;
return &edit_point_format;
@@ -821,8 +820,8 @@ static void particle_batch_cache_ensure_procedural_final_points(ParticleHairCach
int subdiv)
{
/* Same format as proc_point_buf. */
GPUVertFormat format = {0};
GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
static const GPUVertFormat format = GPU_vertformat_from_attribute(
"pos", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
/* Procedural Subdiv buffer only needs to be resident in device memory. */
cache->final[subdiv].proc_buf = GPU_vertbuf_create_with_format_ex(
@@ -1088,12 +1087,8 @@ static void particle_batch_cache_ensure_procedural_indices(PTCacheEdit *edit,
int element_count = (verts_per_hair + 1) * cache->strands_len;
GPUPrimType prim_type = (thickness_res == 1) ? GPU_PRIM_LINE_STRIP : GPU_PRIM_TRI_STRIP;
static GPUVertFormat format = {0};
GPU_vertformat_clear(&format);
/* NOTE: initialize vertex format. Using GPU_COMP_U32 to satisfy Metal's 4-byte minimum
* stride requirement. */
GPU_vertformat_attr_add(&format, "dummy", GPU_COMP_U32, 1, GPU_FETCH_INT_TO_FLOAT_UNIT);
static const GPUVertFormat format = GPU_vertformat_from_attribute(
"dummy", GPU_COMP_U32, 1, GPU_FETCH_INT_TO_FLOAT_UNIT);
gpu::VertBuf *vbo = GPU_vertbuf_create_with_format(format);
GPU_vertbuf_data_alloc(*vbo, 1);
@@ -1188,7 +1183,7 @@ static void particle_batch_cache_ensure_pos_and_seg(PTCacheEdit *edit,
GPU_VERTBUF_DISCARD_SAFE(hair_cache->pos);
GPU_INDEXBUF_DISCARD_SAFE(hair_cache->indices);
static GPUVertFormat format = {0};
GPUVertFormat format = {0};
HairAttributeID attr_id;
uint *uv_id = nullptr;
uint *col_id = nullptr;
@@ -1217,9 +1212,6 @@ static void particle_batch_cache_ensure_pos_and_seg(PTCacheEdit *edit,
}
}
GPU_vertformat_clear(&format);
/* initialize vertex format */
attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
attr_id.tan = GPU_vertformat_attr_add(&format, "nor", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
attr_id.ind = GPU_vertformat_attr_add(&format, "ind", GPU_COMP_I32, 1, GPU_FETCH_INT);
@@ -1383,8 +1375,6 @@ static void particle_batch_cache_ensure_pos(Object *object,
return;
}
static GPUVertFormat format = {0};
static uint pos_id, rot_id, val_id;
int i, curr_point;
ParticleData *pa;
ParticleKey state;
@@ -1400,12 +1390,14 @@ static void particle_batch_cache_ensure_pos(Object *object,
GPU_VERTBUF_DISCARD_SAFE(point_cache->pos);
if (format.attr_len == 0) {
/* initialize vertex format */
static uint pos_id, rot_id, val_id;
static const GPUVertFormat format = [&]() {
GPUVertFormat format{};
pos_id = GPU_vertformat_attr_add(&format, "part_pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
val_id = GPU_vertformat_attr_add(&format, "part_val", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
rot_id = GPU_vertformat_attr_add(&format, "part_rot", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
}
return format;
}();
point_cache->pos = GPU_vertbuf_create_with_format(format);
GPU_vertbuf_data_alloc(*point_cache->pos, psys->totpart);
@@ -1559,7 +1551,7 @@ static void particle_batch_cache_ensure_edit_pos_and_seg(PTCacheEdit *edit,
GPUVertBufRaw data_step;
GPUIndexBufBuilder elb;
uint pos_id, selection_id;
GPUVertFormat *edit_point_format = edit_points_vert_format_get(&pos_id, &selection_id);
const GPUVertFormat *edit_point_format = edit_points_vert_format_get(&pos_id, &selection_id);
hair_cache->pos = GPU_vertbuf_create_with_format(*edit_point_format);
GPU_vertbuf_data_alloc(*hair_cache->pos, hair_cache->point_len);
@@ -1620,7 +1612,7 @@ static void particle_batch_cache_ensure_edit_inner_pos(PTCacheEdit *edit,
}
uint pos_id, selection_id;
GPUVertFormat *edit_point_format = edit_points_vert_format_get(&pos_id, &selection_id);
const GPUVertFormat *edit_point_format = edit_points_vert_format_get(&pos_id, &selection_id);
cache->edit_inner_pos = GPU_vertbuf_create_with_format(*edit_point_format);
GPU_vertbuf_data_alloc(*cache->edit_inner_pos, cache->edit_inner_point_len);
@@ -1678,7 +1670,7 @@ static void particle_batch_cache_ensure_edit_tip_pos(PTCacheEdit *edit, Particle
}
uint pos_id, selection_id;
GPUVertFormat *edit_point_format = edit_points_vert_format_get(&pos_id, &selection_id);
const GPUVertFormat *edit_point_format = edit_points_vert_format_get(&pos_id, &selection_id);
cache->edit_tip_pos = GPU_vertbuf_create_with_format(*edit_point_format);
GPU_vertbuf_data_alloc(*cache->edit_tip_pos, cache->edit_tip_point_len);

View File

@@ -256,10 +256,11 @@ static void pointcloud_extract_position_and_radius(const PointCloud &pointcloud,
const bke::AttributeAccessor attributes = pointcloud.attributes();
const Span<float3> positions = pointcloud.positions();
const VArray<float> radii = *attributes.lookup<float>("radius");
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
static const GPUVertFormat format = [&]() {
GPUVertFormat format{};
GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
}
return format;
}();
GPUUsageType usage_flag = GPU_USAGE_STATIC | GPU_USAGE_FLAG_BUFFER_TEXTURE_ONLY;
GPU_vertbuf_init_with_format_ex(*cache.eval_cache.pos_rad, format, usage_flag);
@@ -307,10 +308,11 @@ static void pointcloud_extract_attribute(const PointCloud &pointcloud,
bke::AttributeReader<ColorGeometry4f> attribute = attributes.lookup_or_default<ColorGeometry4f>(
request.attribute_name, request.domain, {0.0f, 0.0f, 0.0f, 1.0f});
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
static const GPUVertFormat format = [&]() {
GPUVertFormat format{};
GPU_vertformat_attr_add(&format, "attr", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
}
return format;
}();
GPUUsageType usage_flag = GPU_USAGE_STATIC | GPU_USAGE_FLAG_BUFFER_TEXTURE_ONLY;
GPU_vertbuf_init_with_format_ex(attr_buf, format, usage_flag);
GPU_vertbuf_data_alloc(attr_buf, pointcloud.totpoint);

View File

@@ -58,47 +58,51 @@ namespace blender::draw {
static const GPUVertFormat &get_uvs_format()
{
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
static const GPUVertFormat format = [&]() {
GPUVertFormat format{};
GPU_vertformat_attr_add(&format, "uvs", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
}
return format;
}();
return format;
}
/* Vertex format for `OpenSubdiv::Osd::PatchArray`. */
static const GPUVertFormat &get_patch_array_format()
{
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
static const GPUVertFormat format = [&]() {
GPUVertFormat format{};
GPU_vertformat_attr_add(&format, "regDesc", GPU_COMP_I32, 1, GPU_FETCH_INT);
GPU_vertformat_attr_add(&format, "desc", GPU_COMP_I32, 1, GPU_FETCH_INT);
GPU_vertformat_attr_add(&format, "numPatches", GPU_COMP_I32, 1, GPU_FETCH_INT);
GPU_vertformat_attr_add(&format, "indexBase", GPU_COMP_I32, 1, GPU_FETCH_INT);
GPU_vertformat_attr_add(&format, "stride", GPU_COMP_I32, 1, GPU_FETCH_INT);
GPU_vertformat_attr_add(&format, "primitiveIdBase", GPU_COMP_I32, 1, GPU_FETCH_INT);
}
return format;
}();
return format;
}
/* Vertex format used for the `PatchTable::PatchHandle`. */
static const GPUVertFormat &get_patch_handle_format()
{
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
static const GPUVertFormat format = [&]() {
GPUVertFormat format{};
GPU_vertformat_attr_add(&format, "vertex_index", GPU_COMP_I32, 1, GPU_FETCH_INT);
GPU_vertformat_attr_add(&format, "array_index", GPU_COMP_I32, 1, GPU_FETCH_INT);
GPU_vertformat_attr_add(&format, "patch_index", GPU_COMP_I32, 1, GPU_FETCH_INT);
}
return format;
}();
return format;
}
/* Vertex format used for the quad-tree nodes of the PatchMap. */
static const GPUVertFormat &get_quadtree_format()
{
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
static const GPUVertFormat format = [&]() {
GPUVertFormat format{};
GPU_vertformat_attr_add(&format, "child", GPU_COMP_U32, 4, GPU_FETCH_INT);
}
return format;
}();
return format;
}
@@ -106,32 +110,35 @@ static const GPUVertFormat &get_quadtree_format()
* that the #gpu::VertBuf used to wrap the OpenSubdiv patch param buffer is valid. */
static const GPUVertFormat &get_patch_param_format()
{
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
static const GPUVertFormat format = [&]() {
GPUVertFormat format{};
GPU_vertformat_attr_add(&format, "data", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
}
return format;
}();
return format;
}
/* Vertex format for the patches' vertices index buffer. */
static const GPUVertFormat &get_patch_index_format()
{
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
static const GPUVertFormat format = [&]() {
GPUVertFormat format{};
GPU_vertformat_attr_add(&format, "data", GPU_COMP_I32, 1, GPU_FETCH_INT);
}
return format;
}();
return format;
}
/* Vertex format for the OpenSubdiv vertex buffer. */
static const GPUVertFormat &get_subdiv_vertex_format()
{
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
static const GPUVertFormat format = [&]() {
GPUVertFormat format{};
/* We use 4 components for the vectors to account for padding in the compute shaders, where
* vec3 is promoted to vec4. */
GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
}
return format;
}();
return format;
}
@@ -153,12 +160,13 @@ MINLINE CompressedPatchCoord make_patch_coord(int ptex_face_index, float u, floa
/* Vertex format used for the #CompressedPatchCoord. */
static const GPUVertFormat &get_blender_patch_coords_format()
{
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
static const GPUVertFormat format = [&]() {
GPUVertFormat format{};
/* WARNING! Adjust #CompressedPatchCoord accordingly. */
GPU_vertformat_attr_add(&format, "ptex_face_index", GPU_COMP_U32, 1, GPU_FETCH_INT);
GPU_vertformat_attr_add(&format, "uv", GPU_COMP_U32, 1, GPU_FETCH_INT);
}
return format;
}();
return format;
}
@@ -166,21 +174,23 @@ static const GPUVertFormat &get_blender_patch_coords_format()
static const GPUVertFormat &get_origindex_format()
{
static GPUVertFormat format;
if (format.attr_len == 0) {
static const GPUVertFormat format = [&]() {
GPUVertFormat format{};
GPU_vertformat_attr_add(&format, "index", GPU_COMP_I32, 1, GPU_FETCH_INT);
}
return format;
}();
return format;
}
const GPUVertFormat &draw_subdiv_get_pos_nor_format()
{
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
static const GPUVertFormat format = [&]() {
GPUVertFormat format{};
GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
GPU_vertformat_attr_add(&format, "nor", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
GPU_vertformat_alias_add(&format, "vnor");
}
return format;
}();
return format;
}
@@ -504,10 +514,11 @@ static void draw_subdiv_cache_update_extra_coarse_face_data(DRWSubdivCache &cach
{
if (cache.extra_coarse_face_data == nullptr) {
cache.extra_coarse_face_data = GPU_vertbuf_calloc();
static GPUVertFormat format;
if (format.attr_len == 0) {
static const GPUVertFormat format = []() {
GPUVertFormat format{};
GPU_vertformat_attr_add(&format, "data", GPU_COMP_U32, 1, GPU_FETCH_INT);
}
return format;
}();
GPU_vertbuf_init_with_format_ex(*cache.extra_coarse_face_data, format, GPU_USAGE_DYNAMIC);
GPU_vertbuf_data_alloc(*cache.extra_coarse_face_data,
mr.extract_type == MeshExtractType::BMesh ? cache.bm->totface :

View File

@@ -148,21 +148,26 @@ static void drw_volume_wireframe_cb(
GPU_use_hq_normals_workaround();
/* Create vertex buffer. */
static GPUVertFormat format = {0};
static GPUVertFormat format_hq = {0};
static struct {
uint pos_id, nor_id;
uint pos_hq_id, nor_hq_id;
} attr_id;
if (format.attr_len == 0) {
static const GPUVertFormat format = [&]() {
GPUVertFormat format{};
attr_id.pos_id = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
attr_id.nor_id = GPU_vertformat_attr_add(
&format, "nor", GPU_COMP_I10, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
attr_id.pos_id = GPU_vertformat_attr_add(&format_hq, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
attr_id.nor_id = GPU_vertformat_attr_add(
&format_hq, "nor", GPU_COMP_I16, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
}
return format;
}();
static const GPUVertFormat format_hq = [&]() {
GPUVertFormat format{};
attr_id.pos_hq_id = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
attr_id.nor_hq_id = GPU_vertformat_attr_add(
&format, "nor", GPU_COMP_I16, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
return format;
}();
static float normal[3] = {1.0f, 0.0f, 0.0f};
GPUNormal packed_normal;
@@ -233,11 +238,12 @@ static void drw_volume_selection_surface_cb(
Volume *volume = static_cast<Volume *>(userdata);
VolumeBatchCache *cache = static_cast<VolumeBatchCache *>(volume->batch_cache);
static GPUVertFormat format = {0};
static uint pos_id;
if (format.attr_len == 0) {
static const GPUVertFormat format = [&]() {
GPUVertFormat format{};
pos_id = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
}
return format;
}();
/* Create vertex buffer. */
gpu::VertBuf *vbo_surface = GPU_vertbuf_create_with_format(format);

View File

@@ -279,37 +279,29 @@ BLI_NOINLINE static void free_batches(const MutableSpan<gpu::Batch *> batches,
static const GPUVertFormat &position_format()
{
static GPUVertFormat format{};
if (format.attr_len == 0) {
GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
}
static const GPUVertFormat format = GPU_vertformat_from_attribute(
"pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
return format;
}
static const GPUVertFormat &normal_format()
{
static GPUVertFormat format{};
if (format.attr_len == 0) {
GPU_vertformat_attr_add(&format, "nor", GPU_COMP_I16, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
}
static const GPUVertFormat format = GPU_vertformat_from_attribute(
"nor", GPU_COMP_I16, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
return format;
}
static const GPUVertFormat &mask_format()
{
static GPUVertFormat format{};
if (format.attr_len == 0) {
GPU_vertformat_attr_add(&format, "msk", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
}
static const GPUVertFormat format = GPU_vertformat_from_attribute(
"msk", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
return format;
}
static const GPUVertFormat &face_set_format()
{
static GPUVertFormat format{};
if (format.attr_len == 0) {
GPU_vertformat_attr_add(&format, "fset", GPU_COMP_U8, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
}
static const GPUVertFormat format = GPU_vertformat_from_attribute(
"fset", GPU_COMP_U8, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
return format;
}

View File

@@ -260,10 +260,8 @@ static void extract_lines_loose_geom_subdiv(const DRWSubdivCache &subdiv_cache,
const int loose_edges_num = subdiv_loose_edges_num(mr, subdiv_cache);
/* Update flags for loose edges, points are already handled. */
static GPUVertFormat format;
if (format.attr_len == 0) {
GPU_vertformat_attr_add(&format, "data", GPU_COMP_U32, 1, GPU_FETCH_INT);
}
static const GPUVertFormat format = GPU_vertformat_from_attribute(
"data", GPU_COMP_U32, 1, GPU_FETCH_INT);
gpu::VertBuf *flags = GPU_vertbuf_calloc();
GPU_vertbuf_init_with_format(*flags, format);

View File

@@ -303,10 +303,8 @@ void extract_attributes_subdiv(const MeshRenderData &mr,
void extract_attr_viewer(const MeshRenderData &mr, gpu::VertBuf &vbo)
{
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
GPU_vertformat_attr_add(&format, "attribute_value", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
}
static const GPUVertFormat format = GPU_vertformat_from_attribute(
"attribute_value", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
GPU_vertbuf_init_with_format(vbo, format);
GPU_vertbuf_data_alloc(vbo, mr.corners_num);

View File

@@ -139,10 +139,8 @@ static void extract_edge_factor_bm(const MeshRenderData &mr, MutableSpan<T> vbo_
void extract_edge_factor(const MeshRenderData &mr, gpu::VertBuf &vbo)
{
if (GPU_crappy_amd_driver() || GPU_minimum_per_vertex_stride() > 1) {
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
GPU_vertformat_attr_add(&format, "wd", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
}
static const GPUVertFormat format = GPU_vertformat_from_attribute(
"wd", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
GPU_vertbuf_init_with_format(vbo, format);
GPU_vertbuf_data_alloc(vbo, mr.corners_num + mr.loose_indices_num);
MutableSpan vbo_data = vbo.data<float>();
@@ -155,10 +153,8 @@ void extract_edge_factor(const MeshRenderData &mr, gpu::VertBuf &vbo)
vbo_data.take_back(mr.loose_indices_num).fill(0.0f);
}
else {
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
GPU_vertformat_attr_add(&format, "wd", GPU_COMP_U8, 1, GPU_FETCH_INT_TO_FLOAT_UNIT);
}
static const GPUVertFormat format = GPU_vertformat_from_attribute(
"wd", GPU_COMP_U8, 1, GPU_FETCH_INT_TO_FLOAT_UNIT);
GPU_vertbuf_init_with_format(vbo, format);
GPU_vertbuf_data_alloc(vbo, mr.corners_num + mr.loose_indices_num);
MutableSpan vbo_data = vbo.data<uint8_t>();
@@ -176,15 +172,14 @@ void extract_edge_factor(const MeshRenderData &mr, gpu::VertBuf &vbo)
* the buggy AMD driver case. */
static const GPUVertFormat &get_subdiv_edge_fac_format()
{
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
if (GPU_crappy_amd_driver() || GPU_minimum_per_vertex_stride() > 1) {
GPU_vertformat_attr_add(&format, "wd", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
}
else {
GPU_vertformat_attr_add(&format, "wd", GPU_COMP_U8, 1, GPU_FETCH_INT_TO_FLOAT_UNIT);
}
if (GPU_crappy_amd_driver() || GPU_minimum_per_vertex_stride() > 1) {
static const GPUVertFormat format = GPU_vertformat_from_attribute(
"wd", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
return format;
}
static const GPUVertFormat format = GPU_vertformat_from_attribute(
"wd", GPU_COMP_U8, 1, GPU_FETCH_INT_TO_FLOAT_UNIT);
return format;
}
@@ -192,10 +187,8 @@ static gpu::VertBuf *build_poly_other_map_vbo(const DRWSubdivCache &subdiv_cache
{
gpu::VertBuf *vbo = GPU_vertbuf_calloc();
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
GPU_vertformat_attr_add(&format, "poly_other", GPU_COMP_I32, 1, GPU_FETCH_INT);
}
static const GPUVertFormat format = GPU_vertformat_from_attribute(
"poly_other", GPU_COMP_I32, 1, GPU_FETCH_INT);
GPU_vertbuf_init_with_format(*vbo, format);
GPU_vertbuf_data_alloc(*vbo, subdiv_cache.num_subdiv_loops);

View File

@@ -102,12 +102,13 @@ static void mesh_render_data_vert_flag(const MeshRenderData &mr,
static const GPUVertFormat &get_edit_data_format()
{
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
static const GPUVertFormat format = []() {
GPUVertFormat format{};
/* WARNING: Adjust #EditLoopData struct accordingly. */
GPU_vertformat_attr_add(&format, "data", GPU_COMP_U8, 4, GPU_FETCH_INT);
GPU_vertformat_alias_add(&format, "flag");
}
return format;
}();
return format;
}

View File

@@ -22,12 +22,13 @@ namespace blender::draw {
static const GPUVertFormat &edituv_data_format()
{
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
static const GPUVertFormat format = []() {
GPUVertFormat format{};
/* WARNING: Adjust #EditLoopData struct accordingly. */
GPU_vertformat_attr_add(&format, "data", GPU_COMP_U8, 4, GPU_FETCH_INT);
GPU_vertformat_alias_add(&format, "flag");
}
return format;
}();
return format;
}

View File

@@ -194,12 +194,14 @@ static void extract_uv_stretch_angle_mesh(const MeshRenderData &mr,
void extract_edituv_stretch_angle(const MeshRenderData &mr, gpu::VertBuf &vbo)
{
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
static const GPUVertFormat format = []() {
GPUVertFormat format{};
/* Waning: adjust #UVStretchAngle struct accordingly. */
GPU_vertformat_attr_add(&format, "uv_angles", GPU_COMP_I16, 2, GPU_FETCH_INT_TO_FLOAT_UNIT);
GPU_vertformat_attr_add(&format, "angle", GPU_COMP_I16, 1, GPU_FETCH_INT_TO_FLOAT_UNIT);
}
return format;
}();
GPU_vertbuf_init_with_format(vbo, format);
GPU_vertbuf_data_alloc(vbo, mr.corners_num);
MutableSpan vbo_data = vbo.data<UVStretchAngle>();
@@ -214,12 +216,13 @@ void extract_edituv_stretch_angle(const MeshRenderData &mr, gpu::VertBuf &vbo)
static const GPUVertFormat &get_edituv_stretch_angle_format_subdiv()
{
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
static const GPUVertFormat format = []() {
GPUVertFormat format{};
/* Waning: adjust #UVStretchAngle struct accordingly. */
GPU_vertformat_attr_add(&format, "angle", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
GPU_vertformat_attr_add(&format, "uv_angles", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
}
return format;
}();
return format;
}

View File

@@ -100,10 +100,8 @@ void extract_edituv_stretch_area(const MeshRenderData &mr,
tot_area = info.tot_area;
tot_uv_area = info.tot_uv_area;
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
GPU_vertformat_attr_add(&format, "ratio", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
}
static const GPUVertFormat format = GPU_vertformat_from_attribute(
"ratio", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
GPU_vertbuf_init_with_format(vbo, format);
GPU_vertbuf_data_alloc(vbo, mr.corners_num);
MutableSpan<float> vbo_data = vbo.data<float>();
@@ -138,10 +136,8 @@ void extract_edituv_stretch_area_subdiv(const MeshRenderData &mr,
float &tot_area,
float &tot_uv_area)
{
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
GPU_vertformat_attr_add(&format, "ratio", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
}
static const GPUVertFormat format = GPU_vertformat_from_attribute(
"ratio", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
GPU_vertbuf_init_build_on_device(vbo, format, subdiv_cache.num_subdiv_loops);
gpu::VertBuf *coarse_vbo = GPU_vertbuf_calloc();

View File

@@ -14,11 +14,12 @@ namespace blender::draw {
void extract_face_dots_edituv_data(const MeshRenderData &mr, gpu::VertBuf &vbo)
{
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
static const GPUVertFormat format = []() {
GPUVertFormat format{};
GPU_vertformat_attr_add(&format, "data", GPU_COMP_U8, 4, GPU_FETCH_INT);
GPU_vertformat_alias_add(&format, "flag");
}
return format;
}();
GPU_vertbuf_init_with_format(vbo, format);
GPU_vertbuf_data_alloc(vbo, mr.faces_num);
MutableSpan vbo_data = vbo.data<EditLoopData>();

View File

@@ -58,10 +58,8 @@ void extract_face_dot_normals_bm(const MeshRenderData &mr, MutableSpan<GPUType>
void extract_face_dot_normals(const MeshRenderData &mr, const bool use_hq, gpu::VertBuf &vbo)
{
if (use_hq) {
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
GPU_vertformat_attr_add(&format, "norAndFlag", GPU_COMP_I16, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
}
static const GPUVertFormat format = GPU_vertformat_from_attribute(
"norAndFlag", GPU_COMP_I16, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
GPU_vertbuf_init_with_format(vbo, format);
GPU_vertbuf_data_alloc(vbo, mr.faces_num);
MutableSpan vbo_data = vbo.data<short4>();
@@ -74,10 +72,8 @@ void extract_face_dot_normals(const MeshRenderData &mr, const bool use_hq, gpu::
}
}
else {
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
GPU_vertformat_attr_add(&format, "norAndFlag", GPU_COMP_I10, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
}
static const GPUVertFormat format = GPU_vertformat_from_attribute(
"norAndFlag", GPU_COMP_I10, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
GPU_vertbuf_init_with_format(vbo, format);
GPU_vertbuf_data_alloc(vbo, mr.faces_num);
MutableSpan vbo_data = vbo.data<GPUPackedNormal>();

View File

@@ -16,19 +16,15 @@ namespace blender::draw {
static const GPUVertFormat &get_fdots_pos_format()
{
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
}
static const GPUVertFormat format = GPU_vertformat_from_attribute(
"pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
return format;
}
static const GPUVertFormat &get_fdots_nor_format_subdiv()
{
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
GPU_vertformat_attr_add(&format, "norAndFlag", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
}
static const GPUVertFormat format = GPU_vertformat_from_attribute(
"norAndFlag", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
return format;
}

View File

@@ -65,12 +65,13 @@ static void extract_face_dots_uv_bm(const MeshRenderData &mr, MutableSpan<float2
void extract_face_dots_uv(const MeshRenderData &mr, gpu::VertBuf &vbo)
{
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
static const GPUVertFormat format = []() {
GPUVertFormat format{};
GPU_vertformat_attr_add(&format, "u", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
GPU_vertformat_alias_add(&format, "au");
GPU_vertformat_alias_add(&format, "pos");
}
return format;
}();
GPU_vertbuf_init_with_format(vbo, format);
GPU_vertbuf_data_alloc(vbo, mr.faces_num);
MutableSpan<float2> vbo_data = vbo.data<float2>();

View File

@@ -281,11 +281,12 @@ void extract_normals(const MeshRenderData &mr, const bool use_hq, gpu::VertBuf &
{
const int size = mr.corners_num + mr.loose_indices_num;
if (use_hq) {
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
static const GPUVertFormat format = []() {
GPUVertFormat format{};
GPU_vertformat_attr_add(&format, "nor", GPU_COMP_I16, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
GPU_vertformat_alias_add(&format, "lnor");
}
return format;
}();
GPU_vertbuf_init_with_format(vbo, format);
GPU_vertbuf_data_alloc(vbo, size);
MutableSpan vbo_data = vbo.data<short4>();
@@ -304,11 +305,12 @@ void extract_normals(const MeshRenderData &mr, const bool use_hq, gpu::VertBuf &
loose_data.fill(short4(0));
}
else {
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
static const GPUVertFormat format = []() {
GPUVertFormat format{};
GPU_vertformat_attr_add(&format, "nor", GPU_COMP_I10, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
GPU_vertformat_alias_add(&format, "lnor");
}
return format;
}();
GPU_vertbuf_init_with_format(vbo, format);
GPU_vertbuf_data_alloc(vbo, size);
MutableSpan vbo_data = vbo.data<GPUPackedNormal>();
@@ -330,12 +332,13 @@ void extract_normals(const MeshRenderData &mr, const bool use_hq, gpu::VertBuf &
static const GPUVertFormat &get_subdiv_lnor_format()
{
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
static const GPUVertFormat format = []() {
GPUVertFormat format{};
GPU_vertformat_attr_add(&format, "nor", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
GPU_vertformat_alias_add(&format, "lnor");
GPU_vertformat_alias_add(&format, "vnor");
}
return format;
}();
return format;
}

View File

@@ -571,10 +571,9 @@ void extract_mesh_analysis(const MeshRenderData &mr, gpu::VertBuf &vbo)
{
BLI_assert(mr.edit_bmesh);
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
GPU_vertformat_attr_add(&format, "weight", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
}
static const GPUVertFormat format = GPU_vertformat_from_attribute(
"weight", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
GPU_vertbuf_init_with_format(vbo, format);
GPU_vertbuf_data_alloc(vbo, mr.corners_num);
MutableSpan<float> vbo_data = vbo.data<float>();

View File

@@ -16,14 +16,13 @@ void extract_orco(const MeshRenderData &mr, gpu::VertBuf &vbo)
static_cast<const float3 *>(CustomData_get_layer(&mr.mesh->vert_data, CD_ORCO)),
mr.corners_num);
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
/* FIXME(fclem): We use the last component as a way to differentiate from generic vertex
* attributes. This is a substantial waste of video-ram and should be done another way.
* Unfortunately, at the time of writing, I did not found any other "non disruptive"
* alternative. */
GPU_vertformat_attr_add(&format, "orco", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
}
/* FIXME(fclem): We use the last component as a way to differentiate from generic vertex
* attributes. This is a substantial waste of video-ram and should be done another way.
* Unfortunately, at the time of writing, I did not found any other "non disruptive"
* alternative. */
static const GPUVertFormat format = GPU_vertformat_from_attribute(
"orco", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
GPU_vertbuf_init_with_format(vbo, format);
GPU_vertbuf_data_alloc(vbo, mr.corners_num);
MutableSpan vbo_data = vbo.data<float4>();

View File

@@ -69,10 +69,8 @@ static void extract_positions_bm(const MeshRenderData &mr, MutableSpan<float3> v
void extract_positions(const MeshRenderData &mr, gpu::VertBuf &vbo)
{
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
}
static const GPUVertFormat format = GPU_vertformat_from_attribute(
"pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
GPU_vertbuf_init_with_format(vbo, format);
GPU_vertbuf_data_alloc(vbo, mr.corners_num + mr.loose_indices_num);
@@ -87,21 +85,23 @@ void extract_positions(const MeshRenderData &mr, gpu::VertBuf &vbo)
static const GPUVertFormat &get_normals_format()
{
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
static const GPUVertFormat format = []() {
GPUVertFormat format{};
GPU_vertformat_attr_add(&format, "nor", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
GPU_vertformat_alias_add(&format, "lnor");
}
return format;
}();
return format;
}
static const GPUVertFormat &get_custom_normals_format()
{
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
static const GPUVertFormat format = []() {
GPUVertFormat format{};
GPU_vertformat_attr_add(&format, "nor", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
GPU_vertformat_alias_add(&format, "lnor");
}
return format;
}();
return format;
}
@@ -192,12 +192,14 @@ void extract_positions_subdiv(const DRWSubdivCache &subdiv_cache,
return;
}
static const GPUVertFormat flag_format = []() {
GPUVertFormat format{};
GPU_vertformat_attr_add(&format, "data", GPU_COMP_I32, 1, GPU_FETCH_INT);
GPU_vertformat_alias_add(&format, "flag");
return format;
}();
gpu::VertBuf *flags_buffer = GPU_vertbuf_calloc();
static GPUVertFormat flag_format = {0};
if (flag_format.attr_len == 0) {
GPU_vertformat_attr_add(&flag_format, "data", GPU_COMP_I32, 1, GPU_FETCH_INT);
GPU_vertformat_alias_add(&flag_format, "flag");
}
GPU_vertbuf_init_with_format(*flags_buffer, flag_format);
GPU_vertbuf_data_alloc(*flags_buffer, divide_ceil_u(mr.verts_num, 4));
char *flags = flags_buffer->data<char>().data();
@@ -205,14 +207,12 @@ void extract_positions_subdiv(const DRWSubdivCache &subdiv_cache,
GPU_vertbuf_tag_dirty(flags_buffer);
if (orco_vbo) {
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
/* FIXME(fclem): We use the last component as a way to differentiate from generic vertex
* attributes. This is a substantial waste of video-ram and should be done another way.
* Unfortunately, at the time of writing, I did not found any other "non disruptive"
* alternative. */
GPU_vertformat_attr_add(&format, "orco", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
}
/* FIXME(fclem): We use the last component as a way to differentiate from generic vertex
* attributes. This is a substantial waste of video-ram and should be done another way.
* Unfortunately, at the time of writing, I did not found any other "non disruptive"
* alternative. */
static const GPUVertFormat format = GPU_vertformat_from_attribute(
"orco", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
GPU_vertbuf_init_build_on_device(*orco_vbo, format, subdiv_cache.num_subdiv_loops);
}

View File

@@ -19,11 +19,12 @@ namespace blender::draw {
static const GPUVertFormat &get_sculpt_data_format()
{
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
static const GPUVertFormat format = []() {
GPUVertFormat format{};
GPU_vertformat_attr_add(&format, "fset", GPU_COMP_U8, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
GPU_vertformat_attr_add(&format, "msk", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
}
return format;
}();
return format;
}

View File

@@ -15,10 +15,8 @@ namespace blender::draw {
static MutableSpan<int> init_vbo_data(gpu::VertBuf &vbo, const int size)
{
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
GPU_vertformat_attr_add(&format, "index", GPU_COMP_I32, 1, GPU_FETCH_INT);
}
static GPUVertFormat format = GPU_vertformat_from_attribute(
"index", GPU_COMP_I32, 1, GPU_FETCH_INT);
GPU_vertbuf_init_with_format(vbo, format);
GPU_vertbuf_data_alloc(vbo, size);
return vbo.data<int>();

View File

@@ -22,11 +22,12 @@ void extract_skin_roots(const MeshRenderData &mr, gpu::VertBuf &vbo)
/* Exclusively for edit mode. */
BLI_assert(mr.bm);
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
static const GPUVertFormat format = []() {
GPUVertFormat format{};
GPU_vertformat_attr_add(&format, "size", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
GPU_vertformat_attr_add(&format, "local_pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
}
return format;
}();
Vector<SkinRootData> skin_roots;
const int offset = CustomData_get_offset(&mr.bm->vdata, CD_MVERT_SKIN);

View File

@@ -238,10 +238,8 @@ void extract_tangents(const MeshRenderData &mr,
static const GPUVertFormat &get_coarse_tan_format()
{
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
GPU_vertformat_attr_add(&format, "tan", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
}
static GPUVertFormat format = GPU_vertformat_from_attribute(
"tan", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
return format;
}

View File

@@ -68,10 +68,9 @@ static void extract_vert_normals_bm(const MeshRenderData &mr,
void extract_vert_normals(const MeshRenderData &mr, gpu::VertBuf &vbo)
{
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
GPU_vertformat_attr_add(&format, "vnor", GPU_COMP_I10, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
}
static GPUVertFormat format = GPU_vertformat_from_attribute(
"vnor", GPU_COMP_I10, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
const int size = mr.corners_num + mr.loose_indices_num;
GPU_vertbuf_init_with_format(vbo, format);
GPU_vertbuf_data_alloc(vbo, size);

View File

@@ -115,10 +115,9 @@ static void extract_weights_bm(const MeshRenderData &mr,
void extract_weights(const MeshRenderData &mr, const MeshBatchCache &cache, gpu::VertBuf &vbo)
{
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
GPU_vertformat_attr_add(&format, "weight", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
}
static GPUVertFormat format = GPU_vertformat_from_attribute(
"weight", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
GPU_vertbuf_init_with_format(vbo, format);
GPU_vertbuf_data_alloc(vbo, mr.corners_num);
MutableSpan<float> vbo_data = vbo.data<float>();
@@ -142,10 +141,9 @@ void extract_weights_subdiv(const MeshRenderData &mr,
const MeshBatchCache &cache,
gpu::VertBuf &vbo)
{
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
GPU_vertformat_attr_add(&format, "weight", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
}
static GPUVertFormat format = GPU_vertformat_from_attribute(
"weight", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
GPU_vertbuf_init_build_on_device(vbo, format, subdiv_cache.num_subdiv_loops);
gpu::VertBuf *coarse_weights = GPU_vertbuf_calloc();

View File

@@ -96,6 +96,15 @@ uint GPU_vertformat_attr_add(
GPUVertFormat *, const char *name, GPUVertCompType, uint comp_len, GPUVertFetchMode);
void GPU_vertformat_alias_add(GPUVertFormat *, const char *alias);
/**
* Return a vertex format from a single attribute description.
* The attribute ID is ensured to be 0.
*/
GPUVertFormat GPU_vertformat_from_attribute(const char *name,
const GPUVertCompType comp_type,
const uint comp_len,
const GPUVertFetchMode fetch_mode);
/**
* Makes vertex attribute from the next vertices to be accessible in the vertex shader.
* For an attribute named "attr" you can access the next nth vertex using "attr{number}".

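The new GPU_vertformat_from_attribute() helper covers the common single-attribute case, so a call site can initialize its static const format in one expression. A minimal usage sketch (vbo and vert_len stand in for a caller's gpu::VertBuf and element count; the attribute mirrors the ones used throughout this commit):

  static const GPUVertFormat format = GPU_vertformat_from_attribute(
      "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
  GPU_vertbuf_init_with_format(vbo, format);
  GPU_vertbuf_data_alloc(vbo, vert_len);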
View File

@@ -174,6 +174,16 @@ void GPU_vertformat_alias_add(GPUVertFormat *format, const char *alias)
attr->names[attr->name_len++] = copy_attr_name(format, alias);
}
GPUVertFormat GPU_vertformat_from_attribute(const char *name,
const GPUVertCompType comp_type,
const uint comp_len,
const GPUVertFetchMode fetch_mode)
{
GPUVertFormat format{};
GPU_vertformat_attr_add(&format, name, comp_type, comp_len, fetch_mode);
return format;
}
void GPU_vertformat_multiload_enable(GPUVertFormat *format, int load_count)
{
/* Sanity check. Maximum can be upgraded if needed. */