2022-02-11 09:07:11 +11:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0-or-later
|
|
|
|
|
* Copyright 2017 Blender Foundation. All rights reserved. */
|
2017-01-25 09:16:29 +01:00
|
|
|
|
2019-02-18 08:08:12 +11:00
|
|
|
/** \file
|
|
|
|
|
* \ingroup draw
|
2017-01-25 09:16:29 +01:00
|
|
|
*
|
|
|
|
|
* \brief Mesh API for render engines
|
|
|
|
|
*/
|
|
|
|
|
|
2022-06-05 12:04:42 +02:00
|
|
|
#include <optional>
|
|
|
|
|
|
2017-01-25 09:16:29 +01:00
|
|
|
#include "MEM_guardedalloc.h"
|
|
|
|
|
|
2019-07-01 23:20:36 +10:00
|
|
|
#include "BLI_bitmap.h"
|
2018-09-29 19:42:09 +02:00
|
|
|
#include "BLI_buffer.h"
|
2020-03-19 09:33:03 +01:00
|
|
|
#include "BLI_edgehash.h"
|
2022-06-05 12:04:42 +02:00
|
|
|
#include "BLI_index_range.hh"
|
2020-04-03 17:38:58 +02:00
|
|
|
#include "BLI_listbase.h"
|
2022-06-05 12:04:42 +02:00
|
|
|
#include "BLI_map.hh"
|
2017-06-28 13:38:24 +10:00
|
|
|
#include "BLI_math_bits.h"
|
2020-03-19 09:33:03 +01:00
|
|
|
#include "BLI_math_vector.h"
|
2022-06-05 12:04:42 +02:00
|
|
|
#include "BLI_span.hh"
|
2017-05-01 17:58:15 +02:00
|
|
|
#include "BLI_string.h"
|
2020-06-02 15:07:17 +02:00
|
|
|
#include "BLI_task.h"
|
2020-03-19 09:33:03 +01:00
|
|
|
#include "BLI_utildefines.h"
|
2017-02-16 16:19:48 +01:00
|
|
|
|
2017-01-25 09:16:29 +01:00
|
|
|
#include "DNA_mesh_types.h"
|
|
|
|
|
#include "DNA_meshdata_types.h"
|
2017-05-03 18:55:40 +02:00
|
|
|
#include "DNA_object_types.h"
|
2018-12-17 23:00:05 +01:00
|
|
|
#include "DNA_scene_types.h"
|
2017-01-25 09:16:29 +01:00
|
|
|
|
2021-10-26 18:16:33 -03:00
|
|
|
#include "BKE_attribute.h"
|
2017-01-25 09:16:29 +01:00
|
|
|
#include "BKE_customdata.h"
|
2017-05-03 18:55:40 +02:00
|
|
|
#include "BKE_deform.h"
|
2017-01-25 09:16:29 +01:00
|
|
|
#include "BKE_editmesh.h"
|
2018-10-08 12:18:45 +11:00
|
|
|
#include "BKE_editmesh_cache.h"
|
2017-05-09 08:50:45 +10:00
|
|
|
#include "BKE_editmesh_tangent.h"
|
2017-01-25 09:16:29 +01:00
|
|
|
#include "BKE_mesh.h"
|
2018-10-11 15:07:29 +11:00
|
|
|
#include "BKE_mesh_runtime.h"
|
2020-03-19 09:33:03 +01:00
|
|
|
#include "BKE_mesh_tangent.h"
|
2019-07-07 18:58:11 +02:00
|
|
|
#include "BKE_modifier.h"
|
2018-12-17 23:00:05 +01:00
|
|
|
#include "BKE_object_deform.h"
|
2020-11-18 12:16:43 +01:00
|
|
|
#include "BKE_paint.h"
|
|
|
|
|
#include "BKE_pbvh.h"
|
OpenSubDiv: add support for an OpenGL evaluator
This evaluator is used in order to evaluate subdivision at render time, allowing for
faster renders of meshes with a subdivision surface modifier placed at the last
position in the modifier list.
When evaluating the subsurf modifier, we detect whether we can delegate evaluation
to the draw code. If so, the subdivision is first evaluated on the GPU using our own
custom evaluator (only the coarse data needs to be initially sent to the GPU), then,
buffers for the final `MeshBufferCache` are filled on the GPU using a set of
compute shaders. However, some buffers are still filled on the CPU side, if doing so
on the GPU is impractical (e.g. the line adjacency buffer used for x-ray, whose
logic is hardly GPU compatible).
This is done at the mesh buffer extraction level so that the result can be readily used
in the various OpenGL engines, without having to write custom geometry or tesselation
shaders.
We use our own subdivision evaluation shaders, instead of OpenSubDiv's vanilla one, in
order to control the data layout, and interpolation. For example, we store vertex colors
as compressed 16-bit integers, while OpenSubDiv's default evaluator only work for float
types.
In order to still access the modified geometry on the CPU side, for use in modifiers
or transform operators, a dedicated wrapper type is added `MESH_WRAPPER_TYPE_SUBD`.
Subdivision will be lazily evaluated via `BKE_object_get_evaluated_mesh` which will
create such a wrapper if possible. If the final subdivision surface is not needed on
the CPU side, `BKE_object_get_evaluated_mesh_no_subsurf` should be used.
Enabling or disabling GPU subdivision can be done through the user preferences (under
Viewport -> Subdivision).
See patch description for benchmarks.
Reviewed By: campbellbarton, jbakker, fclem, brecht, #eevee_viewport
Differential Revision: https://developer.blender.org/D12406
2021-12-27 16:34:47 +01:00
|
|
|
#include "BKE_subdiv_modifier.h"
|
2017-01-25 09:16:29 +01:00
|
|
|
|
2019-04-06 01:55:21 +02:00
|
|
|
#include "atomic_ops.h"
|
2018-12-17 17:01:06 +01:00
|
|
|
|
2017-02-23 11:54:40 +01:00
|
|
|
#include "bmesh.h"
|
|
|
|
|
|
2017-01-25 09:16:29 +01:00
|
|
|
#include "GPU_batch.h"
|
2017-06-28 13:38:24 +10:00
|
|
|
#include "GPU_material.h"
|
2018-05-19 13:31:44 +02:00
|
|
|
|
|
|
|
|
#include "DRW_render.h"
|
2017-01-25 09:16:29 +01:00
|
|
|
|
2018-09-29 19:42:09 +02:00
|
|
|
#include "ED_mesh.h"
|
|
|
|
|
#include "ED_uvedit.h"
|
|
|
|
|
|
2022-06-05 12:04:42 +02:00
|
|
|
#include "draw_cache_extract.hh"
|
2020-03-19 09:33:03 +01:00
|
|
|
#include "draw_cache_inline.h"
|
OpenSubDiv: add support for an OpenGL evaluator
This evaluator is used in order to evaluate subdivision at render time, allowing for
faster renders of meshes with a subdivision surface modifier placed at the last
position in the modifier list.
When evaluating the subsurf modifier, we detect whether we can delegate evaluation
to the draw code. If so, the subdivision is first evaluated on the GPU using our own
custom evaluator (only the coarse data needs to be initially sent to the GPU), then,
buffers for the final `MeshBufferCache` are filled on the GPU using a set of
compute shaders. However, some buffers are still filled on the CPU side, if doing so
on the GPU is impractical (e.g. the line adjacency buffer used for x-ray, whose
logic is hardly GPU compatible).
This is done at the mesh buffer extraction level so that the result can be readily used
in the various OpenGL engines, without having to write custom geometry or tesselation
shaders.
We use our own subdivision evaluation shaders, instead of OpenSubDiv's vanilla one, in
order to control the data layout, and interpolation. For example, we store vertex colors
as compressed 16-bit integers, while OpenSubDiv's default evaluator only work for float
types.
In order to still access the modified geometry on the CPU side, for use in modifiers
or transform operators, a dedicated wrapper type is added `MESH_WRAPPER_TYPE_SUBD`.
Subdivision will be lazily evaluated via `BKE_object_get_evaluated_mesh` which will
create such a wrapper if possible. If the final subdivision surface is not needed on
the CPU side, `BKE_object_get_evaluated_mesh_no_subsurf` should be used.
Enabling or disabling GPU subdivision can be done through the user preferences (under
Viewport -> Subdivision).
See patch description for benchmarks.
Reviewed By: campbellbarton, jbakker, fclem, brecht, #eevee_viewport
Differential Revision: https://developer.blender.org/D12406
2021-12-27 16:34:47 +01:00
|
|
|
#include "draw_subdivision.h"
|
2019-05-07 23:02:42 +02:00
|
|
|
|
2017-04-21 21:14:11 +10:00
|
|
|
#include "draw_cache_impl.h" /* own include */
|
2018-12-03 14:58:29 +11:00
|
|
|
|
2022-06-05 12:04:42 +02:00
|
|
|
#include "mesh_extractors/extract_mesh.hh"
|
|
|
|
|
|
|
|
|
|
using blender::IndexRange;
|
|
|
|
|
using blender::Map;
|
|
|
|
|
using blender::Span;
|
2021-07-26 09:54:59 -03:00
|
|
|
|
2021-06-24 10:53:22 -03:00
|
|
|
/* ---------------------------------------------------------------------- */
|
|
|
|
|
/** \name Dependencies between buffer and batch
|
|
|
|
|
* \{ */
|
|
|
|
|
|
|
|
|
|
/* clang-format off */
|
|
|
|
|
|
2021-08-23 12:33:12 -03:00
|
|
|
#define BUFFER_INDEX(buff_name) ((offsetof(MeshBufferList, buff_name) - offsetof(MeshBufferList, vbo)) / sizeof(void *))
|
|
|
|
|
#define BUFFER_LEN (sizeof(MeshBufferList) / sizeof(void *))
|
2021-08-23 09:52:13 -03:00
|
|
|
|
2022-06-05 12:04:42 +02:00
|
|
|
#define _BATCH_MAP1(a) batches_that_use_buffer(BUFFER_INDEX(a))
|
2021-08-23 09:52:13 -03:00
|
|
|
#define _BATCH_MAP2(a, b) _BATCH_MAP1(a) | _BATCH_MAP1(b)
|
|
|
|
|
#define _BATCH_MAP3(a, b, c) _BATCH_MAP2(a, b) | _BATCH_MAP1(c)
|
|
|
|
|
#define _BATCH_MAP4(a, b, c, d) _BATCH_MAP3(a, b, c) | _BATCH_MAP1(d)
|
|
|
|
|
#define _BATCH_MAP5(a, b, c, d, e) _BATCH_MAP4(a, b, c, d) | _BATCH_MAP1(e)
|
|
|
|
|
#define _BATCH_MAP6(a, b, c, d, e, f) _BATCH_MAP5(a, b, c, d, e) | _BATCH_MAP1(f)
|
|
|
|
|
#define _BATCH_MAP7(a, b, c, d, e, f, g) _BATCH_MAP6(a, b, c, d, e, f) | _BATCH_MAP1(g)
|
|
|
|
|
#define _BATCH_MAP8(a, b, c, d, e, f, g, h) _BATCH_MAP7(a, b, c, d, e, f, g) | _BATCH_MAP1(h)
|
|
|
|
|
#define _BATCH_MAP9(a, b, c, d, e, f, g, h, i) _BATCH_MAP8(a, b, c, d, e, f, g, h) | _BATCH_MAP1(i)
|
|
|
|
|
#define _BATCH_MAP10(a, b, c, d, e, f, g, h, i, j) _BATCH_MAP9(a, b, c, d, e, f, g, h, i) | _BATCH_MAP1(j)
|
|
|
|
|
|
|
|
|
|
#define BATCH_MAP(...) VA_NARGS_CALL_OVERLOAD(_BATCH_MAP, __VA_ARGS__)
|
2021-06-24 10:53:22 -03:00
|
|
|
|
|
|
|
|
/* clang-format on */
|
|
|
|
|
|
2021-08-23 09:52:13 -03:00
|
|
|
#define TRIS_PER_MAT_INDEX BUFFER_LEN
|
2022-06-05 12:04:42 +02:00
|
|
|
|
|
|
|
|
/**
 * Return the set of batches (as a #DRWBatchFlag mask) that read from the buffer at
 * `buffer_index` inside #MeshBufferList.
 *
 * Used to determine which batches must be discarded/rebuilt when a given VBO or IBO
 * is invalidated. `buffer_index` is expected to come from the #BUFFER_INDEX macro,
 * or be #TRIS_PER_MAT_INDEX for the per-material triangle index buffers.
 *
 * Returns 0 (no batches) for an index that matches no known buffer.
 */
static constexpr DRWBatchFlag batches_that_use_buffer(const int buffer_index)
{
  switch (buffer_index) {
    case BUFFER_INDEX(vbo.pos_nor):
      /* Positions/normals feed nearly every batch. */
      return MBC_SURFACE | MBC_SURFACE_WEIGHTS | MBC_EDIT_TRIANGLES | MBC_EDIT_VERTICES |
             MBC_EDIT_EDGES | MBC_EDIT_VNOR | MBC_EDIT_LNOR | MBC_EDIT_MESH_ANALYSIS |
             MBC_EDIT_SELECTION_VERTS | MBC_EDIT_SELECTION_EDGES | MBC_EDIT_SELECTION_FACES |
             MBC_ALL_VERTS | MBC_ALL_EDGES | MBC_LOOSE_EDGES | MBC_EDGE_DETECTION |
             MBC_WIRE_EDGES | MBC_WIRE_LOOPS | MBC_SCULPT_OVERLAYS | MBC_SURFACE_PER_MAT;
    case BUFFER_INDEX(vbo.lnor):
      return MBC_SURFACE | MBC_EDIT_LNOR | MBC_WIRE_LOOPS | MBC_SURFACE_PER_MAT;
    case BUFFER_INDEX(vbo.edge_fac):
      return MBC_WIRE_EDGES;
    case BUFFER_INDEX(vbo.weights):
      return MBC_SURFACE_WEIGHTS;
    case BUFFER_INDEX(vbo.uv):
      return MBC_SURFACE | MBC_EDITUV_FACES_STRETCH_AREA | MBC_EDITUV_FACES_STRETCH_ANGLE |
             MBC_EDITUV_FACES | MBC_EDITUV_EDGES | MBC_EDITUV_VERTS | MBC_WIRE_LOOPS_UVS |
             MBC_SURFACE_PER_MAT;
    case BUFFER_INDEX(vbo.tan):
      return MBC_SURFACE_PER_MAT;
    case BUFFER_INDEX(vbo.vcol):
      return MBC_SURFACE | MBC_SURFACE_PER_MAT;
    case BUFFER_INDEX(vbo.sculpt_data):
      return MBC_SCULPT_OVERLAYS;
    case BUFFER_INDEX(vbo.orco):
      return MBC_SURFACE_PER_MAT;
    case BUFFER_INDEX(vbo.edit_data):
      return MBC_EDIT_TRIANGLES | MBC_EDIT_EDGES | MBC_EDIT_VERTICES;
    case BUFFER_INDEX(vbo.edituv_data):
      return MBC_EDITUV_FACES | MBC_EDITUV_FACES_STRETCH_AREA | MBC_EDITUV_FACES_STRETCH_ANGLE |
             MBC_EDITUV_EDGES | MBC_EDITUV_VERTS;
    case BUFFER_INDEX(vbo.edituv_stretch_area):
      return MBC_EDITUV_FACES_STRETCH_AREA;
    case BUFFER_INDEX(vbo.edituv_stretch_angle):
      return MBC_EDITUV_FACES_STRETCH_ANGLE;
    case BUFFER_INDEX(vbo.mesh_analysis):
      return MBC_EDIT_MESH_ANALYSIS;
    case BUFFER_INDEX(vbo.fdots_pos):
      return MBC_EDIT_FACEDOTS | MBC_EDIT_SELECTION_FACEDOTS;
    case BUFFER_INDEX(vbo.fdots_nor):
      return MBC_EDIT_FACEDOTS;
    case BUFFER_INDEX(vbo.fdots_uv):
      return MBC_EDITUV_FACEDOTS;
    case BUFFER_INDEX(vbo.fdots_edituv_data):
      return MBC_EDITUV_FACEDOTS;
    case BUFFER_INDEX(vbo.skin_roots):
      return MBC_SKIN_ROOTS;
    case BUFFER_INDEX(vbo.vert_idx):
      return MBC_EDIT_SELECTION_VERTS;
    case BUFFER_INDEX(vbo.edge_idx):
      return MBC_EDIT_SELECTION_EDGES;
    case BUFFER_INDEX(vbo.poly_idx):
      return MBC_EDIT_SELECTION_FACES;
    case BUFFER_INDEX(vbo.fdot_idx):
      return MBC_EDIT_SELECTION_FACEDOTS;
    /* All generic attribute VBO slots feed the same batches. */
    case BUFFER_INDEX(vbo.attr[0]):
    case BUFFER_INDEX(vbo.attr[1]):
    case BUFFER_INDEX(vbo.attr[2]):
    case BUFFER_INDEX(vbo.attr[3]):
    case BUFFER_INDEX(vbo.attr[4]):
    case BUFFER_INDEX(vbo.attr[5]):
    case BUFFER_INDEX(vbo.attr[6]):
    case BUFFER_INDEX(vbo.attr[7]):
    case BUFFER_INDEX(vbo.attr[8]):
    case BUFFER_INDEX(vbo.attr[9]):
    case BUFFER_INDEX(vbo.attr[10]):
    case BUFFER_INDEX(vbo.attr[11]):
    case BUFFER_INDEX(vbo.attr[12]):
    case BUFFER_INDEX(vbo.attr[13]):
    case BUFFER_INDEX(vbo.attr[14]):
      return MBC_SURFACE | MBC_SURFACE_PER_MAT;
    case BUFFER_INDEX(ibo.tris):
      return MBC_SURFACE | MBC_SURFACE_WEIGHTS | MBC_EDIT_TRIANGLES | MBC_EDIT_LNOR |
             MBC_EDIT_MESH_ANALYSIS | MBC_EDIT_SELECTION_FACES | MBC_SCULPT_OVERLAYS;
    case BUFFER_INDEX(ibo.lines):
      return MBC_EDIT_EDGES | MBC_EDIT_SELECTION_EDGES | MBC_ALL_EDGES | MBC_WIRE_EDGES;
    case BUFFER_INDEX(ibo.lines_loose):
      return MBC_LOOSE_EDGES;
    case BUFFER_INDEX(ibo.points):
      return MBC_EDIT_VNOR | MBC_EDIT_VERTICES | MBC_EDIT_SELECTION_VERTS;
    case BUFFER_INDEX(ibo.fdots):
      return MBC_EDIT_FACEDOTS | MBC_EDIT_SELECTION_FACEDOTS;
    case BUFFER_INDEX(ibo.lines_paint_mask):
      return MBC_WIRE_LOOPS;
    case BUFFER_INDEX(ibo.lines_adjacency):
      return MBC_EDGE_DETECTION;
    case BUFFER_INDEX(ibo.edituv_tris):
      return MBC_EDITUV_FACES | MBC_EDITUV_FACES_STRETCH_AREA | MBC_EDITUV_FACES_STRETCH_ANGLE;
    case BUFFER_INDEX(ibo.edituv_lines):
      return MBC_EDITUV_EDGES | MBC_WIRE_LOOPS_UVS;
    case BUFFER_INDEX(ibo.edituv_points):
      return MBC_EDITUV_VERTS;
    case BUFFER_INDEX(ibo.edituv_fdots):
      return MBC_EDITUV_FACEDOTS;
    case TRIS_PER_MAT_INDEX:
      return MBC_SURFACE_PER_MAT;
  }
  /* Unknown buffer index: no batch depends on it. */
  return (DRWBatchFlag)0;
}
|
2021-06-14 08:00:42 -03:00
|
|
|
|
|
|
|
|
static void mesh_batch_cache_discard_surface_batches(MeshBatchCache *cache);
|
2017-04-21 21:14:11 +10:00
|
|
|
static void mesh_batch_cache_clear(Mesh *me);
|
|
|
|
|
|
2021-06-14 08:00:42 -03:00
|
|
|
/**
 * Discard every batch in `cache` whose bit is set in `batch_map`, clearing the
 * matching "ready" bits. Per-material surface batches are stored outside the
 * flat batch array and are discarded through their dedicated helper.
 */
static void mesh_batch_cache_discard_batch(MeshBatchCache *cache, const DRWBatchFlag batch_map)
{
  GPUBatch **batch_array = (GPUBatch **)&cache->batch;
  for (int index = 0; index < MBC_BATCH_LEN; index++) {
    const DRWBatchFlag flag = (DRWBatchFlag)(1u << index);
    if ((batch_map & flag) == 0) {
      continue;
    }
    GPU_BATCH_DISCARD_SAFE(batch_array[index]);
    cache->batch_ready &= ~flag;
  }
  if ((batch_map & MBC_SURFACE_PER_MAT) != 0) {
    mesh_batch_cache_discard_surface_batches(cache);
  }
}
|
|
|
|
|
|
2018-12-16 15:17:31 +01:00
|
|
|
/* Return true if all layers in _b_ are inside _a_. */
|
2019-04-06 01:55:21 +02:00
|
|
|
/** True when every layer bit set in `b` is also set in `a` (subset test on the raw bits). */
BLI_INLINE bool mesh_cd_layers_type_overlap(DRW_MeshCDMask a, DRW_MeshCDMask b)
{
  /* The mask is treated as a plain 32-bit word, matching the other mask helpers. */
  const uint32_t bits_a = *((uint32_t *)&a);
  const uint32_t bits_b = *((uint32_t *)&b);
  return (bits_a & bits_b) == bits_b;
}
|
|
|
|
|
|
2019-04-19 02:22:22 +02:00
|
|
|
/** True when both masks request exactly the same set of layers. */
BLI_INLINE bool mesh_cd_layers_type_equal(DRW_MeshCDMask a, DRW_MeshCDMask b)
{
  const uint32_t bits_a = *((uint32_t *)&a);
  const uint32_t bits_b = *((uint32_t *)&b);
  return bits_a == bits_b;
}
|
|
|
|
|
|
2019-04-06 01:55:21 +02:00
|
|
|
/**
 * OR all layer bits of `b` into `*a`.
 *
 * The merge is done with an atomic fetch-or on the raw 32-bit word, so it is
 * safe when several extraction tasks accumulate into the same mask
 * concurrently (NOTE(review): callers appear to run in parallel — the atomic
 * is the reason this is not a plain `|=`; confirm before simplifying).
 */
BLI_INLINE void mesh_cd_layers_type_merge(DRW_MeshCDMask *a, DRW_MeshCDMask b)
{
  uint32_t *a_p = (uint32_t *)a;
  uint32_t *b_p = (uint32_t *)&b;
  atomic_fetch_and_or_uint32(a_p, *b_p);
}
|
|
|
|
|
|
|
|
|
|
/** Reset the mask so no custom-data layers are requested. */
BLI_INLINE void mesh_cd_layers_type_clear(DRW_MeshCDMask *a)
{
  uint32_t *bits = (uint32_t *)a;
  *bits = 0;
}
|
2017-04-18 23:29:55 +10:00
|
|
|
|
2022-01-11 15:42:07 +01:00
|
|
|
/**
 * When `me` is in edit-mode and an evaluated edit-mesh result exists on
 * `object`, return that evaluated mesh; otherwise return `me` unchanged.
 */
BLI_INLINE const Mesh *editmesh_final_or_this(const Object *object, const Mesh *me)
{
  if (me->edit_mesh == nullptr) {
    return me;
  }
  const Mesh *eval_final = BKE_object_get_editmesh_eval_final(object);
  return (eval_final != nullptr) ? eval_final : me;
}
|
|
|
|
|
|
2019-10-08 18:35:57 +02:00
|
|
|
/* Request the edit-UV data layer. The mesh itself is not inspected (the layer
 * is always needed for edit-UV drawing), hence the unused parameter. */
static void mesh_cd_calc_edit_uv_layer(const Mesh *UNUSED(me), DRW_MeshCDMask *cd_used)
{
  cd_used->edit_uv = 1;
}
|
|
|
|
|
|
2020-05-25 20:16:42 +10:00
|
|
|
/**
 * Return the loop custom-data of `me`, taking the runtime wrapper into account:
 * BMesh wrappers read from the edit-mesh BMesh, other wrapper types read the
 * mesh's own `ldata`.
 */
BLI_INLINE const CustomData *mesh_cd_ldata_get_from_mesh(const Mesh *me)
{
  const eMeshWrapperType wrapper = (eMeshWrapperType)me->runtime.wrapper_type;
  if (wrapper == ME_WRAPPER_TYPE_BMESH) {
    return &me->edit_mesh->bm->ldata;
  }
  if (ELEM(wrapper, ME_WRAPPER_TYPE_SUBD, ME_WRAPPER_TYPE_MDATA)) {
    return &me->ldata;
  }
  /* Unknown wrapper type: fall back to the mesh's own data. */
  BLI_assert(0);
  return &me->ldata;
}
|
|
|
|
|
|
2021-10-26 18:16:33 -03:00
|
|
|
/**
 * Return the polygon custom-data of `me`, taking the runtime wrapper into
 * account (BMesh wrappers read from the edit-mesh BMesh).
 */
BLI_INLINE const CustomData *mesh_cd_pdata_get_from_mesh(const Mesh *me)
{
  const eMeshWrapperType wrapper = (eMeshWrapperType)me->runtime.wrapper_type;
  if (wrapper == ME_WRAPPER_TYPE_BMESH) {
    return &me->edit_mesh->bm->pdata;
  }
  if (ELEM(wrapper, ME_WRAPPER_TYPE_SUBD, ME_WRAPPER_TYPE_MDATA)) {
    return &me->pdata;
  }
  /* Unknown wrapper type: fall back to the mesh's own data. */
  BLI_assert(0);
  return &me->pdata;
}
|
|
|
|
|
|
|
|
|
|
/**
 * Return the edge custom-data of `me`, taking the runtime wrapper into
 * account (BMesh wrappers read from the edit-mesh BMesh).
 */
BLI_INLINE const CustomData *mesh_cd_edata_get_from_mesh(const Mesh *me)
{
  const eMeshWrapperType wrapper = (eMeshWrapperType)me->runtime.wrapper_type;
  if (wrapper == ME_WRAPPER_TYPE_BMESH) {
    return &me->edit_mesh->bm->edata;
  }
  if (ELEM(wrapper, ME_WRAPPER_TYPE_SUBD, ME_WRAPPER_TYPE_MDATA)) {
    return &me->edata;
  }
  /* Unknown wrapper type: fall back to the mesh's own data. */
  BLI_assert(0);
  return &me->edata;
}
|
|
|
|
|
|
Sculpt Vertex Colors: Initial implementation
Sculpt Vertex Colors is a painting system that runs inside sculpt mode, reusing all its tools and optimizations. This provides much better performance, easier to maintain code and more advanced features (new brush engine, filters, symmetry options, masks and face sets compatibility...). This is also the initial step for future features like vertex painting in Multires and brushes that can sculpt and paint at the same time.
This commit includes:
- SCULPT_UNDO_COLOR for undo support in sculpt mode
- SCULPT_UPDATE_COLOR and PBVH flags and rendering
- Sculpt Color API functions
- Sculpt capability for sculpt tools (only enabled in the Paint Brush for now)
- Rendering support in workbench (default to Sculpt Vertex Colors except in Vertex Paint)
- Conversion operator between MPropCol (Sculpt Vertex Colors) and MLoopCol (Vertex Paint)
- Remesher reprojection in the Voxel Remehser
- Paint Brush and Smear Brush with color smoothing in alt-smooth mode
- Parameters for the new brush engine (density, opacity, flow, wet paint mixing, tip scale) implemented in Sculpt Vertex Colors
- Color Filter
- Color picker (uses S shortcut, replaces smooth)
- Color selector in the top bar
Reviewed By: brecht
Maniphest Tasks: T72866
Differential Revision: https://developer.blender.org/D5975
2020-06-22 20:05:28 +02:00
|
|
|
/**
 * Return the vertex custom-data of `me`, taking the runtime wrapper into
 * account (BMesh wrappers read from the edit-mesh BMesh).
 */
BLI_INLINE const CustomData *mesh_cd_vdata_get_from_mesh(const Mesh *me)
{
  const eMeshWrapperType wrapper = (eMeshWrapperType)me->runtime.wrapper_type;
  if (wrapper == ME_WRAPPER_TYPE_BMESH) {
    return &me->edit_mesh->bm->vdata;
  }
  if (ELEM(wrapper, ME_WRAPPER_TYPE_SUBD, ME_WRAPPER_TYPE_MDATA)) {
    return &me->vdata;
  }
  /* Unknown wrapper type: fall back to the mesh's own data. */
  BLI_assert(0);
  return &me->vdata;
}
|
|
|
|
|
|
2022-01-11 15:42:07 +01:00
|
|
|
static void mesh_cd_calc_active_uv_layer(const Object *object,
|
|
|
|
|
const Mesh *me,
|
|
|
|
|
DRW_MeshCDMask *cd_used)
|
2018-12-17 11:37:27 +01:00
|
|
|
{
|
2022-01-11 15:42:07 +01:00
|
|
|
const Mesh *me_final = editmesh_final_or_this(object, me);
|
2020-05-25 20:16:42 +10:00
|
|
|
const CustomData *cd_ldata = mesh_cd_ldata_get_from_mesh(me_final);
|
2018-12-17 11:37:27 +01:00
|
|
|
int layer = CustomData_get_active_layer(cd_ldata, CD_MLOOPUV);
|
|
|
|
|
if (layer != -1) {
|
2019-04-06 01:55:21 +02:00
|
|
|
cd_used->uv |= (1 << layer);
|
2018-12-17 11:37:27 +01:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2022-01-11 15:42:07 +01:00
|
|
|
static void mesh_cd_calc_active_mask_uv_layer(const Object *object,
|
|
|
|
|
const Mesh *me,
|
|
|
|
|
DRW_MeshCDMask *cd_used)
|
2019-03-21 16:44:01 +01:00
|
|
|
{
|
2022-01-11 15:42:07 +01:00
|
|
|
const Mesh *me_final = editmesh_final_or_this(object, me);
|
2020-05-25 20:16:42 +10:00
|
|
|
const CustomData *cd_ldata = mesh_cd_ldata_get_from_mesh(me_final);
|
2019-03-21 16:44:01 +01:00
|
|
|
int layer = CustomData_get_stencil_layer(cd_ldata, CD_MLOOPUV);
|
|
|
|
|
if (layer != -1) {
|
2019-04-06 01:55:21 +02:00
|
|
|
cd_used->uv |= (1 << layer);
|
2019-03-21 16:44:01 +01:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2022-01-11 15:42:07 +01:00
|
|
|
/**
 * Set the bit corresponding to the active color attribute of \a me in `cd_used->vcol`.
 *
 * A temporary shallow mesh is built from the evaluated mesh's point & corner
 * custom-data so the attribute API can be queried against the final layout.
 */
static void mesh_cd_calc_active_mloopcol_layer(const Object *object,
                                               const Mesh *me,
                                               DRW_MeshCDMask *cd_used)
{
  /* Use the edit-mesh evaluated result when available, otherwise `me` itself. */
  const Mesh *me_final = editmesh_final_or_this(object, me);
  Mesh me_query = blender::dna::shallow_zero_initialize();

  const CustomData *cd_vdata = mesh_cd_vdata_get_from_mesh(me_final);
  const CustomData *cd_ldata = mesh_cd_ldata_get_from_mesh(me_final);

  /* Only the point (vdata) and corner (ldata) domains are relevant for color attributes. */
  BKE_id_attribute_copy_domains_temp(
      ID_ME, cd_vdata, nullptr, cd_ldata, nullptr, nullptr, &me_query.id);

  const CustomDataLayer *layer = BKE_id_attributes_active_color_get(&me_query.id);
  /* Logical index into the color-attribute list; -1 when there is no active color layer. */
  int layer_i = BKE_id_attribute_to_index(
      &me_query.id, layer, ATTR_DOMAIN_MASK_COLOR, CD_MASK_COLOR_ALL);

  if (layer_i != -1) {
    cd_used->vcol |= (1UL << (uint)layer_i);
  }
}
|
|
|
|
|
|
2022-04-05 11:42:55 -07:00
|
|
|
static uint mesh_cd_calc_gpu_layers_vcol_used(const Mesh *me_query,
|
|
|
|
|
const CustomData *cd_vdata,
|
|
|
|
|
const CustomData *cd_ldata,
|
|
|
|
|
const char name[])
|
|
|
|
|
{
|
2022-06-10 10:29:35 +02:00
|
|
|
const CustomDataLayer *layer = nullptr;
|
2022-06-01 14:38:06 +10:00
|
|
|
eAttrDomain domain;
|
2022-04-05 11:42:55 -07:00
|
|
|
|
|
|
|
|
if (name[0]) {
|
|
|
|
|
int layer_i = 0;
|
|
|
|
|
|
|
|
|
|
domain = ATTR_DOMAIN_POINT;
|
|
|
|
|
layer_i = CustomData_get_named_layer_index(cd_vdata, CD_PROP_COLOR, name);
|
2022-04-20 09:10:10 -05:00
|
|
|
layer_i = layer_i == -1 ?
|
|
|
|
|
CustomData_get_named_layer_index(cd_vdata, CD_PROP_BYTE_COLOR, name) :
|
|
|
|
|
layer_i;
|
2022-04-05 11:42:55 -07:00
|
|
|
|
|
|
|
|
if (layer_i == -1) {
|
|
|
|
|
domain = ATTR_DOMAIN_CORNER;
|
|
|
|
|
layer_i = layer_i == -1 ? CustomData_get_named_layer_index(cd_ldata, CD_PROP_COLOR, name) :
|
|
|
|
|
layer_i;
|
2022-04-20 09:10:10 -05:00
|
|
|
layer_i = layer_i == -1 ?
|
|
|
|
|
CustomData_get_named_layer_index(cd_ldata, CD_PROP_BYTE_COLOR, name) :
|
|
|
|
|
layer_i;
|
2022-04-05 11:42:55 -07:00
|
|
|
}
|
|
|
|
|
|
2022-05-13 09:24:28 +10:00
|
|
|
/* NOTE: this is not the same as the layer_i below. */
|
2022-04-05 11:42:55 -07:00
|
|
|
if (layer_i != -1) {
|
|
|
|
|
layer = (domain == ATTR_DOMAIN_POINT ? cd_vdata : cd_ldata)->layers + layer_i;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
else {
|
|
|
|
|
layer = BKE_id_attributes_render_color_get(&me_query->id);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (!layer) {
|
|
|
|
|
return -1;
|
|
|
|
|
}
|
|
|
|
|
|
2022-05-13 09:24:28 +10:00
|
|
|
/* NOTE: this is the logical index into the color attribute list,
|
2022-04-05 11:42:55 -07:00
|
|
|
* not the customdata index. */
|
|
|
|
|
int vcol_i = BKE_id_attribute_to_index(
|
|
|
|
|
(ID *)me_query, layer, ATTR_DOMAIN_MASK_COLOR, CD_MASK_COLOR_ALL);
|
|
|
|
|
|
|
|
|
|
return vcol_i;
|
|
|
|
|
}
|
|
|
|
|
|
2022-01-11 15:42:07 +01:00
|
|
|
/**
 * Compute the custom-data mask of layers the given materials require on the GPU,
 * and collect generic attribute requests into \a attributes.
 *
 * For each attribute of every non-null material, `CD_AUTO_FROM_NAME` requests are
 * resolved to a concrete type/layer/domain by probing the evaluated mesh's
 * custom-data in a fixed priority order (UVs, then color layers on point/corner
 * domains, then generic attributes on point/corner/face/edge domains).
 *
 * \return The accumulated #DRW_MeshCDMask (UV, tangent, orco and vcol bits).
 */
static DRW_MeshCDMask mesh_cd_calc_used_gpu_layers(const Object *object,
                                                   const Mesh *me,
                                                   struct GPUMaterial **gpumat_array,
                                                   int gpumat_array_len,
                                                   DRW_Attributes *attributes)
{
  const Mesh *me_final = editmesh_final_or_this(object, me);
  const CustomData *cd_ldata = mesh_cd_ldata_get_from_mesh(me_final);
  const CustomData *cd_pdata = mesh_cd_pdata_get_from_mesh(me_final);
  const CustomData *cd_vdata = mesh_cd_vdata_get_from_mesh(me_final);
  const CustomData *cd_edata = mesh_cd_edata_get_from_mesh(me_final);

  /* Create a mesh with final customdata domains
   * we can query with attribute API. */
  Mesh me_query = blender::dna::shallow_zero_initialize();

  BKE_id_attribute_copy_domains_temp(
      ID_ME, cd_vdata, cd_edata, cd_ldata, cd_pdata, nullptr, &me_query.id);

  /* See: DM_vertex_attributes_from_gpu for similar logic */
  DRW_MeshCDMask cd_used;
  mesh_cd_layers_type_clear(&cd_used);

  for (int i = 0; i < gpumat_array_len; i++) {
    GPUMaterial *gpumat = gpumat_array[i];
    if (gpumat) {
      ListBase gpu_attrs = GPU_material_attributes(gpumat);
      LISTBASE_FOREACH (GPUMaterialAttribute *, gpu_attr, &gpu_attrs) {
        const char *name = gpu_attr->name;
        eCustomDataType type = static_cast<eCustomDataType>(gpu_attr->type);
        int layer = -1;
        /* Only set when a generic/color attribute match pins a specific domain. */
        std::optional<eAttrDomain> domain;

        if (type == CD_AUTO_FROM_NAME) {
          /* We need to deduce what exact layer is used.
           *
           * We do it based on the specified name.
           */
          if (name[0] != '\0') {
            /* 1) Named UV layer. */
            layer = CustomData_get_named_layer(cd_ldata, CD_MLOOPUV, name);
            type = CD_MTFACE;

            /* 2) Float color on the point domain. */
            if (layer == -1) {
              layer = CustomData_get_named_layer(cd_vdata, CD_PROP_COLOR, name);
              if (layer != -1) {
                type = CD_PROP_COLOR;
                domain = ATTR_DOMAIN_POINT;
              }
            }

            /* 3) Float color on the corner domain. */
            if (layer == -1) {
              layer = CustomData_get_named_layer(cd_ldata, CD_PROP_COLOR, name);
              if (layer != -1) {
                type = CD_PROP_COLOR;
                domain = ATTR_DOMAIN_CORNER;
              }
            }

            /* 4) Byte color on the point domain. */
            if (layer == -1) {
              layer = CustomData_get_named_layer(cd_vdata, CD_PROP_BYTE_COLOR, name);
              if (layer != -1) {
                type = CD_PROP_BYTE_COLOR;
                domain = ATTR_DOMAIN_POINT;
              }
            }

            /* 5) Byte color on the corner domain. */
            if (layer == -1) {
              layer = CustomData_get_named_layer(cd_ldata, CD_PROP_BYTE_COLOR, name);
              if (layer != -1) {
                type = CD_PROP_BYTE_COLOR;
                domain = ATTR_DOMAIN_CORNER;
              }
            }

#if 0 /* Tangents are always from UV's - this will never happen. */
            if (layer == -1) {
              layer = CustomData_get_named_layer(cd_ldata, CD_TANGENT, name);
              type = CD_TANGENT;
            }
#endif
            if (layer == -1) {
              /* Try to match a generic attribute, we use the first attribute domain with a
               * matching name. */
              if (drw_custom_data_match_attribute(cd_vdata, name, &layer, &type)) {
                domain = ATTR_DOMAIN_POINT;
              }
              else if (drw_custom_data_match_attribute(cd_ldata, name, &layer, &type)) {
                domain = ATTR_DOMAIN_CORNER;
              }
              else if (drw_custom_data_match_attribute(cd_pdata, name, &layer, &type)) {
                domain = ATTR_DOMAIN_FACE;
              }
              else if (drw_custom_data_match_attribute(cd_edata, name, &layer, &type)) {
                domain = ATTR_DOMAIN_EDGE;
              }
              else {
                layer = -1;
              }
            }

            /* Nothing matched the name: skip this attribute entirely. */
            if (layer == -1) {
              continue;
            }
          }
          else {
            /* Fall back to the UV layer, which matches old behavior. */
            type = CD_MTFACE;
          }
        }

        switch (type) {
          case CD_MTFACE: {
            if (layer == -1) {
              layer = (name[0] != '\0') ? CustomData_get_named_layer(cd_ldata, CD_MLOOPUV, name) :
                                          CustomData_get_render_layer(cd_ldata, CD_MLOOPUV);
            }
            if (layer != -1) {
              cd_used.uv |= (1 << layer);
            }
            break;
          }
          case CD_TANGENT: {
            if (layer == -1) {
              layer = (name[0] != '\0') ? CustomData_get_named_layer(cd_ldata, CD_MLOOPUV, name) :
                                          CustomData_get_render_layer(cd_ldata, CD_MLOOPUV);

              /* Only fallback to orco (below) when we have no UV layers, see: T56545 */
              if (layer == -1 && name[0] != '\0') {
                layer = CustomData_get_render_layer(cd_ldata, CD_MLOOPUV);
              }
            }
            if (layer != -1) {
              cd_used.tan |= (1 << layer);
            }
            else {
              /* no UV layers at all => requesting orco */
              cd_used.tan_orco = 1;
              cd_used.orco = 1;
            }
            break;
          }

          case CD_ORCO: {
            cd_used.orco = 1;
            break;
          }

          /* NOTE: attr->type will always be CD_PROP_COLOR even for
           * CD_PROP_BYTE_COLOR layers, see node_shader_gpu_vertex_color in
           * node_shader_vertex_color.cc.
           */
          case CD_MCOL:
          case CD_PROP_BYTE_COLOR:
          case CD_PROP_COLOR: {
            /* First check Color attributes, when not found check mesh attributes. Geometry nodes
             * can generate those layers. */
            int vcol_bit = mesh_cd_calc_gpu_layers_vcol_used(&me_query, cd_vdata, cd_ldata, name);

            if (vcol_bit != -1) {
              cd_used.vcol |= 1UL << (uint)vcol_bit;
              break;
            }

            /* Not a color-attribute-list layer: request it as a generic attribute. */
            if (layer != -1 && domain.has_value()) {
              drw_attributes_add_request(attributes, name, type, layer, *domain);
            }
            break;
          }
          case CD_PROP_FLOAT3:
          case CD_PROP_BOOL:
          case CD_PROP_INT8:
          case CD_PROP_INT32:
          case CD_PROP_FLOAT:
          case CD_PROP_FLOAT2: {
            /* Generic attributes resolved above carry a concrete layer and domain. */
            if (layer != -1 && domain.has_value()) {
              drw_attributes_add_request(attributes, name, type, layer, *domain);
            }
            break;
          }
          default:
            break;
        }
      }
    }
  }
  return cd_used;
}
|
|
|
|
|
|
2019-07-14 16:49:44 +02:00
|
|
|
/** \} */
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2019-07-14 16:49:44 +02:00
|
|
|
/* ---------------------------------------------------------------------- */
|
|
|
|
|
/** \name Vertex Group Selection
|
|
|
|
|
* \{ */
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2019-07-14 16:49:44 +02:00
|
|
|
/** Reset the selection structure, deallocating heap memory as appropriate. */
|
|
|
|
|
static void drw_mesh_weight_state_clear(struct DRW_MeshWeightState *wstate)
|
|
|
|
|
{
|
|
|
|
|
MEM_SAFE_FREE(wstate->defgroup_sel);
|
2018-10-07 18:25:51 +03:00
|
|
|
MEM_SAFE_FREE(wstate->defgroup_locked);
|
|
|
|
|
MEM_SAFE_FREE(wstate->defgroup_unlocked);
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2019-07-14 16:49:44 +02:00
|
|
|
memset(wstate, 0, sizeof(*wstate));
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2019-07-14 16:49:44 +02:00
|
|
|
wstate->defgroup_active = -1;
|
|
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2019-07-14 16:49:44 +02:00
|
|
|
/** Copy selection data from one structure to another, including heap memory. */
|
|
|
|
|
static void drw_mesh_weight_state_copy(struct DRW_MeshWeightState *wstate_dst,
|
|
|
|
|
const struct DRW_MeshWeightState *wstate_src)
|
|
|
|
|
{
|
|
|
|
|
MEM_SAFE_FREE(wstate_dst->defgroup_sel);
|
2018-10-07 18:25:51 +03:00
|
|
|
MEM_SAFE_FREE(wstate_dst->defgroup_locked);
|
|
|
|
|
MEM_SAFE_FREE(wstate_dst->defgroup_unlocked);
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2019-07-14 16:49:44 +02:00
|
|
|
memcpy(wstate_dst, wstate_src, sizeof(*wstate_dst));
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2019-07-14 16:49:44 +02:00
|
|
|
if (wstate_src->defgroup_sel) {
|
2022-06-05 12:04:42 +02:00
|
|
|
wstate_dst->defgroup_sel = static_cast<bool *>(MEM_dupallocN(wstate_src->defgroup_sel));
|
2019-07-14 16:49:44 +02:00
|
|
|
}
|
2018-10-07 18:25:51 +03:00
|
|
|
if (wstate_src->defgroup_locked) {
|
2022-06-05 12:04:42 +02:00
|
|
|
wstate_dst->defgroup_locked = static_cast<bool *>(MEM_dupallocN(wstate_src->defgroup_locked));
|
2018-10-07 18:25:51 +03:00
|
|
|
}
|
|
|
|
|
if (wstate_src->defgroup_unlocked) {
|
2022-06-05 12:04:42 +02:00
|
|
|
wstate_dst->defgroup_unlocked = static_cast<bool *>(
|
|
|
|
|
MEM_dupallocN(wstate_src->defgroup_unlocked));
|
2018-10-07 18:25:51 +03:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/** Return true when both flag arrays are null, or both are non-null with equal contents. */
static bool drw_mesh_flags_equal(const bool *array1, const bool *array2, int size)
{
  if (!array1 || !array2) {
    /* Equal only when both sides are null. */
    return array1 == array2;
  }
  return memcmp(array1, array2, size * sizeof(bool)) == 0;
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2019-07-14 16:49:44 +02:00
|
|
|
/** Compare two selection structures. */
|
|
|
|
|
static bool drw_mesh_weight_state_compare(const struct DRW_MeshWeightState *a,
|
|
|
|
|
const struct DRW_MeshWeightState *b)
|
|
|
|
|
{
|
|
|
|
|
return a->defgroup_active == b->defgroup_active && a->defgroup_len == b->defgroup_len &&
|
|
|
|
|
a->flags == b->flags && a->alert_mode == b->alert_mode &&
|
|
|
|
|
a->defgroup_sel_count == b->defgroup_sel_count &&
|
2018-10-07 18:25:51 +03:00
|
|
|
drw_mesh_flags_equal(a->defgroup_sel, b->defgroup_sel, a->defgroup_len) &&
|
|
|
|
|
drw_mesh_flags_equal(a->defgroup_locked, b->defgroup_locked, a->defgroup_len) &&
|
|
|
|
|
drw_mesh_flags_equal(a->defgroup_unlocked, b->defgroup_unlocked, a->defgroup_len);
|
2019-07-14 16:49:44 +02:00
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2019-07-14 16:49:44 +02:00
|
|
|
/**
 * Extract the complete vertex-group selection state and weight-paint mode flags
 * from the object, mesh and tool settings into \a wstate.
 *
 * \param paint_mode: When true, multi-paint and lock-relative state are also
 * extracted (both depend on tool settings).
 * \param wstate: Output; fully overwritten (any previously owned memory must have
 * been freed by the caller, e.g. via #drw_mesh_weight_state_clear).
 */
static void drw_mesh_weight_state_extract(Object *ob,
                                          Mesh *me,
                                          const ToolSettings *ts,
                                          bool paint_mode,
                                          struct DRW_MeshWeightState *wstate)
{
  /* Extract complete vertex weight group selection state and mode flags. */
  memset(wstate, 0, sizeof(*wstate));

  /* `vertex_group_active_index` is 1-based; -1 means no active group. */
  wstate->defgroup_active = me->vertex_group_active_index - 1;
  wstate->defgroup_len = BLI_listbase_count(&me->vertex_group_names);

  wstate->alert_mode = ts->weightuser;

  if (paint_mode && ts->multipaint) {
    /* Multi-paint needs to know all selected bones, not just the active group.
     * This is actually a relatively expensive operation, but caching would be difficult. */
    wstate->defgroup_sel = BKE_object_defgroup_selected_get(
        ob, wstate->defgroup_len, &wstate->defgroup_sel_count);

    if (wstate->defgroup_sel_count > 1) {
      wstate->flags |= DRW_MESH_WEIGHT_STATE_MULTIPAINT |
                       (ts->auto_normalize ? DRW_MESH_WEIGHT_STATE_AUTO_NORMALIZE : 0);

      if (ME_USING_MIRROR_X_VERTEX_GROUPS(me)) {
        /* NOTE: `defgroup_sel` is passed as both input and output (in-place mirror). */
        BKE_object_defgroup_mirror_selection(ob,
                                             wstate->defgroup_len,
                                             wstate->defgroup_sel,
                                             wstate->defgroup_sel,
                                             &wstate->defgroup_sel_count);
      }
    }
    /* With only one selected bone Multi-paint reverts to regular mode. */
    else {
      wstate->defgroup_sel_count = 0;
      MEM_SAFE_FREE(wstate->defgroup_sel);
    }
  }

  if (paint_mode && ts->wpaint_lock_relative) {
    /* Set of locked vertex groups for the lock relative mode. */
    wstate->defgroup_locked = BKE_object_defgroup_lock_flags_get(ob, wstate->defgroup_len);
    wstate->defgroup_unlocked = BKE_object_defgroup_validmap_get(ob, wstate->defgroup_len);

    /* Check that a deform group is active, and none of selected groups are locked. */
    if (BKE_object_defgroup_check_lock_relative(
            wstate->defgroup_locked, wstate->defgroup_unlocked, wstate->defgroup_active) &&
        BKE_object_defgroup_check_lock_relative_multi(wstate->defgroup_len,
                                                      wstate->defgroup_locked,
                                                      wstate->defgroup_sel,
                                                      wstate->defgroup_sel_count)) {
      wstate->flags |= DRW_MESH_WEIGHT_STATE_LOCK_RELATIVE;

      /* Compute the set of locked and unlocked deform vertex groups. */
      BKE_object_defgroup_split_locked_validmap(wstate->defgroup_len,
                                                wstate->defgroup_locked,
                                                wstate->defgroup_unlocked,
                                                wstate->defgroup_locked, /* out */
                                                wstate->defgroup_unlocked);
    }
    else {
      /* Lock-relative mode not applicable: discard the bitmaps again. */
      MEM_SAFE_FREE(wstate->defgroup_unlocked);
      MEM_SAFE_FREE(wstate->defgroup_locked);
    }
  }
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2019-07-14 16:49:44 +02:00
|
|
|
/** \} */
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2019-07-14 16:49:44 +02:00
|
|
|
/* ---------------------------------------------------------------------- */
|
|
|
|
|
/** \name Mesh GPUBatch Cache
|
|
|
|
|
* \{ */
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2019-07-14 16:49:44 +02:00
|
|
|
/** Merge \a new_flag into the cache's requested-batch bit-field using an atomic
 * OR, so concurrent requests cannot lose bits.
 * NOTE(review): relies on #DRWBatchFlag being exactly 32 bits wide for the
 * pointer cast to be valid — confirm against the flag definition. */
BLI_INLINE void mesh_batch_cache_add_request(MeshBatchCache *cache, DRWBatchFlag new_flag)
{
  atomic_fetch_and_or_uint32((uint32_t *)(&cache->batch_requested), *(uint32_t *)&new_flag);
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2019-07-14 16:49:44 +02:00
|
|
|
/* GPUBatch cache management. */
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2022-01-11 15:42:07 +01:00
|
|
|
/**
 * Check whether the existing batch cache stored on `me` is still usable for
 * drawing `object`, or must be rebuilt (see #DRW_mesh_batch_cache_validate).
 *
 * Returns false when there is no cache, the PBVH drawing state changed, the
 * edit-mode state changed, the cache was tagged dirty, or the material count
 * no longer matches.
 */
static bool mesh_batch_cache_valid(Object *object, Mesh *me)
{
  MeshBatchCache *cache = static_cast<MeshBatchCache *>(me->runtime.batch_cache);

  if (cache == nullptr) {
    return false;
  }

  if (object->sculpt && object->sculpt->pbvh) {
    /* Invalidate when PBVH drawing was toggled, or when the PBVH draw data
     * itself became invalid. NOTE: this single condition also covers the
     * previously separate `BKE_pbvh_is_drawing() && BKE_pbvh_draw_cache_invalid()`
     * check, which was fully subsumed by the `||` below and has been removed. */
    if (cache->pbvh_is_drawing != BKE_pbvh_is_drawing(object->sculpt->pbvh) ||
        BKE_pbvh_draw_cache_invalid(object->sculpt->pbvh)) {
      return false;
    }
  }

  if (cache->is_editmode != (me->edit_mesh != nullptr)) {
    return false;
  }

  if (cache->is_dirty) {
    return false;
  }

  if (cache->mat_len != mesh_render_mat_len_get(object, me)) {
    return false;
  }

  return true;
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2022-01-11 15:42:07 +01:00
|
|
|
/**
 * Allocate (or reset) the batch cache stored in `me->runtime.batch_cache`
 * and initialize it for drawing `object`.
 *
 * The per-material batch and index-buffer arrays are allocated here; the
 * actual GPU data is filled lazily when batches are requested.
 */
static void mesh_batch_cache_init(Object *object, Mesh *me)
{
  MeshBatchCache *cache = static_cast<MeshBatchCache *>(me->runtime.batch_cache);

  if (!cache) {
    me->runtime.batch_cache = MEM_cnew<MeshBatchCache>(__func__);
    cache = static_cast<MeshBatchCache *>(me->runtime.batch_cache);
  }
  else {
    /* Reuse the existing allocation but start from a zeroed state. */
    memset(cache, 0, sizeof(*cache));
  }

  cache->is_editmode = me->edit_mesh != nullptr;

  if (object->sculpt && object->sculpt->pbvh) {
    cache->pbvh_is_drawing = BKE_pbvh_is_drawing(object->sculpt->pbvh);
  }

  /* NOTE(review): a historical `if (cache->is_editmode == false)` block that
   * contained only commented-out element-count caching was removed as dead code. */

  cache->mat_len = mesh_render_mat_len_get(object, me);
  cache->surface_per_mat = static_cast<GPUBatch **>(
      MEM_callocN(sizeof(*cache->surface_per_mat) * cache->mat_len, __func__));
  cache->tris_per_mat = static_cast<GPUIndexBuf **>(
      MEM_callocN(sizeof(*cache->tris_per_mat) * cache->mat_len, __func__));

  cache->is_dirty = false;
  cache->batch_ready = (DRWBatchFlag)0;
  cache->batch_requested = (DRWBatchFlag)0;

  drw_mesh_weight_state_clear(&cache->weight_state);
}
|
|
|
|
|
|
2022-01-11 15:42:07 +01:00
|
|
|
/* Public entry point: ensure `me` has a usable batch cache for `object`,
 * rebuilding it from scratch when the current one is stale. */
void DRW_mesh_batch_cache_validate(Object *object, Mesh *me)
{
  if (mesh_batch_cache_valid(object, me)) {
    return;
  }
  mesh_batch_cache_clear(me);
  mesh_batch_cache_init(object, me);
}
|
|
|
|
|
|
2019-07-14 16:49:44 +02:00
|
|
|
/* Fetch the batch cache previously created by #mesh_batch_cache_init. */
static MeshBatchCache *mesh_batch_cache_get(Mesh *me)
{
  void *batch_cache = me->runtime.batch_cache;
  return static_cast<MeshBatchCache *>(batch_cache);
}
|
|
|
|
|
|
2019-07-14 16:49:44 +02:00
|
|
|
/* Discard the weight VBOs and the surface-weights batch whenever the current
 * vertex-group weight state differs from the one the cache was built for. */
static void mesh_batch_cache_check_vertex_group(MeshBatchCache *cache,
                                                const struct DRW_MeshWeightState *wstate)
{
  if (drw_mesh_weight_state_compare(&cache->weight_state, wstate)) {
    /* Weight state unchanged: cached weight data stays valid. */
    return;
  }

  FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
    GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.weights);
  }
  GPU_BATCH_CLEAR_SAFE(cache->batch.surface_weights);

  cache->batch_ready &= ~MBC_SURFACE_WEIGHTS;

  drw_mesh_weight_state_clear(&cache->weight_state);
}
|
|
|
|
|
|
2020-07-17 13:47:10 +02:00
|
|
|
/* Flag the main surface batch, plus one batch per material, as requested. */
static void mesh_batch_cache_request_surface_batches(MeshBatchCache *cache)
{
  mesh_batch_cache_add_request(cache, MBC_SURFACE);
  DRW_batch_request(&cache->batch.surface);
  for (int mat = 0; mat < cache->mat_len; mat++) {
    DRW_batch_request(&cache->surface_per_mat[mat]);
  }
}
|
|
|
|
|
|
2021-08-23 09:52:13 -03:00
|
|
|
/* Free batches with material-mapped looptris.
|
|
|
|
|
* NOTE: The updating of the indices buffers (#tris_per_mat) is handled in the extractors.
|
|
|
|
|
* No need to discard they here. */
|
2020-07-17 13:47:10 +02:00
|
|
|
static void mesh_batch_cache_discard_surface_batches(MeshBatchCache *cache)
|
|
|
|
|
{
|
|
|
|
|
GPU_BATCH_DISCARD_SAFE(cache->batch.surface);
|
2020-10-07 13:55:07 +02:00
|
|
|
for (int i = 0; i < cache->mat_len; i++) {
|
|
|
|
|
GPU_BATCH_DISCARD_SAFE(cache->surface_per_mat[i]);
|
2019-10-09 18:11:10 +02:00
|
|
|
}
|
2020-07-17 13:47:10 +02:00
|
|
|
cache->batch_ready &= ~MBC_SURFACE;
|
2019-10-09 18:11:10 +02:00
|
|
|
}
|
|
|
|
|
|
2019-07-14 16:49:44 +02:00
|
|
|
/* Discard every VBO holding shading data (UVs, tangents, vertex colors, orcos)
 * plus any batch built from them, then reset the used custom-data mask so the
 * shading layers are re-evaluated on the next request. */
static void mesh_batch_cache_discard_shaded_tri(MeshBatchCache *cache)
{
  FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
    GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.uv);
    GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.tan);
    GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.vcol);
    GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.orco);
  }
  /* Batches depending on any of the discarded buffers must be rebuilt. */
  DRWBatchFlag batch_map = BATCH_MAP(vbo.uv, vbo.tan, vbo.vcol, vbo.orco);
  mesh_batch_cache_discard_batch(cache, batch_map);
  mesh_cd_layers_type_clear(&cache->cd_used);
}
|
|
|
|
|
|
2019-07-14 16:49:44 +02:00
|
|
|
/* Discard all UV-editing related buffers and batches (stretch displays, the
 * shared UV VBO, face-dot edit UV data and the edit-UV index buffers). */
static void mesh_batch_cache_discard_uvedit(MeshBatchCache *cache)
{
  FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
    GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.edituv_stretch_angle);
    GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.edituv_stretch_area);
    GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.uv);
    GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.edituv_data);
    GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.fdots_uv);
    GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.fdots_edituv_data);
    GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_tris);
    GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_lines);
    GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_points);
    GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_fdots);
  }
  /* Any batch that referenced one of the discarded buffers must be rebuilt. */
  DRWBatchFlag batch_map = BATCH_MAP(vbo.edituv_stretch_angle,
                                     vbo.edituv_stretch_area,
                                     vbo.uv,
                                     vbo.edituv_data,
                                     vbo.fdots_uv,
                                     vbo.fdots_edituv_data,
                                     ibo.edituv_tris,
                                     ibo.edituv_lines,
                                     ibo.edituv_points,
                                     ibo.edituv_fdots);
  mesh_batch_cache_discard_batch(cache, batch_map);

  /* Reset cached mesh/UV area totals (used by the stretch-area display). */
  cache->tot_area = 0.0f;
  cache->tot_uv_area = 0.0f;

  cache->batch_ready &= ~MBC_EDITUV;

  /* We discarded the vbo.uv so we need to reset the cd_used flag. */
  cache->cd_used.uv = 0;
  cache->cd_used.edit_uv = 0;
}
|
|
|
|
|
|
|
|
|
|
/* Discard only the selection-dependent subset of the edit-UV buffers
 * (UV visibility depends on the edit-mode selection), leaving the UV
 * coordinate and stretch data intact. */
static void mesh_batch_cache_discard_uvedit_select(MeshBatchCache *cache)
{
  FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
    GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.edituv_data);
    GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.fdots_edituv_data);
    GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_tris);
    GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_lines);
    GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_points);
    GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_fdots);
  }
  /* Any batch built from the discarded buffers must be rebuilt. */
  DRWBatchFlag batch_map = BATCH_MAP(vbo.edituv_data,
                                     vbo.fdots_edituv_data,
                                     ibo.edituv_tris,
                                     ibo.edituv_lines,
                                     ibo.edituv_points,
                                     ibo.edituv_fdots);
  mesh_batch_cache_discard_batch(cache, batch_map);
}
|
|
|
|
|
|
2020-10-09 07:27:18 +02:00
|
|
|
/* Tag parts of the batch cache as dirty depending on what changed.
 * Only the buffers affected by `mode` are discarded, so unrelated GPU data
 * survives the update. A missing cache means nothing needs invalidating. */
void DRW_mesh_batch_cache_dirty_tag(Mesh *me, eMeshBatchDirtyMode mode)
{
  MeshBatchCache *cache = static_cast<MeshBatchCache *>(me->runtime.batch_cache);
  if (cache == nullptr) {
    return;
  }
  DRWBatchFlag batch_map;
  switch (mode) {
    case BKE_MESH_BATCH_DIRTY_SELECT:
      /* Edit-mode selection is stored in the edit_data and face-dot normal VBOs. */
      FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
        GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.edit_data);
        GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.fdots_nor);
      }
      batch_map = BATCH_MAP(vbo.edit_data, vbo.fdots_nor);
      mesh_batch_cache_discard_batch(cache, batch_map);

      /* Because visible UVs depends on edit mode selection, discard topology. */
      mesh_batch_cache_discard_uvedit_select(cache);
      break;
    case BKE_MESH_BATCH_DIRTY_SELECT_PAINT:
      /* Paint mode selection flag is packed inside the nor attribute.
       * Note that it can be slow if auto smooth is enabled. (see T63946) */
      FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
        GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.lines_paint_mask);
        GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.pos_nor);
        GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.lnor);
      }
      batch_map = BATCH_MAP(ibo.lines_paint_mask, vbo.pos_nor, vbo.lnor);
      mesh_batch_cache_discard_batch(cache, batch_map);
      break;
    case BKE_MESH_BATCH_DIRTY_ALL:
      /* Full rebuild on the next cache validation. */
      cache->is_dirty = true;
      break;
    case BKE_MESH_BATCH_DIRTY_SHADING:
      mesh_batch_cache_discard_shaded_tri(cache);
      mesh_batch_cache_discard_uvedit(cache);
      break;
    case BKE_MESH_BATCH_DIRTY_UVEDIT_ALL:
      mesh_batch_cache_discard_uvedit(cache);
      break;
    case BKE_MESH_BATCH_DIRTY_UVEDIT_SELECT:
      /* Only the selection part of the edit-UV data changed. */
      FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
        GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.edituv_data);
        GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.fdots_edituv_data);
      }
      batch_map = BATCH_MAP(vbo.edituv_data, vbo.fdots_edituv_data);
      mesh_batch_cache_discard_batch(cache, batch_map);
      break;
    default:
      BLI_assert(0);
  }
}
|
|
|
|
|
|
2021-08-23 12:33:12 -03:00
|
|
|
/* Discard every vertex and index buffer stored in `mbuflist`.
 * The vbo/ibo structs are treated as flat arrays of GPU buffer pointers. */
static void mesh_buffer_list_clear(MeshBufferList *mbuflist)
{
  GPUVertBuf **vbos = (GPUVertBuf **)&mbuflist->vbo;
  GPUIndexBuf **ibos = (GPUIndexBuf **)&mbuflist->ibo;
  const int vbo_len = sizeof(mbuflist->vbo) / sizeof(void *);
  const int ibo_len = sizeof(mbuflist->ibo) / sizeof(void *);
  for (int i = 0; i < vbo_len; i++) {
    GPU_VERTBUF_DISCARD_SAFE(vbos[i]);
  }
  for (int i = 0; i < ibo_len; i++) {
    GPU_INDEXBUF_DISCARD_SAFE(ibos[i]);
  }
}
|
|
|
|
|
|
2021-08-23 12:35:43 -03:00
|
|
|
/* Free all GPU buffers and derived CPU-side data of one #MeshBufferCache. */
static void mesh_buffer_cache_clear(MeshBufferCache *mbc)
{
  mesh_buffer_list_clear(&mbc->buff);

  /* Loose geometry index arrays. */
  MEM_SAFE_FREE(mbc->loose_geom.verts);
  MEM_SAFE_FREE(mbc->loose_geom.edges);
  mbc->loose_geom.vert_len = 0;
  mbc->loose_geom.edge_len = 0;

  /* Material-sorted triangle bookkeeping. */
  MEM_SAFE_FREE(mbc->poly_sorted.tri_first_index);
  MEM_SAFE_FREE(mbc->poly_sorted.mat_tri_len);
  mbc->poly_sorted.visible_tri_len = 0;
}
|
|
|
|
|
|
OpenSubDiv: add support for an OpenGL evaluator
This evaluator is used in order to evaluate subdivision at render time, allowing for
faster renders of meshes with a subdivision surface modifier placed at the last
position in the modifier list.
When evaluating the subsurf modifier, we detect whether we can delegate evaluation
to the draw code. If so, the subdivision is first evaluated on the GPU using our own
custom evaluator (only the coarse data needs to be initially sent to the GPU), then,
buffers for the final `MeshBufferCache` are filled on the GPU using a set of
compute shaders. However, some buffers are still filled on the CPU side, if doing so
on the GPU is impractical (e.g. the line adjacency buffer used for x-ray, whose
logic is hardly GPU compatible).
This is done at the mesh buffer extraction level so that the result can be readily used
in the various OpenGL engines, without having to write custom geometry or tesselation
shaders.
We use our own subdivision evaluation shaders, instead of OpenSubDiv's vanilla one, in
order to control the data layout, and interpolation. For example, we store vertex colors
as compressed 16-bit integers, while OpenSubDiv's default evaluator only work for float
types.
In order to still access the modified geometry on the CPU side, for use in modifiers
or transform operators, a dedicated wrapper type is added `MESH_WRAPPER_TYPE_SUBD`.
Subdivision will be lazily evaluated via `BKE_object_get_evaluated_mesh` which will
create such a wrapper if possible. If the final subdivision surface is not needed on
the CPU side, `BKE_object_get_evaluated_mesh_no_subsurf` should be used.
Enabling or disabling GPU subdivision can be done through the user preferences (under
Viewport -> Subdivision).
See patch description for benchmarks.
Reviewed By: campbellbarton, jbakker, fclem, brecht, #eevee_viewport
Differential Revision: https://developer.blender.org/D12406
2021-12-27 16:34:47 +01:00
|
|
|
/* Free the GPU subdivision cache, if one was allocated. */
static void mesh_batch_cache_free_subdiv_cache(MeshBatchCache *cache)
{
  if (cache->subdiv_cache == nullptr) {
    return;
  }
  draw_subdiv_cache_free(cache->subdiv_cache);
  MEM_freeN(cache->subdiv_cache);
  cache->subdiv_cache = nullptr;
}
|
|
|
|
|
|
2019-07-14 16:49:44 +02:00
|
|
|
/* Free all GPU data owned by the batch cache (buffers, batches, per-material
 * arrays, weight state and subdivision cache). The cache struct itself is
 * kept so it can be re-initialized; see #DRW_mesh_batch_cache_free for the
 * full teardown. */
static void mesh_batch_cache_clear(Mesh *me)
{
  MeshBatchCache *cache = static_cast<MeshBatchCache *>(me->runtime.batch_cache);
  if (!cache) {
    return;
  }
  FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
    mesh_buffer_cache_clear(mbc);
  }

  /* Per-material triangle index buffers. */
  for (int i = 0; i < cache->mat_len; i++) {
    GPU_INDEXBUF_DISCARD_SAFE(cache->tris_per_mat[i]);
  }
  MEM_SAFE_FREE(cache->tris_per_mat);

  /* Discard every batch by treating the struct of batches as a flat array. */
  for (int i = 0; i < sizeof(cache->batch) / sizeof(void *); i++) {
    GPUBatch **batch = (GPUBatch **)&cache->batch;
    GPU_BATCH_DISCARD_SAFE(batch[i]);
  }

  mesh_batch_cache_discard_shaded_tri(cache);
  mesh_batch_cache_discard_uvedit(cache);
  MEM_SAFE_FREE(cache->surface_per_mat);
  cache->mat_len = 0;

  cache->batch_ready = (DRWBatchFlag)0;
  drw_mesh_weight_state_clear(&cache->weight_state);

  mesh_batch_cache_free_subdiv_cache(cache);
}
|
|
|
|
|
|
|
|
|
|
/* Fully free the batch cache: all GPU data plus the cache allocation itself. */
void DRW_mesh_batch_cache_free(Mesh *me)
{
  mesh_batch_cache_clear(me);
  MEM_SAFE_FREE(me->runtime.batch_cache);
}
|
|
|
|
|
|
2017-05-05 05:07:52 +10:00
|
|
|
/** \} */
|
|
|
|
|
|
|
|
|
|
/* ---------------------------------------------------------------------- */
|
|
|
|
|
/** \name Public API
|
|
|
|
|
* \{ */
|
|
|
|
|
|
2022-01-11 15:42:07 +01:00
|
|
|
/* Request the active UV layer (and its paint-mask UV layer) needed for
 * texture painting, merging the result into the cache's needed-layers mask. */
static void texpaint_request_active_uv(MeshBatchCache *cache, Object *object, Mesh *me)
{
  DRW_MeshCDMask cd_needed;
  mesh_cd_layers_type_clear(&cd_needed);
  mesh_cd_calc_active_uv_layer(object, me, &cd_needed);

  /* Callers must ensure a UV layer exists before requesting texpaint batches. */
  BLI_assert(cd_needed.uv != 0 &&
             "No uv layer available in texpaint, but batches requested anyway!");

  mesh_cd_calc_active_mask_uv_layer(object, me, &cd_needed);
  mesh_cd_layers_type_merge(&cache->cd_needed, cd_needed);
}
|
|
|
|
|
|
2022-01-11 15:42:07 +01:00
|
|
|
/* Request the active MLOOPCOL (vertex color) layer needed for vertex painting,
 * merging the result into the cache's needed-layers mask. */
static void texpaint_request_active_vcol(MeshBatchCache *cache, Object *object, Mesh *me)
{
  DRW_MeshCDMask cd_needed;
  mesh_cd_layers_type_clear(&cd_needed);
  mesh_cd_calc_active_mloopcol_layer(object, me, &cd_needed);

  /* Callers must ensure a color layer exists before requesting vertpaint batches. */
  BLI_assert(cd_needed.vcol != 0 &&
             "No MLOOPCOL layer available in vertpaint, but batches requested anyway!");

  mesh_cd_layers_type_merge(&cache->cd_needed, cd_needed);
}
|
|
|
|
|
|
2022-01-11 15:42:07 +01:00
|
|
|
/* Request the active and render color-attribute layers for sculpt vertex
 * colors by setting the corresponding bits in `cache->cd_needed.vcol`. */
static void sculpt_request_active_vcol(MeshBatchCache *cache, Object *object, Mesh *me)
{
  const Mesh *me_final = editmesh_final_or_this(object, me);
  const CustomData *cd_vdata = mesh_cd_vdata_get_from_mesh(me_final);
  const CustomData *cd_ldata = mesh_cd_ldata_get_from_mesh(me_final);

  /* Build a temporary mesh ID exposing the vertex/loop custom data so the
   * generic attribute API can be queried without touching the real mesh. */
  Mesh me_query = blender::dna::shallow_zero_initialize();
  BKE_id_attribute_copy_domains_temp(
      ID_ME, cd_vdata, nullptr, cd_ldata, nullptr, nullptr, &me_query.id);

  const CustomDataLayer *active = BKE_id_attributes_active_color_get(&me_query.id);
  const CustomDataLayer *render = BKE_id_attributes_render_color_get(&me_query.id);

  /* Indices are -1 when no matching color layer exists. */
  int active_i = BKE_id_attribute_to_index(
      &me_query.id, active, ATTR_DOMAIN_MASK_COLOR, CD_MASK_COLOR_ALL);
  int render_i = BKE_id_attribute_to_index(
      &me_query.id, render, ATTR_DOMAIN_MASK_COLOR, CD_MASK_COLOR_ALL);

  if (active_i >= 0) {
    cache->cd_needed.vcol |= 1UL << (uint)active_i;
  }

  if (render_i >= 0) {
    cache->cd_needed.vcol |= 1UL << (uint)render_i;
  }
}
|
|
|
|
|
|
2018-12-17 21:24:43 +01:00
|
|
|
/* Request and return the batch drawing every vertex of the mesh. */
GPUBatch *DRW_mesh_batch_cache_get_all_verts(Mesh *me)
{
  MeshBatchCache *mesh_cache = mesh_batch_cache_get(me);
  mesh_batch_cache_add_request(mesh_cache, MBC_ALL_VERTS);
  return DRW_batch_request(&mesh_cache->batch.all_verts);
}
|
|
|
|
|
|
2018-07-18 00:12:21 +02:00
|
|
|
/* Request and return the batch drawing every edge of the mesh. */
GPUBatch *DRW_mesh_batch_cache_get_all_edges(Mesh *me)
{
  MeshBatchCache *mesh_cache = mesh_batch_cache_get(me);
  mesh_batch_cache_add_request(mesh_cache, MBC_ALL_EDGES);
  return DRW_batch_request(&mesh_cache->batch.all_edges);
}
|
|
|
|
|
|
2018-12-18 17:10:38 +01:00
|
|
|
/* Request and return the main surface batch (also registers the
 * per-material surface batches). */
GPUBatch *DRW_mesh_batch_cache_get_surface(Mesh *me)
{
  MeshBatchCache *mesh_cache = mesh_batch_cache_get(me);
  mesh_batch_cache_request_surface_batches(mesh_cache);

  return mesh_cache->batch.surface;
}
|
2017-02-16 16:19:48 +01:00
|
|
|
|
2018-12-18 17:10:38 +01:00
|
|
|
/* Request and return the loose-edges batch, or NULL when the mesh is known
 * to have no loose wire. NOTE: the request flag is recorded even in the NULL
 * case, keeping the requested-flags state consistent with other callers. */
GPUBatch *DRW_mesh_batch_cache_get_loose_edges(Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  mesh_batch_cache_add_request(cache, MBC_LOOSE_EDGES);
  if (cache->no_loose_wire) {
    return nullptr;
  }

  return DRW_batch_request(&cache->batch.loose_edges);
}
|
|
|
|
|
|
2018-12-18 17:10:38 +01:00
|
|
|
/* Request and return the surface batch colored by vertex-group weights. */
GPUBatch *DRW_mesh_batch_cache_get_surface_weights(Mesh *me)
{
  MeshBatchCache *mesh_cache = mesh_batch_cache_get(me);
  mesh_batch_cache_add_request(mesh_cache, MBC_SURFACE_WEIGHTS);
  return DRW_batch_request(&mesh_cache->batch.surface_weights);
}
|
|
|
|
|
|
2018-07-18 00:12:21 +02:00
|
|
|
/* Request and return the edge-detection batch. Optionally reports whether
 * the mesh was detected as manifold via `r_is_manifold`. */
GPUBatch *DRW_mesh_batch_cache_get_edge_detection(Mesh *me, bool *r_is_manifold)
{
  MeshBatchCache *mesh_cache = mesh_batch_cache_get(me);
  mesh_batch_cache_add_request(mesh_cache, MBC_EDGE_DETECTION);
  /* Even if is_manifold is not correct (not updated),
   * the default (not manifold) is just the worst case. */
  if (r_is_manifold != nullptr) {
    *r_is_manifold = mesh_cache->is_manifold;
  }
  return DRW_batch_request(&mesh_cache->batch.edge_detection);
}
|
|
|
|
|
|
2018-12-07 05:03:01 +01:00
|
|
|
/* Request and return the wireframe-edges batch. */
GPUBatch *DRW_mesh_batch_cache_get_wireframes_face(Mesh *me)
{
  MeshBatchCache *mesh_cache = mesh_batch_cache_get(me);
  mesh_batch_cache_add_request(mesh_cache, MBC_WIRE_EDGES);
  return DRW_batch_request(&mesh_cache->batch.wire_edges);
}
|
|
|
|
|
|
2019-04-18 08:00:59 +02:00
|
|
|
/* Request and return the edit-mesh analysis (statvis) batch. */
GPUBatch *DRW_mesh_batch_cache_get_edit_mesh_analysis(Mesh *me)
{
  MeshBatchCache *mesh_cache = mesh_batch_cache_get(me);
  mesh_batch_cache_add_request(mesh_cache, MBC_EDIT_MESH_ANALYSIS);
  return DRW_batch_request(&mesh_cache->batch.edit_mesh_analysis);
}
|
|
|
|
|
|
2022-01-11 15:42:07 +01:00
|
|
|
/* Request one surface batch per material, with the custom-data layers and
 * generic attributes required by the given GPU materials.
 * Returns the cached array of `cache->mat_len` batches (one per material slot);
 * `gpumat_array_len` must match the cache's material count. */
GPUBatch **DRW_mesh_batch_cache_get_surface_shaded(Object *object,
                                                   Mesh *me,
                                                   struct GPUMaterial **gpumat_array,
                                                   uint gpumat_array_len)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  /* Collect which CD layers and generic attributes the materials sample. */
  DRW_Attributes attrs_needed;
  drw_attributes_clear(&attrs_needed);
  DRW_MeshCDMask cd_needed = mesh_cd_calc_used_gpu_layers(
      object, me, gpumat_array, gpumat_array_len, &attrs_needed);

  BLI_assert(gpumat_array_len == cache->mat_len);

  mesh_cd_layers_type_merge(&cache->cd_needed, cd_needed);
  /* Attribute requests may come from multiple threads; merge under the mesh's
   * render mutex. */
  ThreadMutex *mesh_render_mutex = (ThreadMutex *)me->runtime.render_mutex;
  drw_attributes_merge(&cache->attr_needed, &attrs_needed, mesh_render_mutex);
  mesh_batch_cache_request_surface_batches(cache);
  return cache->surface_per_mat;
}
|
|
|
|
|
|
2022-01-11 15:42:07 +01:00
|
|
|
GPUBatch **DRW_mesh_batch_cache_get_surface_texpaint(Object *object, Mesh *me)
{
  /* Texture-paint surface batches: one per material, with the active UV layer. */
  MeshBatchCache *mesh_cache = mesh_batch_cache_get(me);
  texpaint_request_active_uv(mesh_cache, object, me);
  mesh_batch_cache_request_surface_batches(mesh_cache);
  return mesh_cache->surface_per_mat;
}
|
|
|
|
|
|
2022-01-11 15:42:07 +01:00
|
|
|
/* Single surface batch for texture painting (all materials merged),
 * with the active UV layer requested. */
GPUBatch *DRW_mesh_batch_cache_get_surface_texpaint_single(Object *object, Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  texpaint_request_active_uv(cache, object, me);
  mesh_batch_cache_request_surface_batches(cache);
  return cache->batch.surface;
}
|
|
|
|
|
|
2022-01-11 15:42:07 +01:00
|
|
|
/* Surface batch for vertex painting, with the active color layer requested. */
GPUBatch *DRW_mesh_batch_cache_get_surface_vertpaint(Object *object, Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  texpaint_request_active_vcol(cache, object, me);
  mesh_batch_cache_request_surface_batches(cache);
  return cache->batch.surface;
}
|
|
|
|
|
|
2022-01-11 15:42:07 +01:00
|
|
|
/* Surface batch for sculpt vertex colors, with the active sculpt color
 * layer requested (differs from vertpaint: uses the sculpt color request). */
GPUBatch *DRW_mesh_batch_cache_get_surface_sculpt(Object *object, Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  sculpt_request_active_vcol(cache, object, me);
  mesh_batch_cache_request_surface_batches(cache);
  return cache->batch.surface;
}
|
|
|
|
|
|
2022-01-11 15:42:07 +01:00
|
|
|
/* Number of materials used for rendering this mesh (thin public wrapper). */
int DRW_mesh_material_count_get(const Object *object, const Mesh *me)
{
  return mesh_render_mat_len_get(object, me);
}
|
|
|
|
|
|
2020-09-18 19:30:02 +02:00
|
|
|
/* Request the sculpt-overlays batch (e.g. mask / face-set display).
 * Also flags the sculpt-overlays custom-data requirement on the cache. */
GPUBatch *DRW_mesh_batch_cache_get_sculpt_overlays(Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);

  cache->cd_needed.sculpt_overlays = 1;
  mesh_batch_cache_add_request(cache, MBC_SCULPT_OVERLAYS);
  DRW_batch_request(&cache->batch.sculpt_overlays);

  return cache->batch.sculpt_overlays;
}
|
|
|
|
|
|
2018-12-18 21:03:47 +01:00
|
|
|
/** \} */
|
|
|
|
|
|
2019-02-04 01:13:51 +01:00
|
|
|
/* ---------------------------------------------------------------------- */
|
|
|
|
|
/** \name Edit Mode API
|
|
|
|
|
* \{ */
|
2020-06-19 17:02:55 +02:00
|
|
|
|
|
|
|
|
/* Return the position/normal VBO of the final mesh buffers.
 * Requests the surface batch as a side effect so the VBO gets filled. */
GPUVertBuf *DRW_mesh_batch_cache_pos_vertbuf_get(Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  /* Request surface to trigger the vbo filling. Otherwise it may do nothing. */
  mesh_batch_cache_request_surface_batches(cache);

  /* Mark the VBO itself as requested (no owning batch, hence nullptr). */
  DRW_vbo_request(nullptr, &cache->final.buff.vbo.pos_nor);
  return cache->final.buff.vbo.pos_nor;
}
|
|
|
|
|
|
|
|
|
|
/** \} */
|
|
|
|
|
|
|
|
|
|
/* ---------------------------------------------------------------------- */
|
|
|
|
|
/** \name Edit Mode API
|
|
|
|
|
* \{ */
|
2019-02-04 01:13:51 +01:00
|
|
|
|
|
|
|
|
GPUBatch *DRW_mesh_batch_cache_get_edit_triangles(Mesh *me)
{
  /* Edit-mode face triangles batch. */
  MeshBatchCache *mesh_cache = mesh_batch_cache_get(me);
  mesh_batch_cache_add_request(mesh_cache, MBC_EDIT_TRIANGLES);
  return DRW_batch_request(&mesh_cache->batch.edit_triangles);
}
|
|
|
|
|
|
|
|
|
|
GPUBatch *DRW_mesh_batch_cache_get_edit_edges(Mesh *me)
{
  /* Edit-mode edges batch. */
  MeshBatchCache *mesh_cache = mesh_batch_cache_get(me);
  mesh_batch_cache_add_request(mesh_cache, MBC_EDIT_EDGES);
  return DRW_batch_request(&mesh_cache->batch.edit_edges);
}
|
|
|
|
|
|
|
|
|
|
GPUBatch *DRW_mesh_batch_cache_get_edit_vertices(Mesh *me)
{
  /* Edit-mode vertices batch. */
  MeshBatchCache *mesh_cache = mesh_batch_cache_get(me);
  mesh_batch_cache_add_request(mesh_cache, MBC_EDIT_VERTICES);
  return DRW_batch_request(&mesh_cache->batch.edit_vertices);
}
|
|
|
|
|
|
2019-07-14 16:49:44 +02:00
|
|
|
/* Edit-mode vertex-normals batch. */
GPUBatch *DRW_mesh_batch_cache_get_edit_vnors(Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  mesh_batch_cache_add_request(cache, MBC_EDIT_VNOR);
  return DRW_batch_request(&cache->batch.edit_vnor);
}
|
|
|
|
|
|
2019-02-04 01:13:51 +01:00
|
|
|
/* Edit-mode loop-normals batch. */
GPUBatch *DRW_mesh_batch_cache_get_edit_lnors(Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  mesh_batch_cache_add_request(cache, MBC_EDIT_LNOR);
  return DRW_batch_request(&cache->batch.edit_lnor);
}
|
|
|
|
|
|
|
|
|
|
/* Edit-mode face-dots batch (face centers). */
GPUBatch *DRW_mesh_batch_cache_get_edit_facedots(Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  mesh_batch_cache_add_request(cache, MBC_EDIT_FACEDOTS);
  return DRW_batch_request(&cache->batch.edit_fdots);
}
|
|
|
|
|
|
2019-10-15 01:49:53 +02:00
|
|
|
/* Edit-mode skin-modifier root vertices batch. */
GPUBatch *DRW_mesh_batch_cache_get_edit_skin_roots(Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  mesh_batch_cache_add_request(cache, MBC_SKIN_ROOTS);
  return DRW_batch_request(&cache->batch.edit_skin_roots);
}
|
|
|
|
|
|
2019-02-04 01:13:51 +01:00
|
|
|
/** \} */
|
|
|
|
|
|
2018-12-18 21:03:47 +01:00
|
|
|
/* ---------------------------------------------------------------------- */
|
|
|
|
|
/** \name Edit Mode selection API
|
|
|
|
|
* \{ */
|
|
|
|
|
|
2018-12-22 23:57:12 +01:00
|
|
|
GPUBatch *DRW_mesh_batch_cache_get_triangles_with_select_id(Mesh *me)
{
  /* Face-selection batch carrying per-element select IDs. */
  MeshBatchCache *mesh_cache = mesh_batch_cache_get(me);
  mesh_batch_cache_add_request(mesh_cache, MBC_EDIT_SELECTION_FACES);
  return DRW_batch_request(&mesh_cache->batch.edit_selection_faces);
}
|
|
|
|
|
|
2018-12-22 23:57:12 +01:00
|
|
|
/* Face-dot selection batch carrying per-element select IDs. */
GPUBatch *DRW_mesh_batch_cache_get_facedots_with_select_id(Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  mesh_batch_cache_add_request(cache, MBC_EDIT_SELECTION_FACEDOTS);
  return DRW_batch_request(&cache->batch.edit_selection_fdots);
}
|
|
|
|
|
|
2018-12-22 23:57:12 +01:00
|
|
|
GPUBatch *DRW_mesh_batch_cache_get_edges_with_select_id(Mesh *me)
{
  /* Edge-selection batch carrying per-element select IDs. */
  MeshBatchCache *mesh_cache = mesh_batch_cache_get(me);
  mesh_batch_cache_add_request(mesh_cache, MBC_EDIT_SELECTION_EDGES);
  return DRW_batch_request(&mesh_cache->batch.edit_selection_edges);
}
|
|
|
|
|
|
2018-12-22 23:57:12 +01:00
|
|
|
/* Vertex-selection batch carrying per-element select IDs. */
GPUBatch *DRW_mesh_batch_cache_get_verts_with_select_id(Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  mesh_batch_cache_add_request(cache, MBC_EDIT_SELECTION_VERTS);
  return DRW_batch_request(&cache->batch.edit_selection_verts);
}
|
|
|
|
|
|
2018-12-18 21:03:47 +01:00
|
|
|
/** \} */
|
2017-05-22 23:31:46 +10:00
|
|
|
|
2018-12-18 21:03:47 +01:00
|
|
|
/* ---------------------------------------------------------------------- */
|
|
|
|
|
/** \name UV Image editor API
|
|
|
|
|
* \{ */
|
2018-12-18 02:18:55 +01:00
|
|
|
|
2022-01-11 15:42:07 +01:00
|
|
|
/* Flag the CD layers needed by the UV editor batches on the cache:
 * the active UV layer, the edit-UV data, and the active mask UV layer. */
static void edituv_request_active_uv(MeshBatchCache *cache, Object *object, Mesh *me)
{
  DRW_MeshCDMask cd_needed;
  mesh_cd_layers_type_clear(&cd_needed);
  mesh_cd_calc_active_uv_layer(object, me, &cd_needed);
  mesh_cd_calc_edit_uv_layer(me, &cd_needed);

  /* The assert runs before the mask-UV request: only the edit-UV layer is
   * mandatory for edituv batches. */
  BLI_assert(cd_needed.edit_uv != 0 &&
             "No uv layer available in edituv, but batches requested anyway!");

  mesh_cd_calc_active_mask_uv_layer(object, me, &cd_needed);
  mesh_cd_layers_type_merge(&cache->cd_needed, cd_needed);
}
|
|
|
|
|
|
2022-01-11 15:42:07 +01:00
|
|
|
GPUBatch *DRW_mesh_batch_cache_get_edituv_faces_stretch_area(Object *object,
                                                             Mesh *me,
                                                             float **tot_area,
                                                             float **tot_uv_area)
{
  /* UV-stretch (area) overlay batch. The optional outputs receive pointers to
   * the cached 3D-area / UV-area totals used to normalize the overlay. */
  MeshBatchCache *mesh_cache = mesh_batch_cache_get(me);
  edituv_request_active_uv(mesh_cache, object, me);
  mesh_batch_cache_add_request(mesh_cache, MBC_EDITUV_FACES_STRETCH_AREA);

  if (tot_area) {
    *tot_area = &mesh_cache->tot_area;
  }
  if (tot_uv_area) {
    *tot_uv_area = &mesh_cache->tot_uv_area;
  }
  return DRW_batch_request(&mesh_cache->batch.edituv_faces_stretch_area);
}
|
|
|
|
|
|
2022-01-11 15:42:07 +01:00
|
|
|
/* UV-stretch (angle) overlay batch. */
GPUBatch *DRW_mesh_batch_cache_get_edituv_faces_stretch_angle(Object *object, Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  edituv_request_active_uv(cache, object, me);
  mesh_batch_cache_add_request(cache, MBC_EDITUV_FACES_STRETCH_ANGLE);
  return DRW_batch_request(&cache->batch.edituv_faces_stretch_angle);
}
|
|
|
|
|
|
2022-01-11 15:42:07 +01:00
|
|
|
GPUBatch *DRW_mesh_batch_cache_get_edituv_faces(Object *object, Mesh *me)
{
  /* UV editor faces batch. */
  MeshBatchCache *mesh_cache = mesh_batch_cache_get(me);
  edituv_request_active_uv(mesh_cache, object, me);
  mesh_batch_cache_add_request(mesh_cache, MBC_EDITUV_FACES);
  return DRW_batch_request(&mesh_cache->batch.edituv_faces);
}
|
|
|
|
|
|
2022-01-11 15:42:07 +01:00
|
|
|
/* UV editor edges batch. */
GPUBatch *DRW_mesh_batch_cache_get_edituv_edges(Object *object, Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  edituv_request_active_uv(cache, object, me);
  mesh_batch_cache_add_request(cache, MBC_EDITUV_EDGES);
  return DRW_batch_request(&cache->batch.edituv_edges);
}
|
|
|
|
|
|
2022-01-11 15:42:07 +01:00
|
|
|
GPUBatch *DRW_mesh_batch_cache_get_edituv_verts(Object *object, Mesh *me)
{
  /* UV editor vertices batch. */
  MeshBatchCache *mesh_cache = mesh_batch_cache_get(me);
  edituv_request_active_uv(mesh_cache, object, me);
  mesh_batch_cache_add_request(mesh_cache, MBC_EDITUV_VERTS);
  return DRW_batch_request(&mesh_cache->batch.edituv_verts);
}
|
|
|
|
|
|
2022-01-11 15:42:07 +01:00
|
|
|
/* UV editor face-dots batch. */
GPUBatch *DRW_mesh_batch_cache_get_edituv_facedots(Object *object, Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  edituv_request_active_uv(cache, object, me);
  mesh_batch_cache_add_request(cache, MBC_EDITUV_FACEDOTS);
  return DRW_batch_request(&cache->batch.edituv_fdots);
}
|
|
|
|
|
|
2022-01-11 15:42:07 +01:00
|
|
|
/* Wire-loop batch in UV space (UV edges shown on the 3D surface). */
GPUBatch *DRW_mesh_batch_cache_get_uv_edges(Object *object, Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  edituv_request_active_uv(cache, object, me);
  mesh_batch_cache_add_request(cache, MBC_WIRE_LOOPS_UVS);
  return DRW_batch_request(&cache->batch.wire_loops_uvs);
}
|
|
|
|
|
|
2022-01-11 15:42:07 +01:00
|
|
|
/* Wire-loop batch over the surface; requests the active UV for texture paint. */
GPUBatch *DRW_mesh_batch_cache_get_surface_edges(Object *object, Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  texpaint_request_active_uv(cache, object, me);
  mesh_batch_cache_add_request(cache, MBC_WIRE_LOOPS);
  return DRW_batch_request(&cache->batch.wire_loops);
}
|
|
|
|
|
|
2017-05-05 05:07:52 +10:00
|
|
|
/** \} */
|
2018-12-08 20:10:20 +01:00
|
|
|
|
|
|
|
|
/* ---------------------------------------------------------------------- */
|
|
|
|
|
/** \name Grouped batch generation
|
|
|
|
|
* \{ */
|
|
|
|
|
|
2019-04-19 18:52:38 +02:00
|
|
|
/* Garbage-collect shaded batches: if the CD layers / attributes used this
 * frame still match what was used recently, refresh the timestamp; otherwise,
 * once `U.vbotimeout` frames pass without a match, discard the shaded
 * triangle data. Called once per frame with the current time `ctime`. */
void DRW_mesh_batch_cache_free_old(Mesh *me, int ctime)
{
  MeshBatchCache *cache = static_cast<MeshBatchCache *>(me->runtime.batch_cache);

  if (cache == nullptr) {
    return;
  }

  /* Both checks update `lastmatch` independently (intentionally not combined
   * into one condition): CD layers and generic attributes are tracked apart. */
  if (mesh_cd_layers_type_equal(cache->cd_used_over_time, cache->cd_used)) {
    cache->lastmatch = ctime;
  }

  if (drw_attributes_overlap(&cache->attr_used_over_time, &cache->attr_used)) {
    cache->lastmatch = ctime;
  }

  if (ctime - cache->lastmatch > U.vbotimeout) {
    mesh_batch_cache_discard_shaded_tri(cache);
  }

  /* Reset the "used over time" tracking for the next interval. */
  mesh_cd_layers_type_clear(&cache->cd_used_over_time);
  drw_attributes_clear(&cache->attr_used_over_time);
}
|
|
|
|
|
|
|
|
|
|
static void drw_add_attributes_vbo(GPUBatch *batch,
|
|
|
|
|
MeshBufferList *mbuflist,
|
EEVEE: support Curves attributes rendering
This adds support to render Curves attributes in EEVEE.
Each attribute is stored in a texture derived from a VBO. As the
shading group needs the textures to be valid upon creation, the
attributes are created and setup during its very creation, instead
of doing it lazily via create_requested which we cannot rely on
anyway as contrary to the mesh batch, we do cannot really tell if
attributes need to be updated or else via some `DRW_batch_requested`.
Since point attributes need refinement, and since attributes are all
cast to vec4/float4 to account for differences in type conversions
between Blender and OpenGL, the refinement shader for points is
used as is. The point attributes are stored for each subdivision level
in CurvesEvalFinalCache. Each subdivision level also keeps track of the
attributes already in use so they are properly updated when needed.
Some basic garbage collection was added similar to what is done
for meshes: if the attributes used over time have been different
from the currently used attributes for too long, then the buffers
are freed, ensuring that stale attributesare removed.
This adds `CurvesInfos` to the shader creation info, which stores
the scope in which the attributes are defined. Scopes are stored
as booleans, in an array indexed by attribute loading order which
is also the order in which the attributes were added to the material.
A mapping is necessary between the indices used for the scoping, and
the ones used in the Curves cache, as this may contain stale
attributes which have not been garbage collected yet.
Common utilities with the mesh code for handling requested
attributes were moved to a separate file.
Differential Revision: https://developer.blender.org/D14916
2022-05-24 05:02:57 +02:00
|
|
|
DRW_Attributes *attr_used)
|
2021-10-26 18:16:33 -03:00
|
|
|
{
|
|
|
|
|
for (int i = 0; i < attr_used->num_requests; i++) {
|
|
|
|
|
DRW_vbo_request(batch, &mbuflist->vbo.attr[i]);
|
|
|
|
|
}
|
2019-04-19 02:22:22 +02:00
|
|
|
}
|
|
|
|
|
|
2020-07-17 08:31:03 +02:00
|
|
|
#ifdef DEBUG
|
|
|
|
|
/* Sanity check function to test if all requested batches are available. */
|
|
|
|
|
/* Debug-only sanity check: after flushing the task graph, assert that no
 * batch, VBO or IBO in any of the three buffer caches (final, cage, uv_cage)
 * is still in the "requested but not created" state. */
static void drw_mesh_batch_cache_check_available(struct TaskGraph *task_graph, Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  /* Make sure all requested batches have been setup. */
  /* NOTE: The next line creates a different scheduling than during release builds what can lead to
   * some issues (See T77867 where we needed to disable this function in order to debug what was
   * happening in release builds). */
  BLI_task_graph_work_and_wait(task_graph);
  /* The structs are iterated as flat arrays of GPU buffer pointers; the casts
   * rely on `cache->batch` / `buff.vbo` / `buff.ibo` being pointer-only structs. */
  for (int i = 0; i < MBC_BATCH_LEN; i++) {
    BLI_assert(!DRW_batch_requested(((GPUBatch **)&cache->batch)[i], (GPUPrimType)0));
  }
  for (int i = 0; i < MBC_VBO_LEN; i++) {
    BLI_assert(!DRW_vbo_requested(((GPUVertBuf **)&cache->final.buff.vbo)[i]));
  }
  for (int i = 0; i < MBC_IBO_LEN; i++) {
    BLI_assert(!DRW_ibo_requested(((GPUIndexBuf **)&cache->final.buff.ibo)[i]));
  }
  for (int i = 0; i < MBC_VBO_LEN; i++) {
    BLI_assert(!DRW_vbo_requested(((GPUVertBuf **)&cache->cage.buff.vbo)[i]));
  }
  for (int i = 0; i < MBC_IBO_LEN; i++) {
    BLI_assert(!DRW_ibo_requested(((GPUIndexBuf **)&cache->cage.buff.ibo)[i]));
  }
  for (int i = 0; i < MBC_VBO_LEN; i++) {
    BLI_assert(!DRW_vbo_requested(((GPUVertBuf **)&cache->uv_cage.buff.vbo)[i]));
  }
  for (int i = 0; i < MBC_IBO_LEN; i++) {
    BLI_assert(!DRW_ibo_requested(((GPUIndexBuf **)&cache->uv_cage.buff.ibo)[i]));
  }
}
|
|
|
|
|
#endif
|
|
|
|
|
|
2020-06-02 15:07:17 +02:00
|
|
|
void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
|
|
|
|
|
Object *ob,
|
|
|
|
|
Mesh *me,
|
|
|
|
|
const Scene *scene,
|
|
|
|
|
const bool is_paint_mode,
|
|
|
|
|
const bool use_hide)
|
|
|
|
|
{
|
|
|
|
|
BLI_assert(task_graph);
|
2022-06-10 10:29:35 +02:00
|
|
|
const ToolSettings *ts = nullptr;
|
2019-08-15 11:39:45 +02:00
|
|
|
if (scene) {
|
|
|
|
|
ts = scene->toolsettings;
|
|
|
|
|
}
|
2018-12-08 20:10:20 +01:00
|
|
|
MeshBatchCache *cache = mesh_batch_cache_get(me);
|
2020-01-13 17:29:31 +01:00
|
|
|
bool cd_uv_update = false;
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2019-05-08 00:12:24 +02:00
|
|
|
/* Early out */
|
2019-05-11 13:16:57 +02:00
|
|
|
if (cache->batch_requested == 0) {
|
2019-05-08 22:01:34 +02:00
|
|
|
#ifdef DEBUG
|
2020-07-17 08:31:03 +02:00
|
|
|
drw_mesh_batch_cache_check_available(task_graph, me);
|
2020-02-11 12:01:29 +11:00
|
|
|
#endif
|
2020-07-17 08:31:03 +02:00
|
|
|
return;
|
2018-12-17 23:00:05 +01:00
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2022-06-05 12:04:42 +02:00
|
|
|
#ifdef DEBUG
|
|
|
|
|
/* Map the index of a buffer to a flag containing all batches that use it. */
|
|
|
|
|
Map<int, DRWBatchFlag> batches_that_use_buffer_local;
|
|
|
|
|
|
|
|
|
|
auto assert_deps_valid = [&](DRWBatchFlag batch_flag, Span<int> used_buffer_indices) {
|
|
|
|
|
for (const int buffer_index : used_buffer_indices) {
|
|
|
|
|
batches_that_use_buffer_local.add_or_modify(
|
|
|
|
|
buffer_index,
|
|
|
|
|
[&](DRWBatchFlag *value) { *value = batch_flag; },
|
|
|
|
|
[&](DRWBatchFlag *value) { *value |= batch_flag; });
|
|
|
|
|
BLI_assert(batches_that_use_buffer(buffer_index) & batch_flag);
|
|
|
|
|
}
|
|
|
|
|
};
|
|
|
|
|
#else
|
|
|
|
|
auto assert_deps_valid = [&](DRWBatchFlag UNUSED(batch_flag),
|
|
|
|
|
Span<int> UNUSED(used_buffer_indices)) {};
|
|
|
|
|
|
|
|
|
|
#endif
|
|
|
|
|
|
2020-01-08 22:20:45 +11:00
|
|
|
/* Sanity check. */
|
2022-06-10 10:29:35 +02:00
|
|
|
if ((me->edit_mesh != nullptr) && (ob->mode & OB_MODE_EDIT)) {
|
|
|
|
|
BLI_assert(BKE_object_get_editmesh_eval_final(ob) != nullptr);
|
2020-01-08 22:20:45 +11:00
|
|
|
}
|
|
|
|
|
|
2022-06-10 10:29:35 +02:00
|
|
|
const bool is_editmode = (me->edit_mesh != nullptr) &&
|
|
|
|
|
(BKE_object_get_editmesh_eval_final(ob) != nullptr) &&
|
2022-01-11 15:42:07 +01:00
|
|
|
DRW_object_is_in_edit_mode(ob);
|
2020-08-25 23:49:55 +10:00
|
|
|
|
|
|
|
|
/* This could be set for paint mode too, currently it's only used for edit-mode. */
|
|
|
|
|
const bool is_mode_active = is_editmode && DRW_object_is_in_edit_mode(ob);
|
2020-01-08 22:20:45 +11:00
|
|
|
|
2019-05-11 13:10:28 +02:00
|
|
|
DRWBatchFlag batch_requested = cache->batch_requested;
|
2022-06-05 12:04:42 +02:00
|
|
|
cache->batch_requested = (DRWBatchFlag)0;
|
2019-05-11 13:10:28 +02:00
|
|
|
|
|
|
|
|
if (batch_requested & MBC_SURFACE_WEIGHTS) {
|
2019-05-08 00:12:24 +02:00
|
|
|
/* Check vertex weights. */
|
2022-06-10 10:29:35 +02:00
|
|
|
if ((cache->batch.surface_weights != nullptr) && (ts != nullptr)) {
|
2019-05-08 00:12:24 +02:00
|
|
|
struct DRW_MeshWeightState wstate;
|
|
|
|
|
BLI_assert(ob->type == OB_MESH);
|
|
|
|
|
drw_mesh_weight_state_extract(ob, me, ts, is_paint_mode, &wstate);
|
|
|
|
|
mesh_batch_cache_check_vertex_group(cache, &wstate);
|
|
|
|
|
drw_mesh_weight_state_copy(&cache->weight_state, &wstate);
|
|
|
|
|
drw_mesh_weight_state_clear(&wstate);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2019-07-14 16:49:44 +02:00
|
|
|
if (batch_requested &
|
2020-07-17 13:47:10 +02:00
|
|
|
(MBC_SURFACE | MBC_WIRE_LOOPS_UVS | MBC_EDITUV_FACES_STRETCH_AREA |
|
2019-09-03 13:42:11 +02:00
|
|
|
MBC_EDITUV_FACES_STRETCH_ANGLE | MBC_EDITUV_FACES | MBC_EDITUV_EDGES | MBC_EDITUV_VERTS)) {
|
2019-06-13 20:22:10 +02:00
|
|
|
/* Modifiers will only generate an orco layer if the mesh is deformed. */
|
2019-05-08 00:12:24 +02:00
|
|
|
if (cache->cd_needed.orco != 0) {
|
2019-09-28 00:19:30 +02:00
|
|
|
/* Orco is always extracted from final mesh. */
|
2022-01-11 15:42:07 +01:00
|
|
|
Mesh *me_final = (me->edit_mesh) ? BKE_object_get_editmesh_eval_final(ob) : me;
|
2022-06-10 10:29:35 +02:00
|
|
|
if (CustomData_get_layer(&me_final->vdata, CD_ORCO) == nullptr) {
|
2019-07-14 16:49:44 +02:00
|
|
|
/* Skip orco calculation */
|
2019-05-08 00:12:24 +02:00
|
|
|
cache->cd_needed.orco = 0;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2021-10-26 18:16:33 -03:00
|
|
|
ThreadMutex *mesh_render_mutex = (ThreadMutex *)me->runtime.render_mutex;
|
|
|
|
|
|
2019-05-08 00:12:24 +02:00
|
|
|
/* Verify that all surface batches have needed attribute layers.
|
|
|
|
|
*/
|
|
|
|
|
/* TODO(fclem): We could be a bit smarter here and only do it per
|
|
|
|
|
* material. */
|
|
|
|
|
bool cd_overlap = mesh_cd_layers_type_overlap(cache->cd_used, cache->cd_needed);
|
EEVEE: support Curves attributes rendering
This adds support to render Curves attributes in EEVEE.
Each attribute is stored in a texture derived from a VBO. As the
shading group needs the textures to be valid upon creation, the
attributes are created and setup during its very creation, instead
of doing it lazily via create_requested which we cannot rely on
anyway as contrary to the mesh batch, we do cannot really tell if
attributes need to be updated or else via some `DRW_batch_requested`.
Since point attributes need refinement, and since attributes are all
cast to vec4/float4 to account for differences in type conversions
between Blender and OpenGL, the refinement shader for points is
used as is. The point attributes are stored for each subdivision level
in CurvesEvalFinalCache. Each subdivision level also keeps track of the
attributes already in use so they are properly updated when needed.
Some basic garbage collection was added similar to what is done
for meshes: if the attributes used over time have been different
from the currently used attributes for too long, then the buffers
are freed, ensuring that stale attributes are removed.
This adds `CurvesInfos` to the shader creation info, which stores
the scope in which the attributes are defined. Scopes are stored
as booleans, in an array indexed by attribute loading order which
is also the order in which the attributes were added to the material.
A mapping is necessary between the indices used for the scoping, and
the ones used in the Curves cache, as this may contain stale
attributes which have not been garbage collected yet.
Common utilities with the mesh code for handling requested
attributes were moved to a separate file.
Differential Revision: https://developer.blender.org/D14916
2022-05-24 05:02:57 +02:00
|
|
|
bool attr_overlap = drw_attributes_overlap(&cache->attr_used, &cache->attr_needed);
|
2021-10-26 18:16:33 -03:00
|
|
|
if (cd_overlap == false || attr_overlap == false) {
|
2021-08-23 13:28:55 -03:00
|
|
|
FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
|
2019-08-14 22:43:44 +02:00
|
|
|
if ((cache->cd_used.uv & cache->cd_needed.uv) != cache->cd_needed.uv) {
|
2021-08-23 13:28:55 -03:00
|
|
|
GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.uv);
|
2020-01-13 17:29:31 +01:00
|
|
|
cd_uv_update = true;
|
2019-08-14 22:43:44 +02:00
|
|
|
}
|
|
|
|
|
if ((cache->cd_used.tan & cache->cd_needed.tan) != cache->cd_needed.tan ||
|
2019-07-14 16:49:44 +02:00
|
|
|
cache->cd_used.tan_orco != cache->cd_needed.tan_orco) {
|
2021-08-23 13:28:55 -03:00
|
|
|
GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.tan);
|
2019-07-14 16:49:44 +02:00
|
|
|
}
|
|
|
|
|
if (cache->cd_used.orco != cache->cd_needed.orco) {
|
2021-08-23 13:28:55 -03:00
|
|
|
GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.orco);
|
2019-07-14 16:49:44 +02:00
|
|
|
}
|
2020-09-18 19:30:02 +02:00
|
|
|
if (cache->cd_used.sculpt_overlays != cache->cd_needed.sculpt_overlays) {
|
2021-08-23 13:28:55 -03:00
|
|
|
GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.sculpt_data);
|
2020-09-18 19:30:02 +02:00
|
|
|
}
|
2021-10-26 18:16:33 -03:00
|
|
|
if ((cache->cd_used.vcol & cache->cd_needed.vcol) != cache->cd_needed.vcol) {
|
2021-08-23 13:28:55 -03:00
|
|
|
GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.vcol);
|
2019-07-14 16:49:44 +02:00
|
|
|
}
|
EEVEE: support Curves attributes rendering
This adds support to render Curves attributes in EEVEE.
Each attribute is stored in a texture derived from a VBO. As the
shading group needs the textures to be valid upon creation, the
attributes are created and setup during its very creation, instead
of doing it lazily via create_requested which we cannot rely on
anyway as, contrary to the mesh batch, we cannot really tell if
attributes need to be updated or else via some `DRW_batch_requested`.
Since point attributes need refinement, and since attributes are all
cast to vec4/float4 to account for differences in type conversions
between Blender and OpenGL, the refinement shader for points is
used as is. The point attributes are stored for each subdivision level
in CurvesEvalFinalCache. Each subdivision level also keeps track of the
attributes already in use so they are properly updated when needed.
Some basic garbage collection was added similar to what is done
for meshes: if the attributes used over time have been different
from the currently used attributes for too long, then the buffers
are freed, ensuring that stale attributes are removed.
This adds `CurvesInfos` to the shader creation info, which stores
the scope in which the attributes are defined. Scopes are stored
as booleans, in an array indexed by attribute loading order which
is also the order in which the attributes were added to the material.
A mapping is necessary between the indices used for the scoping, and
the ones used in the Curves cache, as this may contain stale
attributes which have not been garbage collected yet.
Common utilities with the mesh code for handling requested
attributes were moved to a separate file.
Differential Revision: https://developer.blender.org/D14916
2022-05-24 05:02:57 +02:00
|
|
|
if (!drw_attributes_overlap(&cache->attr_used, &cache->attr_needed)) {
|
2021-10-26 18:16:33 -03:00
|
|
|
for (int i = 0; i < GPU_MAX_ATTR; i++) {
|
|
|
|
|
GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.attr[i]);
|
|
|
|
|
}
|
|
|
|
|
}
|
2019-05-08 00:12:24 +02:00
|
|
|
}
|
|
|
|
|
/* We can't discard batches at this point as they have been
|
|
|
|
|
* referenced for drawing. Just clear them in place. */
|
2019-09-08 00:12:26 +10:00
|
|
|
for (int i = 0; i < cache->mat_len; i++) {
|
2019-07-14 16:49:44 +02:00
|
|
|
GPU_BATCH_CLEAR_SAFE(cache->surface_per_mat[i]);
|
2019-05-08 00:12:24 +02:00
|
|
|
}
|
|
|
|
|
GPU_BATCH_CLEAR_SAFE(cache->batch.surface);
|
2020-07-17 13:47:10 +02:00
|
|
|
cache->batch_ready &= ~(MBC_SURFACE);
|
2019-05-08 00:12:24 +02:00
|
|
|
|
|
|
|
|
mesh_cd_layers_type_merge(&cache->cd_used, cache->cd_needed);
|
EEVEE: support Curves attributes rendering
This adds support to render Curves attributes in EEVEE.
Each attribute is stored in a texture derived from a VBO. As the
shading group needs the textures to be valid upon creation, the
attributes are created and setup during its very creation, instead
of doing it lazily via create_requested which we cannot rely on
anyway as, contrary to the mesh batch, we cannot really tell if
attributes need to be updated or else via some `DRW_batch_requested`.
Since point attributes need refinement, and since attributes are all
cast to vec4/float4 to account for differences in type conversions
between Blender and OpenGL, the refinement shader for points is
used as is. The point attributes are stored for each subdivision level
in CurvesEvalFinalCache. Each subdivision level also keeps track of the
attributes already in use so they are properly updated when needed.
Some basic garbage collection was added similar to what is done
for meshes: if the attributes used over time have been different
from the currently used attributes for too long, then the buffers
are freed, ensuring that stale attributes are removed.
This adds `CurvesInfos` to the shader creation info, which stores
the scope in which the attributes are defined. Scopes are stored
as booleans, in an array indexed by attribute loading order which
is also the order in which the attributes were added to the material.
A mapping is necessary between the indices used for the scoping, and
the ones used in the Curves cache, as this may contain stale
attributes which have not been garbage collected yet.
Common utilities with the mesh code for handling requested
attributes were moved to a separate file.
Differential Revision: https://developer.blender.org/D14916
2022-05-24 05:02:57 +02:00
|
|
|
drw_attributes_merge(&cache->attr_used, &cache->attr_needed, mesh_render_mutex);
|
2019-05-08 00:12:24 +02:00
|
|
|
}
|
|
|
|
|
mesh_cd_layers_type_merge(&cache->cd_used_over_time, cache->cd_needed);
|
|
|
|
|
mesh_cd_layers_type_clear(&cache->cd_needed);
|
2021-10-26 18:16:33 -03:00
|
|
|
|
EEVEE: support Curves attributes rendering
This adds support to render Curves attributes in EEVEE.
Each attribute is stored in a texture derived from a VBO. As the
shading group needs the textures to be valid upon creation, the
attributes are created and setup during its very creation, instead
of doing it lazily via create_requested which we cannot rely on
anyway as, contrary to the mesh batch, we cannot really tell if
attributes need to be updated or else via some `DRW_batch_requested`.
Since point attributes need refinement, and since attributes are all
cast to vec4/float4 to account for differences in type conversions
between Blender and OpenGL, the refinement shader for points is
used as is. The point attributes are stored for each subdivision level
in CurvesEvalFinalCache. Each subdivision level also keeps track of the
attributes already in use so they are properly updated when needed.
Some basic garbage collection was added similar to what is done
for meshes: if the attributes used over time have been different
from the currently used attributes for too long, then the buffers
are freed, ensuring that stale attributes are removed.
This adds `CurvesInfos` to the shader creation info, which stores
the scope in which the attributes are defined. Scopes are stored
as booleans, in an array indexed by attribute loading order which
is also the order in which the attributes were added to the material.
A mapping is necessary between the indices used for the scoping, and
the ones used in the Curves cache, as this may contain stale
attributes which have not been garbage collected yet.
Common utilities with the mesh code for handling requested
attributes were moved to a separate file.
Differential Revision: https://developer.blender.org/D14916
2022-05-24 05:02:57 +02:00
|
|
|
drw_attributes_merge(&cache->attr_used_over_time, &cache->attr_needed, mesh_render_mutex);
|
|
|
|
|
drw_attributes_clear(&cache->attr_needed);
|
2019-05-08 00:12:24 +02:00
|
|
|
}
|
|
|
|
|
|
2019-05-11 13:10:28 +02:00
|
|
|
if (batch_requested & MBC_EDITUV) {
|
2019-05-08 00:12:24 +02:00
|
|
|
/* Discard UV batches if sync_selection changes */
|
2020-01-13 17:29:31 +01:00
|
|
|
const bool is_uvsyncsel = ts && (ts->uv_flag & UV_SYNC_SELECTION);
|
|
|
|
|
if (cd_uv_update || (cache->is_uvsyncsel != is_uvsyncsel)) {
|
|
|
|
|
cache->is_uvsyncsel = is_uvsyncsel;
|
2021-08-23 13:28:55 -03:00
|
|
|
FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
|
|
|
|
|
GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.edituv_data);
|
|
|
|
|
GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.fdots_uv);
|
|
|
|
|
GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.fdots_edituv_data);
|
|
|
|
|
GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_tris);
|
|
|
|
|
GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_lines);
|
|
|
|
|
GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_points);
|
|
|
|
|
GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_fdots);
|
2019-05-08 00:12:24 +02:00
|
|
|
}
|
2020-01-13 17:29:31 +01:00
|
|
|
/* We only clear the batches as they may already have been
|
|
|
|
|
* referenced. */
|
|
|
|
|
GPU_BATCH_CLEAR_SAFE(cache->batch.wire_loops_uvs);
|
|
|
|
|
GPU_BATCH_CLEAR_SAFE(cache->batch.edituv_faces_stretch_area);
|
|
|
|
|
GPU_BATCH_CLEAR_SAFE(cache->batch.edituv_faces_stretch_angle);
|
|
|
|
|
GPU_BATCH_CLEAR_SAFE(cache->batch.edituv_faces);
|
|
|
|
|
GPU_BATCH_CLEAR_SAFE(cache->batch.edituv_edges);
|
|
|
|
|
GPU_BATCH_CLEAR_SAFE(cache->batch.edituv_verts);
|
|
|
|
|
GPU_BATCH_CLEAR_SAFE(cache->batch.edituv_fdots);
|
|
|
|
|
cache->batch_ready &= ~MBC_EDITUV;
|
2019-05-08 00:12:24 +02:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Second chance to early out */
|
2019-05-11 13:10:28 +02:00
|
|
|
if ((batch_requested & ~cache->batch_ready) == 0) {
|
2019-05-08 22:01:34 +02:00
|
|
|
#ifdef DEBUG
|
2020-07-17 08:31:03 +02:00
|
|
|
drw_mesh_batch_cache_check_available(task_graph, me);
|
2020-02-11 12:01:29 +11:00
|
|
|
#endif
|
2020-07-17 08:31:03 +02:00
|
|
|
return;
|
2019-04-18 17:25:04 +02:00
|
|
|
}
|
|
|
|
|
|
2020-11-18 12:16:43 +01:00
|
|
|
/* TODO(pablodp606): This always updates the sculpt normals for regular drawing (non-PBVH).
|
|
|
|
|
* This makes tools that sample the surface per step get wrong normals until a redraw happens.
|
|
|
|
|
* Normal updates should be part of the brush loop and only run during the stroke when the
|
|
|
|
|
* brush needs to sample the surface. The drawing code should only update the normals
|
|
|
|
|
* per redraw when smooth shading is enabled. */
|
|
|
|
|
const bool do_update_sculpt_normals = ob->sculpt && ob->sculpt->pbvh;
|
|
|
|
|
if (do_update_sculpt_normals) {
|
2022-06-05 12:04:42 +02:00
|
|
|
Mesh *mesh = static_cast<Mesh *>(ob->data);
|
2020-11-18 12:16:43 +01:00
|
|
|
BKE_pbvh_update_normals(ob->sculpt->pbvh, mesh->runtime.subdiv_ccg);
|
|
|
|
|
}
|
|
|
|
|
|
2019-05-11 13:10:28 +02:00
|
|
|
cache->batch_ready |= batch_requested;
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2022-01-11 15:42:07 +01:00
|
|
|
bool do_cage = false, do_uvcage = false;
|
|
|
|
|
if (is_editmode) {
|
|
|
|
|
Mesh *editmesh_eval_final = BKE_object_get_editmesh_eval_final(ob);
|
|
|
|
|
Mesh *editmesh_eval_cage = BKE_object_get_editmesh_eval_cage(ob);
|
2019-07-14 16:49:44 +02:00
|
|
|
|
2022-01-11 15:42:07 +01:00
|
|
|
do_cage = editmesh_eval_final != editmesh_eval_cage;
|
|
|
|
|
do_uvcage = !editmesh_eval_final->runtime.is_original;
|
|
|
|
|
}
|
2019-07-14 16:49:44 +02:00
|
|
|
|
2022-05-17 15:31:37 +02:00
|
|
|
const bool do_subdivision = BKE_subsurf_modifier_has_gpu_subdiv(me);
|
OpenSubDiv: add support for an OpenGL evaluator
This evaluator is used in order to evaluate subdivision at render time, allowing for
faster renders of meshes with a subdivision surface modifier placed at the last
position in the modifier list.
When evaluating the subsurf modifier, we detect whether we can delegate evaluation
to the draw code. If so, the subdivision is first evaluated on the GPU using our own
custom evaluator (only the coarse data needs to be initially sent to the GPU), then,
buffers for the final `MeshBufferCache` are filled on the GPU using a set of
compute shaders. However, some buffers are still filled on the CPU side, if doing so
on the GPU is impractical (e.g. the line adjacency buffer used for x-ray, whose
logic is hardly GPU compatible).
This is done at the mesh buffer extraction level so that the result can be readily used
in the various OpenGL engines, without having to write custom geometry or tessellation
shaders.
We use our own subdivision evaluation shaders, instead of OpenSubDiv's vanilla one, in
order to control the data layout, and interpolation. For example, we store vertex colors
as compressed 16-bit integers, while OpenSubDiv's default evaluator only work for float
types.
In order to still access the modified geometry on the CPU side, for use in modifiers
or transform operators, a dedicated wrapper type is added `MESH_WRAPPER_TYPE_SUBD`.
Subdivision will be lazily evaluated via `BKE_object_get_evaluated_mesh` which will
create such a wrapper if possible. If the final subdivision surface is not needed on
the CPU side, `BKE_object_get_evaluated_mesh_no_subsurf` should be used.
Enabling or disabling GPU subdivision can be done through the user preferences (under
Viewport -> Subdivision).
See patch description for benchmarks.
Reviewed By: campbellbarton, jbakker, fclem, brecht, #eevee_viewport
Differential Revision: https://developer.blender.org/D12406
2021-12-27 16:34:47 +01:00
|
|
|
|
2021-08-23 13:28:55 -03:00
|
|
|
MeshBufferList *mbuflist = &cache->final.buff;
|
2019-07-14 16:49:44 +02:00
|
|
|
|
2020-04-03 16:59:34 +11:00
|
|
|
/* Initialize batches and request VBO's & IBO's. */
|
2022-06-05 12:04:42 +02:00
|
|
|
assert_deps_valid(
|
|
|
|
|
MBC_SURFACE,
|
|
|
|
|
{BUFFER_INDEX(ibo.tris), BUFFER_INDEX(vbo.lnor), BUFFER_INDEX(vbo.pos_nor),
|
|
|
|
|
BUFFER_INDEX(vbo.uv), BUFFER_INDEX(vbo.vcol), BUFFER_INDEX(vbo.attr[0]),
|
|
|
|
|
BUFFER_INDEX(vbo.attr[1]), BUFFER_INDEX(vbo.attr[2]), BUFFER_INDEX(vbo.attr[3]),
|
|
|
|
|
BUFFER_INDEX(vbo.attr[4]), BUFFER_INDEX(vbo.attr[5]), BUFFER_INDEX(vbo.attr[6]),
|
|
|
|
|
BUFFER_INDEX(vbo.attr[7]), BUFFER_INDEX(vbo.attr[8]), BUFFER_INDEX(vbo.attr[9]),
|
|
|
|
|
BUFFER_INDEX(vbo.attr[10]), BUFFER_INDEX(vbo.attr[11]), BUFFER_INDEX(vbo.attr[12]),
|
|
|
|
|
BUFFER_INDEX(vbo.attr[13]), BUFFER_INDEX(vbo.attr[14])});
|
2018-12-16 15:17:31 +01:00
|
|
|
if (DRW_batch_requested(cache->batch.surface, GPU_PRIM_TRIS)) {
|
2021-08-23 13:28:55 -03:00
|
|
|
DRW_ibo_request(cache->batch.surface, &mbuflist->ibo.tris);
|
2020-04-03 16:59:34 +11:00
|
|
|
/* Order matters. First ones override latest VBO's attributes. */
|
2021-08-23 13:28:55 -03:00
|
|
|
DRW_vbo_request(cache->batch.surface, &mbuflist->vbo.lnor);
|
|
|
|
|
DRW_vbo_request(cache->batch.surface, &mbuflist->vbo.pos_nor);
|
2019-04-06 01:55:21 +02:00
|
|
|
if (cache->cd_used.uv != 0) {
|
2021-08-23 13:28:55 -03:00
|
|
|
DRW_vbo_request(cache->batch.surface, &mbuflist->vbo.uv);
|
2018-12-17 11:37:27 +01:00
|
|
|
}
|
2021-10-26 18:16:33 -03:00
|
|
|
if (cache->cd_used.vcol != 0) {
|
2021-08-23 13:28:55 -03:00
|
|
|
DRW_vbo_request(cache->batch.surface, &mbuflist->vbo.vcol);
|
2018-12-18 02:18:55 +01:00
|
|
|
}
|
2021-10-26 18:16:33 -03:00
|
|
|
drw_add_attributes_vbo(cache->batch.surface, mbuflist, &cache->attr_used);
|
2018-12-16 15:17:31 +01:00
|
|
|
}
|
2022-06-05 12:04:42 +02:00
|
|
|
assert_deps_valid(MBC_ALL_VERTS, {BUFFER_INDEX(vbo.pos_nor)});
|
2018-12-10 15:29:04 +01:00
|
|
|
if (DRW_batch_requested(cache->batch.all_verts, GPU_PRIM_POINTS)) {
|
2021-08-23 13:28:55 -03:00
|
|
|
DRW_vbo_request(cache->batch.all_verts, &mbuflist->vbo.pos_nor);
|
2018-12-09 11:21:23 +01:00
|
|
|
}
|
2022-06-05 12:04:42 +02:00
|
|
|
assert_deps_valid(
|
|
|
|
|
MBC_SCULPT_OVERLAYS,
|
|
|
|
|
{BUFFER_INDEX(ibo.tris), BUFFER_INDEX(vbo.pos_nor), BUFFER_INDEX(vbo.sculpt_data)});
|
2020-09-18 19:30:02 +02:00
|
|
|
if (DRW_batch_requested(cache->batch.sculpt_overlays, GPU_PRIM_TRIS)) {
|
2021-08-23 13:28:55 -03:00
|
|
|
DRW_ibo_request(cache->batch.sculpt_overlays, &mbuflist->ibo.tris);
|
|
|
|
|
DRW_vbo_request(cache->batch.sculpt_overlays, &mbuflist->vbo.pos_nor);
|
|
|
|
|
DRW_vbo_request(cache->batch.sculpt_overlays, &mbuflist->vbo.sculpt_data);
|
2020-09-18 19:30:02 +02:00
|
|
|
}
|
2022-06-05 12:04:42 +02:00
|
|
|
assert_deps_valid(MBC_ALL_EDGES, {BUFFER_INDEX(ibo.lines), BUFFER_INDEX(vbo.pos_nor)});
|
2018-12-18 16:34:32 +01:00
|
|
|
if (DRW_batch_requested(cache->batch.all_edges, GPU_PRIM_LINES)) {
|
2021-08-23 13:28:55 -03:00
|
|
|
DRW_ibo_request(cache->batch.all_edges, &mbuflist->ibo.lines);
|
|
|
|
|
DRW_vbo_request(cache->batch.all_edges, &mbuflist->vbo.pos_nor);
|
2018-12-18 16:34:32 +01:00
|
|
|
}
|
2022-06-05 12:04:42 +02:00
|
|
|
assert_deps_valid(MBC_LOOSE_EDGES, {BUFFER_INDEX(ibo.lines_loose), BUFFER_INDEX(vbo.pos_nor)});
|
2018-12-18 21:45:46 +01:00
|
|
|
if (DRW_batch_requested(cache->batch.loose_edges, GPU_PRIM_LINES)) {
|
2022-06-10 10:29:35 +02:00
|
|
|
DRW_ibo_request(nullptr, &mbuflist->ibo.lines);
|
2021-08-23 13:28:55 -03:00
|
|
|
DRW_ibo_request(cache->batch.loose_edges, &mbuflist->ibo.lines_loose);
|
|
|
|
|
DRW_vbo_request(cache->batch.loose_edges, &mbuflist->vbo.pos_nor);
|
2018-12-18 21:45:46 +01:00
|
|
|
}
|
2022-06-05 12:04:42 +02:00
|
|
|
assert_deps_valid(MBC_EDGE_DETECTION,
|
|
|
|
|
{BUFFER_INDEX(ibo.lines_adjacency), BUFFER_INDEX(vbo.pos_nor)});
|
2018-12-18 19:56:55 +01:00
|
|
|
if (DRW_batch_requested(cache->batch.edge_detection, GPU_PRIM_LINES_ADJ)) {
|
2021-08-23 13:28:55 -03:00
|
|
|
DRW_ibo_request(cache->batch.edge_detection, &mbuflist->ibo.lines_adjacency);
|
|
|
|
|
DRW_vbo_request(cache->batch.edge_detection, &mbuflist->vbo.pos_nor);
|
2018-12-18 19:56:55 +01:00
|
|
|
}
|
2022-06-05 12:04:42 +02:00
|
|
|
assert_deps_valid(
|
|
|
|
|
MBC_SURFACE_WEIGHTS,
|
|
|
|
|
{BUFFER_INDEX(ibo.tris), BUFFER_INDEX(vbo.pos_nor), BUFFER_INDEX(vbo.weights)});
|
2018-12-17 23:00:05 +01:00
|
|
|
if (DRW_batch_requested(cache->batch.surface_weights, GPU_PRIM_TRIS)) {
|
2021-08-23 13:28:55 -03:00
|
|
|
DRW_ibo_request(cache->batch.surface_weights, &mbuflist->ibo.tris);
|
|
|
|
|
DRW_vbo_request(cache->batch.surface_weights, &mbuflist->vbo.pos_nor);
|
|
|
|
|
DRW_vbo_request(cache->batch.surface_weights, &mbuflist->vbo.weights);
|
2018-12-17 23:00:05 +01:00
|
|
|
}
|
2022-06-05 12:04:42 +02:00
|
|
|
assert_deps_valid(
|
|
|
|
|
MBC_WIRE_LOOPS,
|
|
|
|
|
{BUFFER_INDEX(ibo.lines_paint_mask), BUFFER_INDEX(vbo.lnor), BUFFER_INDEX(vbo.pos_nor)});
|
2019-07-01 12:43:07 +02:00
|
|
|
if (DRW_batch_requested(cache->batch.wire_loops, GPU_PRIM_LINES)) {
|
2021-08-23 13:28:55 -03:00
|
|
|
DRW_ibo_request(cache->batch.wire_loops, &mbuflist->ibo.lines_paint_mask);
|
2020-04-03 16:59:34 +11:00
|
|
|
/* Order matters. First ones override latest VBO's attributes. */
|
2021-08-23 13:28:55 -03:00
|
|
|
DRW_vbo_request(cache->batch.wire_loops, &mbuflist->vbo.lnor);
|
|
|
|
|
DRW_vbo_request(cache->batch.wire_loops, &mbuflist->vbo.pos_nor);
|
2018-12-17 17:01:06 +01:00
|
|
|
}
|
2022-06-05 12:04:42 +02:00
|
|
|
assert_deps_valid(
|
|
|
|
|
MBC_WIRE_EDGES,
|
|
|
|
|
{BUFFER_INDEX(ibo.lines), BUFFER_INDEX(vbo.pos_nor), BUFFER_INDEX(vbo.edge_fac)});
|
2019-02-12 23:22:36 +01:00
|
|
|
if (DRW_batch_requested(cache->batch.wire_edges, GPU_PRIM_LINES)) {
|
2021-08-23 13:28:55 -03:00
|
|
|
DRW_ibo_request(cache->batch.wire_edges, &mbuflist->ibo.lines);
|
|
|
|
|
DRW_vbo_request(cache->batch.wire_edges, &mbuflist->vbo.pos_nor);
|
|
|
|
|
DRW_vbo_request(cache->batch.wire_edges, &mbuflist->vbo.edge_fac);
|
2019-02-12 23:22:36 +01:00
|
|
|
}
|
2022-06-05 12:04:42 +02:00
|
|
|
assert_deps_valid(MBC_WIRE_LOOPS_UVS, {BUFFER_INDEX(ibo.edituv_lines), BUFFER_INDEX(vbo.uv)});
|
2019-07-14 16:49:44 +02:00
|
|
|
if (DRW_batch_requested(cache->batch.wire_loops_uvs, GPU_PRIM_LINES)) {
|
2021-08-23 13:28:55 -03:00
|
|
|
DRW_ibo_request(cache->batch.wire_loops_uvs, &mbuflist->ibo.edituv_lines);
|
2019-01-10 22:22:42 +01:00
|
|
|
/* For paint overlay. Active layer should have been queried. */
|
2019-04-06 01:55:21 +02:00
|
|
|
if (cache->cd_used.uv != 0) {
|
2021-08-23 13:28:55 -03:00
|
|
|
DRW_vbo_request(cache->batch.wire_loops_uvs, &mbuflist->vbo.uv);
|
2019-04-17 06:17:24 +02:00
|
|
|
}
|
|
|
|
|
}
|
2022-06-05 12:04:42 +02:00
|
|
|
assert_deps_valid(
|
|
|
|
|
MBC_EDIT_MESH_ANALYSIS,
|
|
|
|
|
{BUFFER_INDEX(ibo.tris), BUFFER_INDEX(vbo.pos_nor), BUFFER_INDEX(vbo.mesh_analysis)});
|
2019-04-18 08:00:59 +02:00
|
|
|
if (DRW_batch_requested(cache->batch.edit_mesh_analysis, GPU_PRIM_TRIS)) {
|
2021-08-23 13:28:55 -03:00
|
|
|
DRW_ibo_request(cache->batch.edit_mesh_analysis, &mbuflist->ibo.tris);
|
|
|
|
|
DRW_vbo_request(cache->batch.edit_mesh_analysis, &mbuflist->vbo.pos_nor);
|
|
|
|
|
DRW_vbo_request(cache->batch.edit_mesh_analysis, &mbuflist->vbo.mesh_analysis);
|
2018-12-22 23:57:12 +01:00
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2019-01-09 22:56:27 +01:00
|
|
|
/* Per Material */
|
2022-06-05 12:04:42 +02:00
|
|
|
assert_deps_valid(
|
|
|
|
|
MBC_SURFACE_PER_MAT,
|
|
|
|
|
{BUFFER_INDEX(vbo.lnor), BUFFER_INDEX(vbo.pos_nor), BUFFER_INDEX(vbo.uv),
|
|
|
|
|
BUFFER_INDEX(vbo.tan), BUFFER_INDEX(vbo.vcol), BUFFER_INDEX(vbo.orco),
|
|
|
|
|
BUFFER_INDEX(vbo.attr[0]), BUFFER_INDEX(vbo.attr[1]), BUFFER_INDEX(vbo.attr[2]),
|
|
|
|
|
BUFFER_INDEX(vbo.attr[3]), BUFFER_INDEX(vbo.attr[4]), BUFFER_INDEX(vbo.attr[5]),
|
|
|
|
|
BUFFER_INDEX(vbo.attr[6]), BUFFER_INDEX(vbo.attr[7]), BUFFER_INDEX(vbo.attr[8]),
|
|
|
|
|
BUFFER_INDEX(vbo.attr[9]), BUFFER_INDEX(vbo.attr[10]), BUFFER_INDEX(vbo.attr[11]),
|
|
|
|
|
BUFFER_INDEX(vbo.attr[12]), BUFFER_INDEX(vbo.attr[13]), BUFFER_INDEX(vbo.attr[14])});
|
|
|
|
|
assert_deps_valid(MBC_SURFACE_PER_MAT, {TRIS_PER_MAT_INDEX});
|
2019-09-08 00:12:26 +10:00
|
|
|
for (int i = 0; i < cache->mat_len; i++) {
|
2019-07-14 16:49:44 +02:00
|
|
|
if (DRW_batch_requested(cache->surface_per_mat[i], GPU_PRIM_TRIS)) {
|
2021-08-23 09:52:13 -03:00
|
|
|
DRW_ibo_request(cache->surface_per_mat[i], &cache->tris_per_mat[i]);
|
2020-04-03 16:59:34 +11:00
|
|
|
/* Order matters. First ones override latest VBO's attributes. */
|
2021-08-23 13:28:55 -03:00
|
|
|
DRW_vbo_request(cache->surface_per_mat[i], &mbuflist->vbo.lnor);
|
|
|
|
|
DRW_vbo_request(cache->surface_per_mat[i], &mbuflist->vbo.pos_nor);
|
2019-08-14 22:43:44 +02:00
|
|
|
if (cache->cd_used.uv != 0) {
|
2021-08-23 13:28:55 -03:00
|
|
|
DRW_vbo_request(cache->surface_per_mat[i], &mbuflist->vbo.uv);
|
2019-08-14 22:43:44 +02:00
|
|
|
}
|
|
|
|
|
if ((cache->cd_used.tan != 0) || (cache->cd_used.tan_orco != 0)) {
|
2021-08-23 13:28:55 -03:00
|
|
|
DRW_vbo_request(cache->surface_per_mat[i], &mbuflist->vbo.tan);
|
2018-12-22 23:57:12 +01:00
|
|
|
}
|
2021-10-26 18:16:33 -03:00
|
|
|
if (cache->cd_used.vcol != 0) {
|
2021-08-23 13:28:55 -03:00
|
|
|
DRW_vbo_request(cache->surface_per_mat[i], &mbuflist->vbo.vcol);
|
2018-12-16 15:17:31 +01:00
|
|
|
}
|
2018-12-17 23:00:05 +01:00
|
|
|
if (cache->cd_used.orco != 0) {
|
2021-08-23 13:28:55 -03:00
|
|
|
DRW_vbo_request(cache->surface_per_mat[i], &mbuflist->vbo.orco);
|
2019-04-17 06:17:24 +02:00
|
|
|
}
|
2021-10-26 18:16:33 -03:00
|
|
|
drw_add_attributes_vbo(cache->surface_per_mat[i], mbuflist, &cache->attr_used);
|
2019-04-17 06:17:24 +02:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2021-08-23 13:28:55 -03:00
|
|
|
mbuflist = (do_cage) ? &cache->cage.buff : &cache->final.buff;
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2019-07-14 16:49:44 +02:00
|
|
|
/* Edit Mesh */
|
2022-06-05 12:04:42 +02:00
|
|
|
assert_deps_valid(
|
|
|
|
|
MBC_EDIT_TRIANGLES,
|
|
|
|
|
{BUFFER_INDEX(ibo.tris), BUFFER_INDEX(vbo.pos_nor), BUFFER_INDEX(vbo.edit_data)});
|
2019-07-14 16:49:44 +02:00
|
|
|
if (DRW_batch_requested(cache->batch.edit_triangles, GPU_PRIM_TRIS)) {
|
2021-08-23 13:28:55 -03:00
|
|
|
DRW_ibo_request(cache->batch.edit_triangles, &mbuflist->ibo.tris);
|
|
|
|
|
DRW_vbo_request(cache->batch.edit_triangles, &mbuflist->vbo.pos_nor);
|
|
|
|
|
DRW_vbo_request(cache->batch.edit_triangles, &mbuflist->vbo.edit_data);
|
2019-07-01 12:43:07 +02:00
|
|
|
}
|
2022-06-05 12:04:42 +02:00
|
|
|
assert_deps_valid(
|
|
|
|
|
MBC_EDIT_VERTICES,
|
|
|
|
|
{BUFFER_INDEX(ibo.points), BUFFER_INDEX(vbo.pos_nor), BUFFER_INDEX(vbo.edit_data)});
|
2019-07-14 16:49:44 +02:00
|
|
|
if (DRW_batch_requested(cache->batch.edit_vertices, GPU_PRIM_POINTS)) {
|
2021-08-23 13:28:55 -03:00
|
|
|
DRW_ibo_request(cache->batch.edit_vertices, &mbuflist->ibo.points);
|
|
|
|
|
DRW_vbo_request(cache->batch.edit_vertices, &mbuflist->vbo.pos_nor);
|
|
|
|
|
DRW_vbo_request(cache->batch.edit_vertices, &mbuflist->vbo.edit_data);
|
2018-12-10 15:29:04 +01:00
|
|
|
}
|
2022-06-05 12:04:42 +02:00
|
|
|
assert_deps_valid(
|
|
|
|
|
MBC_EDIT_EDGES,
|
|
|
|
|
{BUFFER_INDEX(ibo.lines), BUFFER_INDEX(vbo.pos_nor), BUFFER_INDEX(vbo.edit_data)});
|
2019-07-14 16:49:44 +02:00
|
|
|
if (DRW_batch_requested(cache->batch.edit_edges, GPU_PRIM_LINES)) {
|
2021-08-23 13:28:55 -03:00
|
|
|
DRW_ibo_request(cache->batch.edit_edges, &mbuflist->ibo.lines);
|
|
|
|
|
DRW_vbo_request(cache->batch.edit_edges, &mbuflist->vbo.pos_nor);
|
|
|
|
|
DRW_vbo_request(cache->batch.edit_edges, &mbuflist->vbo.edit_data);
|
2019-04-17 06:17:24 +02:00
|
|
|
}
|
2022-06-05 12:04:42 +02:00
|
|
|
assert_deps_valid(MBC_EDIT_VNOR, {BUFFER_INDEX(ibo.points), BUFFER_INDEX(vbo.pos_nor)});
|
2019-07-14 16:49:44 +02:00
|
|
|
if (DRW_batch_requested(cache->batch.edit_vnor, GPU_PRIM_POINTS)) {
|
2021-08-23 13:28:55 -03:00
|
|
|
DRW_ibo_request(cache->batch.edit_vnor, &mbuflist->ibo.points);
|
|
|
|
|
DRW_vbo_request(cache->batch.edit_vnor, &mbuflist->vbo.pos_nor);
|
2019-04-17 06:17:24 +02:00
|
|
|
}
|
2022-06-05 12:04:42 +02:00
|
|
|
assert_deps_valid(MBC_EDIT_LNOR,
|
|
|
|
|
{BUFFER_INDEX(ibo.tris), BUFFER_INDEX(vbo.pos_nor), BUFFER_INDEX(vbo.lnor)});
|
2019-07-14 16:49:44 +02:00
|
|
|
if (DRW_batch_requested(cache->batch.edit_lnor, GPU_PRIM_POINTS)) {
|
2021-08-23 13:28:55 -03:00
|
|
|
DRW_ibo_request(cache->batch.edit_lnor, &mbuflist->ibo.tris);
|
|
|
|
|
DRW_vbo_request(cache->batch.edit_lnor, &mbuflist->vbo.pos_nor);
|
|
|
|
|
DRW_vbo_request(cache->batch.edit_lnor, &mbuflist->vbo.lnor);
|
2019-04-17 06:17:24 +02:00
|
|
|
}
|
2022-06-05 12:04:42 +02:00
|
|
|
assert_deps_valid(
|
|
|
|
|
MBC_EDIT_FACEDOTS,
|
|
|
|
|
{BUFFER_INDEX(ibo.fdots), BUFFER_INDEX(vbo.fdots_pos), BUFFER_INDEX(vbo.fdots_nor)});
|
2019-07-14 16:49:44 +02:00
|
|
|
if (DRW_batch_requested(cache->batch.edit_fdots, GPU_PRIM_POINTS)) {
|
2021-08-23 13:28:55 -03:00
|
|
|
DRW_ibo_request(cache->batch.edit_fdots, &mbuflist->ibo.fdots);
|
|
|
|
|
DRW_vbo_request(cache->batch.edit_fdots, &mbuflist->vbo.fdots_pos);
|
|
|
|
|
DRW_vbo_request(cache->batch.edit_fdots, &mbuflist->vbo.fdots_nor);
|
2019-02-04 01:13:51 +01:00
|
|
|
}
|
2022-06-05 12:04:42 +02:00
|
|
|
assert_deps_valid(MBC_SKIN_ROOTS, {BUFFER_INDEX(vbo.skin_roots)});
|
2019-12-02 01:40:58 +01:00
|
|
|
if (DRW_batch_requested(cache->batch.edit_skin_roots, GPU_PRIM_POINTS)) {
|
2021-08-23 13:28:55 -03:00
|
|
|
DRW_vbo_request(cache->batch.edit_skin_roots, &mbuflist->vbo.skin_roots);
|
2019-10-15 01:49:53 +02:00
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2019-07-14 16:49:44 +02:00
|
|
|
/* Selection */
|
2022-06-05 12:04:42 +02:00
|
|
|
assert_deps_valid(
|
|
|
|
|
MBC_EDIT_SELECTION_VERTS,
|
|
|
|
|
{BUFFER_INDEX(ibo.points), BUFFER_INDEX(vbo.pos_nor), BUFFER_INDEX(vbo.vert_idx)});
|
2019-07-14 16:49:44 +02:00
|
|
|
if (DRW_batch_requested(cache->batch.edit_selection_verts, GPU_PRIM_POINTS)) {
|
2021-08-23 13:28:55 -03:00
|
|
|
DRW_ibo_request(cache->batch.edit_selection_verts, &mbuflist->ibo.points);
|
|
|
|
|
DRW_vbo_request(cache->batch.edit_selection_verts, &mbuflist->vbo.pos_nor);
|
|
|
|
|
DRW_vbo_request(cache->batch.edit_selection_verts, &mbuflist->vbo.vert_idx);
|
2018-12-22 23:57:12 +01:00
|
|
|
}
|
2022-06-05 12:04:42 +02:00
|
|
|
assert_deps_valid(
|
|
|
|
|
MBC_EDIT_SELECTION_EDGES,
|
|
|
|
|
{BUFFER_INDEX(ibo.lines), BUFFER_INDEX(vbo.pos_nor), BUFFER_INDEX(vbo.edge_idx)});
|
2019-07-14 16:49:44 +02:00
|
|
|
if (DRW_batch_requested(cache->batch.edit_selection_edges, GPU_PRIM_LINES)) {
|
2021-08-23 13:28:55 -03:00
|
|
|
DRW_ibo_request(cache->batch.edit_selection_edges, &mbuflist->ibo.lines);
|
|
|
|
|
DRW_vbo_request(cache->batch.edit_selection_edges, &mbuflist->vbo.pos_nor);
|
|
|
|
|
DRW_vbo_request(cache->batch.edit_selection_edges, &mbuflist->vbo.edge_idx);
|
2018-12-22 23:57:12 +01:00
|
|
|
}
|
2022-06-05 12:04:42 +02:00
|
|
|
assert_deps_valid(
|
|
|
|
|
MBC_EDIT_SELECTION_FACES,
|
|
|
|
|
{BUFFER_INDEX(ibo.tris), BUFFER_INDEX(vbo.pos_nor), BUFFER_INDEX(vbo.poly_idx)});
|
2019-07-14 16:49:44 +02:00
|
|
|
if (DRW_batch_requested(cache->batch.edit_selection_faces, GPU_PRIM_TRIS)) {
|
2021-08-23 13:28:55 -03:00
|
|
|
DRW_ibo_request(cache->batch.edit_selection_faces, &mbuflist->ibo.tris);
|
|
|
|
|
DRW_vbo_request(cache->batch.edit_selection_faces, &mbuflist->vbo.pos_nor);
|
|
|
|
|
DRW_vbo_request(cache->batch.edit_selection_faces, &mbuflist->vbo.poly_idx);
|
2019-04-17 06:17:24 +02:00
|
|
|
}
|
2022-06-05 12:04:42 +02:00
|
|
|
assert_deps_valid(
|
|
|
|
|
MBC_EDIT_SELECTION_FACEDOTS,
|
|
|
|
|
{BUFFER_INDEX(ibo.fdots), BUFFER_INDEX(vbo.fdots_pos), BUFFER_INDEX(vbo.fdot_idx)});
|
2019-07-14 16:49:44 +02:00
|
|
|
if (DRW_batch_requested(cache->batch.edit_selection_fdots, GPU_PRIM_POINTS)) {
|
2021-08-23 13:28:55 -03:00
|
|
|
DRW_ibo_request(cache->batch.edit_selection_fdots, &mbuflist->ibo.fdots);
|
|
|
|
|
DRW_vbo_request(cache->batch.edit_selection_fdots, &mbuflist->vbo.fdots_pos);
|
|
|
|
|
DRW_vbo_request(cache->batch.edit_selection_fdots, &mbuflist->vbo.fdot_idx);
|
2019-04-18 08:00:59 +02:00
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
|
|
|
|
/**
|
2019-02-04 01:13:51 +01:00
|
|
|
* TODO: The code and data structure is ready to support modified UV display
|
|
|
|
|
* but the selection code for UVs needs to support it first. So for now, only
|
2019-03-19 15:17:46 +11:00
|
|
|
* display the cage in all cases.
|
2019-04-17 06:17:24 +02:00
|
|
|
*/
|
2021-08-23 13:28:55 -03:00
|
|
|
mbuflist = (do_uvcage) ? &cache->uv_cage.buff : &cache->final.buff;
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2019-07-14 16:49:44 +02:00
|
|
|
/* Edit UV */
|
2022-06-05 12:04:42 +02:00
|
|
|
assert_deps_valid(
|
|
|
|
|
MBC_EDITUV_FACES,
|
|
|
|
|
{BUFFER_INDEX(ibo.edituv_tris), BUFFER_INDEX(vbo.uv), BUFFER_INDEX(vbo.edituv_data)});
|
2019-07-14 16:49:44 +02:00
|
|
|
if (DRW_batch_requested(cache->batch.edituv_faces, GPU_PRIM_TRIS)) {
|
2021-08-23 13:28:55 -03:00
|
|
|
DRW_ibo_request(cache->batch.edituv_faces, &mbuflist->ibo.edituv_tris);
|
|
|
|
|
DRW_vbo_request(cache->batch.edituv_faces, &mbuflist->vbo.uv);
|
|
|
|
|
DRW_vbo_request(cache->batch.edituv_faces, &mbuflist->vbo.edituv_data);
|
2019-07-14 16:49:44 +02:00
|
|
|
}
|
2022-06-05 12:04:42 +02:00
|
|
|
assert_deps_valid(MBC_EDITUV_FACES_STRETCH_AREA,
|
|
|
|
|
{BUFFER_INDEX(ibo.edituv_tris),
|
|
|
|
|
BUFFER_INDEX(vbo.uv),
|
|
|
|
|
BUFFER_INDEX(vbo.edituv_data),
|
|
|
|
|
BUFFER_INDEX(vbo.edituv_stretch_area)});
|
2019-09-03 13:42:11 +02:00
|
|
|
if (DRW_batch_requested(cache->batch.edituv_faces_stretch_area, GPU_PRIM_TRIS)) {
|
2021-08-23 13:28:55 -03:00
|
|
|
DRW_ibo_request(cache->batch.edituv_faces_stretch_area, &mbuflist->ibo.edituv_tris);
|
|
|
|
|
DRW_vbo_request(cache->batch.edituv_faces_stretch_area, &mbuflist->vbo.uv);
|
|
|
|
|
DRW_vbo_request(cache->batch.edituv_faces_stretch_area, &mbuflist->vbo.edituv_data);
|
|
|
|
|
DRW_vbo_request(cache->batch.edituv_faces_stretch_area, &mbuflist->vbo.edituv_stretch_area);
|
2019-09-03 13:42:11 +02:00
|
|
|
}
|
2022-06-05 12:04:42 +02:00
|
|
|
assert_deps_valid(MBC_EDITUV_FACES_STRETCH_ANGLE,
|
|
|
|
|
{BUFFER_INDEX(ibo.edituv_tris),
|
|
|
|
|
BUFFER_INDEX(vbo.uv),
|
|
|
|
|
BUFFER_INDEX(vbo.edituv_data),
|
|
|
|
|
BUFFER_INDEX(vbo.edituv_stretch_angle)});
|
2019-09-03 13:42:11 +02:00
|
|
|
if (DRW_batch_requested(cache->batch.edituv_faces_stretch_angle, GPU_PRIM_TRIS)) {
|
2021-08-23 13:28:55 -03:00
|
|
|
DRW_ibo_request(cache->batch.edituv_faces_stretch_angle, &mbuflist->ibo.edituv_tris);
|
|
|
|
|
DRW_vbo_request(cache->batch.edituv_faces_stretch_angle, &mbuflist->vbo.uv);
|
|
|
|
|
DRW_vbo_request(cache->batch.edituv_faces_stretch_angle, &mbuflist->vbo.edituv_data);
|
|
|
|
|
DRW_vbo_request(cache->batch.edituv_faces_stretch_angle, &mbuflist->vbo.edituv_stretch_angle);
|
2019-07-14 16:49:44 +02:00
|
|
|
}
|
2022-06-05 12:04:42 +02:00
|
|
|
assert_deps_valid(
|
|
|
|
|
MBC_EDITUV_EDGES,
|
|
|
|
|
{BUFFER_INDEX(ibo.edituv_lines), BUFFER_INDEX(vbo.uv), BUFFER_INDEX(vbo.edituv_data)});
|
2019-07-14 16:49:44 +02:00
|
|
|
if (DRW_batch_requested(cache->batch.edituv_edges, GPU_PRIM_LINES)) {
|
2021-08-23 13:28:55 -03:00
|
|
|
DRW_ibo_request(cache->batch.edituv_edges, &mbuflist->ibo.edituv_lines);
|
|
|
|
|
DRW_vbo_request(cache->batch.edituv_edges, &mbuflist->vbo.uv);
|
|
|
|
|
DRW_vbo_request(cache->batch.edituv_edges, &mbuflist->vbo.edituv_data);
|
2019-02-04 01:13:51 +01:00
|
|
|
}
|
2022-06-05 12:04:42 +02:00
|
|
|
assert_deps_valid(
|
|
|
|
|
MBC_EDITUV_VERTS,
|
|
|
|
|
{BUFFER_INDEX(ibo.edituv_points), BUFFER_INDEX(vbo.uv), BUFFER_INDEX(vbo.edituv_data)});
|
2019-07-14 16:49:44 +02:00
|
|
|
if (DRW_batch_requested(cache->batch.edituv_verts, GPU_PRIM_POINTS)) {
|
2021-08-23 13:28:55 -03:00
|
|
|
DRW_ibo_request(cache->batch.edituv_verts, &mbuflist->ibo.edituv_points);
|
|
|
|
|
DRW_vbo_request(cache->batch.edituv_verts, &mbuflist->vbo.uv);
|
|
|
|
|
DRW_vbo_request(cache->batch.edituv_verts, &mbuflist->vbo.edituv_data);
|
2019-07-14 16:49:44 +02:00
|
|
|
}
|
2022-06-05 12:04:42 +02:00
|
|
|
assert_deps_valid(MBC_EDITUV_FACEDOTS,
|
|
|
|
|
{BUFFER_INDEX(ibo.edituv_fdots),
|
|
|
|
|
BUFFER_INDEX(vbo.fdots_uv),
|
|
|
|
|
BUFFER_INDEX(vbo.fdots_edituv_data)});
|
2019-07-14 16:49:44 +02:00
|
|
|
if (DRW_batch_requested(cache->batch.edituv_fdots, GPU_PRIM_POINTS)) {
|
2021-08-23 13:28:55 -03:00
|
|
|
DRW_ibo_request(cache->batch.edituv_fdots, &mbuflist->ibo.edituv_fdots);
|
|
|
|
|
DRW_vbo_request(cache->batch.edituv_fdots, &mbuflist->vbo.fdots_uv);
|
|
|
|
|
DRW_vbo_request(cache->batch.edituv_fdots, &mbuflist->vbo.fdots_edituv_data);
|
2019-07-14 16:49:44 +02:00
|
|
|
}
|
|
|
|
|
|
2022-06-05 12:04:42 +02:00
|
|
|
#ifdef DEBUG
|
|
|
|
|
auto assert_final_deps_valid = [&](const int buffer_index) {
|
|
|
|
|
BLI_assert(batches_that_use_buffer(buffer_index) ==
|
|
|
|
|
batches_that_use_buffer_local.lookup(buffer_index));
|
|
|
|
|
};
|
|
|
|
|
assert_final_deps_valid(BUFFER_INDEX(vbo.lnor));
|
|
|
|
|
assert_final_deps_valid(BUFFER_INDEX(vbo.pos_nor));
|
|
|
|
|
assert_final_deps_valid(BUFFER_INDEX(vbo.uv));
|
|
|
|
|
assert_final_deps_valid(BUFFER_INDEX(vbo.vcol));
|
|
|
|
|
assert_final_deps_valid(BUFFER_INDEX(vbo.sculpt_data));
|
|
|
|
|
assert_final_deps_valid(BUFFER_INDEX(vbo.weights));
|
|
|
|
|
assert_final_deps_valid(BUFFER_INDEX(vbo.edge_fac));
|
|
|
|
|
assert_final_deps_valid(BUFFER_INDEX(vbo.mesh_analysis));
|
|
|
|
|
assert_final_deps_valid(BUFFER_INDEX(vbo.tan));
|
|
|
|
|
assert_final_deps_valid(BUFFER_INDEX(vbo.orco));
|
|
|
|
|
assert_final_deps_valid(BUFFER_INDEX(vbo.edit_data));
|
|
|
|
|
assert_final_deps_valid(BUFFER_INDEX(vbo.fdots_pos));
|
|
|
|
|
assert_final_deps_valid(BUFFER_INDEX(vbo.fdots_nor));
|
|
|
|
|
assert_final_deps_valid(BUFFER_INDEX(vbo.skin_roots));
|
|
|
|
|
assert_final_deps_valid(BUFFER_INDEX(vbo.vert_idx));
|
|
|
|
|
assert_final_deps_valid(BUFFER_INDEX(vbo.edge_idx));
|
|
|
|
|
assert_final_deps_valid(BUFFER_INDEX(vbo.poly_idx));
|
|
|
|
|
assert_final_deps_valid(BUFFER_INDEX(vbo.fdot_idx));
|
|
|
|
|
assert_final_deps_valid(BUFFER_INDEX(vbo.edituv_data));
|
|
|
|
|
assert_final_deps_valid(BUFFER_INDEX(vbo.edituv_stretch_area));
|
|
|
|
|
assert_final_deps_valid(BUFFER_INDEX(vbo.edituv_stretch_angle));
|
|
|
|
|
assert_final_deps_valid(BUFFER_INDEX(vbo.fdots_uv));
|
|
|
|
|
assert_final_deps_valid(BUFFER_INDEX(vbo.fdots_edituv_data));
|
|
|
|
|
for (const int i : IndexRange(GPU_MAX_ATTR)) {
|
|
|
|
|
assert_final_deps_valid(BUFFER_INDEX(vbo.attr[i]));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
assert_final_deps_valid(BUFFER_INDEX(ibo.tris));
|
|
|
|
|
assert_final_deps_valid(BUFFER_INDEX(ibo.lines));
|
|
|
|
|
assert_final_deps_valid(BUFFER_INDEX(ibo.lines_loose));
|
|
|
|
|
assert_final_deps_valid(BUFFER_INDEX(ibo.lines_adjacency));
|
|
|
|
|
assert_final_deps_valid(BUFFER_INDEX(ibo.lines_paint_mask));
|
|
|
|
|
assert_final_deps_valid(BUFFER_INDEX(ibo.points));
|
|
|
|
|
assert_final_deps_valid(BUFFER_INDEX(ibo.fdots));
|
|
|
|
|
assert_final_deps_valid(BUFFER_INDEX(ibo.edituv_tris));
|
|
|
|
|
assert_final_deps_valid(BUFFER_INDEX(ibo.edituv_lines));
|
|
|
|
|
assert_final_deps_valid(BUFFER_INDEX(ibo.edituv_points));
|
|
|
|
|
assert_final_deps_valid(BUFFER_INDEX(ibo.edituv_fdots));
|
|
|
|
|
|
|
|
|
|
assert_final_deps_valid(TRIS_PER_MAT_INDEX);
|
|
|
|
|
#endif
|
2021-06-14 08:00:42 -03:00
|
|
|
|
2019-07-14 16:49:44 +02:00
|
|
|
if (do_uvcage) {
|
2022-06-05 12:04:42 +02:00
|
|
|
blender::draw::mesh_buffer_cache_create_requested(task_graph,
|
|
|
|
|
cache,
|
|
|
|
|
&cache->uv_cage,
|
|
|
|
|
ob,
|
|
|
|
|
me,
|
|
|
|
|
is_editmode,
|
|
|
|
|
is_paint_mode,
|
|
|
|
|
is_mode_active,
|
|
|
|
|
ob->obmat,
|
|
|
|
|
false,
|
|
|
|
|
true,
|
|
|
|
|
scene,
|
|
|
|
|
ts,
|
|
|
|
|
true);
|
2019-01-09 22:56:27 +01:00
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2019-07-14 16:49:44 +02:00
|
|
|
if (do_cage) {
|
2022-06-05 12:04:42 +02:00
|
|
|
blender::draw::mesh_buffer_cache_create_requested(task_graph,
|
|
|
|
|
cache,
|
|
|
|
|
&cache->cage,
|
|
|
|
|
ob,
|
|
|
|
|
me,
|
|
|
|
|
is_editmode,
|
|
|
|
|
is_paint_mode,
|
|
|
|
|
is_mode_active,
|
|
|
|
|
ob->obmat,
|
|
|
|
|
false,
|
|
|
|
|
false,
|
|
|
|
|
scene,
|
|
|
|
|
ts,
|
|
|
|
|
true);
|
2020-01-07 14:06:33 +11:00
|
|
|
}
|
|
|
|
|
|
OpenSubDiv: add support for an OpenGL evaluator
This evaluator is used in order to evaluate subdivision at render time, allowing for
faster renders of meshes with a subdivision surface modifier placed at the last
position in the modifier list.
When evaluating the subsurf modifier, we detect whether we can delegate evaluation
to the draw code. If so, the subdivision is first evaluated on the GPU using our own
custom evaluator (only the coarse data needs to be initially sent to the GPU), then,
buffers for the final `MeshBufferCache` are filled on the GPU using a set of
compute shaders. However, some buffers are still filled on the CPU side, if doing so
on the GPU is impractical (e.g. the line adjacency buffer used for x-ray, whose
logic is hardly GPU compatible).
This is done at the mesh buffer extraction level so that the result can be readily used
in the various OpenGL engines, without having to write custom geometry or tesselation
shaders.
We use our own subdivision evaluation shaders, instead of OpenSubDiv's vanilla one, in
order to control the data layout, and interpolation. For example, we store vertex colors
as compressed 16-bit integers, while OpenSubDiv's default evaluator only work for float
types.
In order to still access the modified geometry on the CPU side, for use in modifiers
or transform operators, a dedicated wrapper type is added `MESH_WRAPPER_TYPE_SUBD`.
Subdivision will be lazily evaluated via `BKE_object_get_evaluated_mesh` which will
create such a wrapper if possible. If the final subdivision surface is not needed on
the CPU side, `BKE_object_get_evaluated_mesh_no_subsurf` should be used.
Enabling or disabling GPU subdivision can be done through the user preferences (under
Viewport -> Subdivision).
See patch description for benchmarks.
Reviewed By: campbellbarton, jbakker, fclem, brecht, #eevee_viewport
Differential Revision: https://developer.blender.org/D12406
2021-12-27 16:34:47 +01:00
|
|
|
if (do_subdivision) {
|
2022-06-13 15:21:05 +02:00
|
|
|
DRW_create_subdivision(ob,
|
2022-02-14 10:09:06 +01:00
|
|
|
me,
|
|
|
|
|
cache,
|
|
|
|
|
&cache->final,
|
|
|
|
|
is_editmode,
|
|
|
|
|
is_paint_mode,
|
|
|
|
|
is_mode_active,
|
|
|
|
|
ob->obmat,
|
|
|
|
|
true,
|
|
|
|
|
false,
|
|
|
|
|
ts,
|
|
|
|
|
use_hide);
|
OpenSubDiv: add support for an OpenGL evaluator
This evaluator is used in order to evaluate subdivision at render time, allowing for
faster renders of meshes with a subdivision surface modifier placed at the last
position in the modifier list.
When evaluating the subsurf modifier, we detect whether we can delegate evaluation
to the draw code. If so, the subdivision is first evaluated on the GPU using our own
custom evaluator (only the coarse data needs to be initially sent to the GPU), then,
buffers for the final `MeshBufferCache` are filled on the GPU using a set of
compute shaders. However, some buffers are still filled on the CPU side, if doing so
on the GPU is impractical (e.g. the line adjacency buffer used for x-ray, whose
logic is hardly GPU compatible).
This is done at the mesh buffer extraction level so that the result can be readily used
in the various OpenGL engines, without having to write custom geometry or tesselation
shaders.
We use our own subdivision evaluation shaders, instead of OpenSubDiv's vanilla one, in
order to control the data layout, and interpolation. For example, we store vertex colors
as compressed 16-bit integers, while OpenSubDiv's default evaluator only work for float
types.
In order to still access the modified geometry on the CPU side, for use in modifiers
or transform operators, a dedicated wrapper type is added `MESH_WRAPPER_TYPE_SUBD`.
Subdivision will be lazily evaluated via `BKE_object_get_evaluated_mesh` which will
create such a wrapper if possible. If the final subdivision surface is not needed on
the CPU side, `BKE_object_get_evaluated_mesh_no_subsurf` should be used.
Enabling or disabling GPU subdivision can be done through the user preferences (under
Viewport -> Subdivision).
See patch description for benchmarks.
Reviewed By: campbellbarton, jbakker, fclem, brecht, #eevee_viewport
Differential Revision: https://developer.blender.org/D12406
2021-12-27 16:34:47 +01:00
|
|
|
}
|
|
|
|
|
else {
|
|
|
|
|
/* The subsurf modifier may have been recently removed, or another modifier was added after it,
|
|
|
|
|
* so free any potential subdivision cache as it is not needed anymore. */
|
|
|
|
|
mesh_batch_cache_free_subdiv_cache(cache);
|
|
|
|
|
}
|
|
|
|
|
|
2022-06-05 12:04:42 +02:00
|
|
|
blender::draw::mesh_buffer_cache_create_requested(task_graph,
|
|
|
|
|
cache,
|
|
|
|
|
&cache->final,
|
|
|
|
|
ob,
|
|
|
|
|
me,
|
|
|
|
|
is_editmode,
|
|
|
|
|
is_paint_mode,
|
|
|
|
|
is_mode_active,
|
|
|
|
|
ob->obmat,
|
|
|
|
|
true,
|
|
|
|
|
false,
|
|
|
|
|
scene,
|
|
|
|
|
ts,
|
|
|
|
|
use_hide);
|
2020-09-09 16:32:48 +02:00
|
|
|
|
|
|
|
|
/* Ensure that all requested batches have finished.
|
|
|
|
|
* Ideally we want to remove this sync, but there are cases where this doesn't work.
|
|
|
|
|
* See T79038 for example.
|
|
|
|
|
*
|
|
|
|
|
* An idea to improve this is to separate the Object mode from the edit mode draw caches. And
|
|
|
|
|
* based on the mode the correct one will be updated. Other option is to look into using
|
|
|
|
|
* drw_batch_cache_generate_requested_delayed. */
|
2020-08-25 11:48:48 +02:00
|
|
|
BLI_task_graph_work_and_wait(task_graph);
|
2018-12-16 15:17:31 +01:00
|
|
|
#ifdef DEBUG
|
2020-07-17 08:31:03 +02:00
|
|
|
drw_mesh_batch_cache_check_available(task_graph, me);
|
2018-12-16 15:17:31 +01:00
|
|
|
#endif
|
2018-12-08 20:10:20 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/** \} */
|