/* SPDX-License-Identifier: GPL-2.0-or-later
 * Copyright 2016 Blender Foundation. */

/** \file
 * \ingroup draw
 */
|
|
|
|
|
|
|
|
|
|
#include "draw_manager.h"
|
|
|
|
|
|
2020-06-02 18:14:28 +02:00
|
|
|
#include "BLI_alloca.h"
|
2019-08-15 10:17:41 -03:00
|
|
|
#include "BLI_math.h"
|
2019-04-05 20:45:32 +02:00
|
|
|
#include "BLI_math_bits.h"
|
2019-05-07 18:01:14 +02:00
|
|
|
#include "BLI_memblock.h"
|
2018-02-28 16:23:33 +01:00
|
|
|
|
2018-02-28 01:16:23 +01:00
|
|
|
#include "BKE_global.h"
|
|
|
|
|
|
2021-05-28 08:16:26 +02:00
|
|
|
#include "GPU_compute.h"
|
2019-10-03 16:21:23 +02:00
|
|
|
#include "GPU_platform.h"
|
2020-08-14 15:20:35 +02:00
|
|
|
#include "GPU_shader.h"
|
2020-08-17 18:11:09 +02:00
|
|
|
#include "GPU_state.h"
|
2018-02-28 01:16:23 +01:00
|
|
|
|
|
|
|
|
#ifdef USE_GPU_SELECT
|
|
|
|
|
# include "GPU_select.h"
|
|
|
|
|
#endif
|
|
|
|
|
|
2018-05-11 07:48:52 +02:00
|
|
|
/* Set the selection ID used for subsequent draw calls.
 * Only meaningful during a selection/picking pass (compiled out otherwise). */
void DRW_select_load_id(uint id)
{
#ifdef USE_GPU_SELECT
  /* Must only be called while selection picking is active. */
  BLI_assert(G.f & G_FLAG_PICKSEL);
  DST.select_id = id;
#endif
}
|
2018-02-28 01:16:23 +01:00
|
|
|
|
2018-08-02 18:33:19 +02:00
|
|
|
#define DEBUG_UBO_BINDING
|
|
|
|
|
|
2019-05-31 01:45:41 +02:00
|
|
|
/* Transient state carried along while executing a shading group's command
 * stream. Caches currently-bound resources and pending instanced-draw data so
 * redundant binds/draw submissions can be avoided. */
typedef struct DRWCommandsState {
  GPUBatch *batch;   /* Currently bound geometry batch. */
  int resource_chunk; /* Index of the currently bound resource chunk. */
  int resource_id;    /* Current resource id within the chunk. */
  int base_inst;      /* First instance index of the pending instanced draw. */
  int inst_count;     /* Number of accumulated instances not yet flushed. */
  bool neg_scale;     /* NOTE(review): presumably tracks negative-scale matrices
                       * (winding/culling flip) — confirm at usage site. */
  /* Resource location. (Shader uniform locations, -1 when unused.) */
  int obmats_loc;
  int obinfos_loc;
  int obattrs_loc;
  int vlattrs_loc;
  int baseinst_loc;
  int chunkid_loc;
  int resourceid_loc;
  /* Legacy matrix support. */
  int obmat_loc;
  int obinv_loc;
  /* Uniform Attributes. */
  DRWSparseUniformBuf *obattrs_ubo;
  /* Selection ID state. */
  GPUVertBuf *select_buf;
  uint select_id;
  /* Drawing State */
  DRWState drw_state_enabled;
  DRWState drw_state_disabled;
} DRWCommandsState;
|
|
|
|
|
|
2018-02-28 01:16:23 +01:00
|
|
|
/* -------------------------------------------------------------------- */
|
|
|
|
|
/** \name Draw State (DRW_state)
|
|
|
|
|
* \{ */
|
|
|
|
|
|
|
|
|
|
/* Translate a DRWState bit-field into concrete GPU state and apply it.
 * Locked bits (DST.state_lock) are preserved from the current state.
 * Early-outs when the requested state equals the cached one. */
void drw_state_set(DRWState state)
{
  /* Mask locked state. */
  state = (~DST.state_lock & state) | (DST.state_lock & DST.state);

  if (DST.state == state) {
    return;
  }

  eGPUWriteMask write_mask = 0;
  eGPUBlend blend = 0;
  eGPUFaceCullTest culling_test = 0;
  eGPUDepthTest depth_test = 0;
  eGPUStencilTest stencil_test = 0;
  eGPUStencilOp stencil_op = 0;
  eGPUProvokingVertex provoking_vert = 0;

  /* Accumulate write mask from the individual write bits. */
  if (state & DRW_STATE_WRITE_DEPTH) {
    write_mask |= GPU_WRITE_DEPTH;
  }
  if (state & DRW_STATE_WRITE_COLOR) {
    write_mask |= GPU_WRITE_COLOR;
  }
  if (state & DRW_STATE_WRITE_STENCIL_ENABLED) {
    write_mask |= GPU_WRITE_STENCIL;
  }

  /* Face culling: back, front, or none. */
  switch (state & (DRW_STATE_CULL_BACK | DRW_STATE_CULL_FRONT)) {
    case DRW_STATE_CULL_BACK:
      culling_test = GPU_CULL_BACK;
      break;
    case DRW_STATE_CULL_FRONT:
      culling_test = GPU_CULL_FRONT;
      break;
    default:
      culling_test = GPU_CULL_NONE;
      break;
  }

  /* Depth test function. */
  switch (state & DRW_STATE_DEPTH_TEST_ENABLED) {
    case DRW_STATE_DEPTH_LESS:
      depth_test = GPU_DEPTH_LESS;
      break;
    case DRW_STATE_DEPTH_LESS_EQUAL:
      depth_test = GPU_DEPTH_LESS_EQUAL;
      break;
    case DRW_STATE_DEPTH_EQUAL:
      depth_test = GPU_DEPTH_EQUAL;
      break;
    case DRW_STATE_DEPTH_GREATER:
      depth_test = GPU_DEPTH_GREATER;
      break;
    case DRW_STATE_DEPTH_GREATER_EQUAL:
      depth_test = GPU_DEPTH_GREATER_EQUAL;
      break;
    case DRW_STATE_DEPTH_ALWAYS:
      depth_test = GPU_DEPTH_ALWAYS;
      break;
    default:
      depth_test = GPU_DEPTH_NONE;
      break;
  }

  /* Stencil write operation; also sets the stencil write mask as a side effect. */
  switch (state & DRW_STATE_WRITE_STENCIL_ENABLED) {
    case DRW_STATE_WRITE_STENCIL:
      stencil_op = GPU_STENCIL_OP_REPLACE;
      GPU_stencil_write_mask_set(0xFF);
      break;
    case DRW_STATE_WRITE_STENCIL_SHADOW_PASS:
      stencil_op = GPU_STENCIL_OP_COUNT_DEPTH_PASS;
      GPU_stencil_write_mask_set(0xFF);
      break;
    case DRW_STATE_WRITE_STENCIL_SHADOW_FAIL:
      stencil_op = GPU_STENCIL_OP_COUNT_DEPTH_FAIL;
      GPU_stencil_write_mask_set(0xFF);
      break;
    default:
      stencil_op = GPU_STENCIL_OP_NONE;
      GPU_stencil_write_mask_set(0x00);
      break;
  }

  /* Stencil test function. */
  switch (state & DRW_STATE_STENCIL_TEST_ENABLED) {
    case DRW_STATE_STENCIL_ALWAYS:
      stencil_test = GPU_STENCIL_ALWAYS;
      break;
    case DRW_STATE_STENCIL_EQUAL:
      stencil_test = GPU_STENCIL_EQUAL;
      break;
    case DRW_STATE_STENCIL_NEQUAL:
      stencil_test = GPU_STENCIL_NEQUAL;
      break;
    default:
      stencil_test = GPU_STENCIL_NONE;
      break;
  }

  /* Blend mode. */
  switch (state & DRW_STATE_BLEND_ENABLED) {
    case DRW_STATE_BLEND_ADD:
      blend = GPU_BLEND_ADDITIVE;
      break;
    case DRW_STATE_BLEND_ADD_FULL:
      blend = GPU_BLEND_ADDITIVE_PREMULT;
      break;
    case DRW_STATE_BLEND_ALPHA:
      blend = GPU_BLEND_ALPHA;
      break;
    case DRW_STATE_BLEND_ALPHA_PREMUL:
      blend = GPU_BLEND_ALPHA_PREMULT;
      break;
    case DRW_STATE_BLEND_BACKGROUND:
      blend = GPU_BLEND_BACKGROUND;
      break;
    case DRW_STATE_BLEND_OIT:
      blend = GPU_BLEND_OIT;
      break;
    case DRW_STATE_BLEND_MUL:
      blend = GPU_BLEND_MULTIPLY;
      break;
    case DRW_STATE_BLEND_SUB:
      blend = GPU_BLEND_SUBTRACT;
      break;
    case DRW_STATE_BLEND_CUSTOM:
      blend = GPU_BLEND_CUSTOM;
      break;
    case DRW_STATE_LOGIC_INVERT:
      blend = GPU_BLEND_INVERT;
      break;
    case DRW_STATE_BLEND_ALPHA_UNDER_PREMUL:
      blend = GPU_BLEND_ALPHA_UNDER_PREMUL;
      break;
    default:
      blend = GPU_BLEND_NONE;
      break;
  }

  /* Apply the translated state in one call. */
  GPU_state_set(
      write_mask, blend, culling_test, depth_test, stencil_test, stencil_op, provoking_vert);

  if (state & DRW_STATE_SHADOW_OFFSET) {
    GPU_shadow_offset(true);
  }
  else {
    GPU_shadow_offset(false);
  }

  /* TODO: this should be part of shader state. */
  if (state & DRW_STATE_CLIP_PLANES) {
    GPU_clip_distances(DST.view_active->clip_planes_len);
  }
  else {
    GPU_clip_distances(0);
  }

  if (state & DRW_STATE_IN_FRONT_SELECT) {
    /* XXX `GPU_depth_range` is not a perfect solution
     * since very distant geometries can still be occluded.
     * Also the depth test precision of these geometries is impaired.
     * However, it solves the selection for the vast majority of cases. */
    GPU_depth_range(0.0f, 0.01f);
  }
  else {
    GPU_depth_range(0.0f, 1.0f);
  }

  if (state & DRW_STATE_PROGRAM_POINT_SIZE) {
    GPU_program_point_size(true);
  }
  else {
    GPU_program_point_size(false);
  }

  if (state & DRW_STATE_FIRST_VERTEX_CONVENTION) {
    GPU_provoking_vertex(GPU_VERTEX_FIRST);
  }
  else {
    GPU_provoking_vertex(GPU_VERTEX_LAST);
  }

  /* Cache the applied state for the next early-out comparison. */
  DST.state = state;
}
|
|
|
|
|
|
2020-03-09 16:27:24 +01:00
|
|
|
/* Apply the full stencil configuration (write mask, reference value, compare mask). */
static void drw_stencil_state_set(uint write_mask, uint reference, uint compare_mask)
{
  /* Reminders:
   * - (compare_mask & reference) is what is tested against (compare_mask & stencil_value)
   * stencil_value being the value stored in the stencil buffer.
   * - (write-mask & reference) is what gets written if the test condition is fulfilled.
   */
  GPU_stencil_write_mask_set(write_mask);
  GPU_stencil_reference_set(reference);
  GPU_stencil_compare_mask_set(compare_mask);
}
|
|
|
|
|
|
|
|
|
|
/* Force the given state to be fully re-applied: inverting the cached state
 * guarantees drw_state_set() sees every bit as changed. */
void DRW_state_reset_ex(DRWState state)
{
  DST.state = ~state;
  drw_state_set(state);
}
|
|
|
|
|
|
2019-06-17 15:18:21 +02:00
|
|
|
static void drw_state_validate(void)
|
|
|
|
|
{
|
|
|
|
|
/* Cannot write to stencil buffer without stencil test. */
|
2021-08-05 16:48:29 +10:00
|
|
|
if (DST.state & DRW_STATE_WRITE_STENCIL_ENABLED) {
|
2019-06-17 15:18:21 +02:00
|
|
|
BLI_assert(DST.state & DRW_STATE_STENCIL_TEST_ENABLED);
|
|
|
|
|
}
|
|
|
|
|
/* Cannot write to depth buffer without depth test. */
|
2021-08-05 16:48:29 +10:00
|
|
|
if (DST.state & DRW_STATE_WRITE_DEPTH) {
|
2019-06-17 15:18:21 +02:00
|
|
|
BLI_assert(DST.state & DRW_STATE_DEPTH_TEST_ENABLED);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2018-03-01 17:14:35 +11:00
|
|
|
/* Lock the given state bits so drw_state_set() cannot change them.
 * The cached DST.state is synchronized with the actual GPU state for the
 * locked bits, otherwise the cache would override the real state. */
void DRW_state_lock(DRWState state)
{
  DST.state_lock = state;

  /* We must get the current state to avoid overriding it. */
  /* Not complete, but that just what we need for now. */
  if (state & DRW_STATE_WRITE_DEPTH) {
    SET_FLAG_FROM_TEST(DST.state, GPU_depth_mask_get(), DRW_STATE_WRITE_DEPTH);
  }
  if (state & DRW_STATE_DEPTH_TEST_ENABLED) {
    DST.state &= ~DRW_STATE_DEPTH_TEST_ENABLED;

    /* Map the active GPU depth function back to the matching DRW state bit. */
    switch (GPU_depth_test_get()) {
      case GPU_DEPTH_ALWAYS:
        DST.state |= DRW_STATE_DEPTH_ALWAYS;
        break;
      case GPU_DEPTH_LESS:
        DST.state |= DRW_STATE_DEPTH_LESS;
        break;
      case GPU_DEPTH_LESS_EQUAL:
        DST.state |= DRW_STATE_DEPTH_LESS_EQUAL;
        break;
      case GPU_DEPTH_EQUAL:
        DST.state |= DRW_STATE_DEPTH_EQUAL;
        break;
      case GPU_DEPTH_GREATER:
        DST.state |= DRW_STATE_DEPTH_GREATER;
        break;
      case GPU_DEPTH_GREATER_EQUAL:
        DST.state |= DRW_STATE_DEPTH_GREATER_EQUAL;
        break;
      default:
        break;
    }
  }
}
|
|
|
|
|
|
2018-02-28 01:16:23 +01:00
|
|
|
/* Reset to the default draw state and unbind all GPU resources.
 * Also re-applies constants expected to hold for the whole render. */
void DRW_state_reset(void)
{
  DRW_state_reset_ex(DRW_STATE_DEFAULT);

  GPU_texture_unbind_all();
  GPU_texture_image_unbind_all();
  GPU_uniformbuf_unbind_all();
  GPU_storagebuf_unbind_all();

  /* Should stay constant during the whole rendering. */
  GPU_point_size(5);
  GPU_line_smooth(false);
  /* Bypass #U.pixelsize factor by using a factor of 0.0f. Will be clamped to 1.0f. */
  GPU_line_width(0.0f);
}
|
|
|
|
|
|
|
|
|
|
/** \} */
|
|
|
|
|
|
|
|
|
|
/* -------------------------------------------------------------------- */
|
2019-05-20 18:01:42 +02:00
|
|
|
/** \name Culling (DRW_culling)
|
2018-03-01 03:52:54 +01:00
|
|
|
* \{ */
|
|
|
|
|
|
2019-05-31 01:45:41 +02:00
|
|
|
/* Check whether the resource referenced by `handle` is culled for `view`,
 * by testing its cached culling mask against the view's culling bit. */
static bool draw_call_is_culled(const DRWResourceHandle *handle, DRWView *view)
{
  DRWCullingState *cull_state = DRW_memblock_elem_from_handle(DST.vmempool->cullstates, handle);
  const bool culled = (cull_state->mask & view->culling_mask) != 0;
  return culled;
}
|
|
|
|
|
|
2022-01-27 14:59:09 +01:00
|
|
|
void DRW_view_set_active(const DRWView *view)
|
2018-03-01 03:52:54 +01:00
|
|
|
{
|
2022-01-27 15:25:01 +01:00
|
|
|
DST.view_active = (view != NULL) ? ((DRWView *)view) : DST.view_default;
|
2018-03-01 03:52:54 +01:00
|
|
|
}
|
|
|
|
|
|
Cycles: merge of cycles-x branch, a major update to the renderer
This includes much improved GPU rendering performance, viewport interactivity,
new shadow catcher, revamped sampling settings, subsurface scattering anisotropy,
new GPU volume sampling, improved PMJ sampling pattern, and more.
Some features have also been removed or changed, breaking backwards compatibility.
Including the removal of the OpenCL backend, for which alternatives are under
development.
Release notes and code docs:
https://wiki.blender.org/wiki/Reference/Release_Notes/3.0/Cycles
https://wiki.blender.org/wiki/Source/Render/Cycles
Credits:
* Sergey Sharybin
* Brecht Van Lommel
* Patrick Mours (OptiX backend)
* Christophe Hery (subsurface scattering anisotropy)
* William Leeson (PMJ sampling pattern)
* Alaska (various fixes and tweaks)
* Thomas Dinges (various fixes)
For the full commit history, see the cycles-x branch. This squashes together
all the changes since intermediate changes would often fail building or tests.
Ref T87839, T87837, T87836
Fixes T90734, T89353, T80267, T80267, T77185, T69800
2021-09-20 17:59:20 +02:00
|
|
|
/* Return the currently active view (set via DRW_view_set_active). */
const DRWView *DRW_view_get_active(void)
{
  return DST.view_active;
}
|
|
|
|
|
|
2018-03-01 03:52:54 +01:00
|
|
|
/* Return True if the given BoundSphere intersect the current view frustum */
|
2019-05-20 18:01:42 +02:00
|
|
|
static bool draw_culling_sphere_test(const BoundSphere *frustum_bsphere,
|
|
|
|
|
const float (*frustum_planes)[4],
|
|
|
|
|
const BoundSphere *bsphere)
|
2018-03-01 03:52:54 +01:00
|
|
|
{
|
|
|
|
|
/* Bypass test if radius is negative. */
|
2019-01-25 07:10:13 +11:00
|
|
|
if (bsphere->radius < 0.0f) {
|
2018-03-01 03:52:54 +01:00
|
|
|
return true;
|
2019-01-25 07:10:13 +11:00
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2018-03-01 03:52:54 +01:00
|
|
|
/* Do a rough test first: Sphere VS Sphere intersect. */
|
2019-05-04 01:41:39 +02:00
|
|
|
float center_dist_sq = len_squared_v3v3(bsphere->center, frustum_bsphere->center);
|
|
|
|
|
float radius_sum = bsphere->radius + frustum_bsphere->radius;
|
2020-03-06 17:18:10 +01:00
|
|
|
if (center_dist_sq > square_f(radius_sum)) {
|
2018-03-01 03:52:54 +01:00
|
|
|
return false;
|
2019-01-25 07:10:13 +11:00
|
|
|
}
|
2021-07-03 23:08:40 +10:00
|
|
|
/* TODO: we could test against the inscribed sphere of the frustum to early out positively. */
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2018-03-01 03:52:54 +01:00
|
|
|
/* Test against the 6 frustum planes. */
|
2021-07-03 23:08:40 +10:00
|
|
|
/* TODO: order planes with sides first then far then near clip. Should be better culling
|
2019-05-17 14:04:30 +02:00
|
|
|
* heuristic when sculpting. */
|
2018-03-01 03:52:54 +01:00
|
|
|
for (int p = 0; p < 6; p++) {
|
2019-05-20 18:01:42 +02:00
|
|
|
float dist = plane_point_side_v3(frustum_planes[p], bsphere->center);
|
2018-03-01 03:52:54 +01:00
|
|
|
if (dist < -bsphere->radius) {
|
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
|
2019-05-20 18:01:42 +02:00
|
|
|
static bool draw_culling_box_test(const float (*frustum_planes)[4], const BoundBox *bbox)
|
2018-03-08 17:54:14 +01:00
|
|
|
{
|
|
|
|
|
/* 6 view frustum planes */
|
|
|
|
|
for (int p = 0; p < 6; p++) {
|
|
|
|
|
/* 8 box vertices. */
|
|
|
|
|
for (int v = 0; v < 8; v++) {
|
2019-05-20 18:01:42 +02:00
|
|
|
float dist = plane_point_side_v3(frustum_planes[p], bbox->vec[v]);
|
2018-03-08 17:54:14 +01:00
|
|
|
if (dist > 0.0f) {
|
|
|
|
|
/* At least one point in front of this plane.
|
|
|
|
|
* Go to next plane. */
|
|
|
|
|
break;
|
|
|
|
|
}
|
2020-08-07 11:49:59 +02:00
|
|
|
if (v == 7) {
|
2018-03-08 17:54:14 +01:00
|
|
|
/* 8 points behind this plane. */
|
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
|
2019-05-20 18:01:42 +02:00
|
|
|
static bool draw_culling_plane_test(const BoundBox *corners, const float plane[4])
|
2018-05-03 15:43:42 +02:00
|
|
|
{
|
|
|
|
|
/* Test against the 8 frustum corners. */
|
|
|
|
|
for (int c = 0; c < 8; c++) {
|
2019-05-20 18:01:42 +02:00
|
|
|
float dist = plane_point_side_v3(plane, corners->vec[c]);
|
2018-05-03 15:43:42 +02:00
|
|
|
if (dist < 0.0f) {
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
|
2019-05-21 16:55:17 +02:00
|
|
|
bool DRW_culling_sphere_test(const DRWView *view, const BoundSphere *bsphere)
|
2018-05-26 22:28:35 +02:00
|
|
|
{
|
2019-05-21 16:55:17 +02:00
|
|
|
view = view ? view : DST.view_default;
|
|
|
|
|
return draw_culling_sphere_test(&view->frustum_bsphere, view->frustum_planes, bsphere);
|
2018-05-26 22:28:35 +02:00
|
|
|
}
|
|
|
|
|
|
2019-05-21 16:55:17 +02:00
|
|
|
bool DRW_culling_box_test(const DRWView *view, const BoundBox *bbox)
|
2018-06-04 17:33:25 +02:00
|
|
|
{
|
2019-05-21 16:55:17 +02:00
|
|
|
view = view ? view : DST.view_default;
|
|
|
|
|
return draw_culling_box_test(view->frustum_planes, bbox);
|
2018-06-04 17:33:25 +02:00
|
|
|
}
|
|
|
|
|
|
2019-05-21 16:55:17 +02:00
|
|
|
bool DRW_culling_plane_test(const DRWView *view, const float plane[4])
|
2018-04-24 12:29:15 +02:00
|
|
|
{
|
2019-05-21 16:55:17 +02:00
|
|
|
view = view ? view : DST.view_default;
|
|
|
|
|
return draw_culling_plane_test(&view->frustum_corners, plane);
|
2019-05-20 18:01:42 +02:00
|
|
|
}
|
2018-04-24 12:29:15 +02:00
|
|
|
|
2019-08-15 10:17:41 -03:00
|
|
|
bool DRW_culling_min_max_test(const DRWView *view, float obmat[4][4], float min[3], float max[3])
|
|
|
|
|
{
|
|
|
|
|
view = view ? view : DST.view_default;
|
|
|
|
|
float tobmat[4][4];
|
|
|
|
|
transpose_m4_m4(tobmat, obmat);
|
|
|
|
|
for (int i = 6; i--;) {
|
|
|
|
|
float frustum_plane_local[4], bb_near[3], bb_far[3];
|
|
|
|
|
mul_v4_m4v4(frustum_plane_local, tobmat, view->frustum_planes[i]);
|
|
|
|
|
aabb_get_near_far_from_plane(frustum_plane_local, min, max, bb_near, bb_far);
|
|
|
|
|
|
|
|
|
|
if (plane_point_side_v3(frustum_plane_local, bb_far) < 0.0f) {
|
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
|
2019-05-21 21:29:25 +02:00
|
|
|
/* Copy the view's frustum corners into `corners`; NULL view means the default. */
void DRW_culling_frustum_corners_get(const DRWView *view, BoundBox *corners)
{
  const DRWView *src = (view != NULL) ? view : DST.view_default;
  *corners = src->frustum_corners;
}
|
2018-04-24 12:29:15 +02:00
|
|
|
|
2019-05-21 21:29:25 +02:00
|
|
|
void DRW_culling_frustum_planes_get(const DRWView *view, float planes[6][4])
|
2019-05-20 18:01:42 +02:00
|
|
|
{
|
2019-05-21 21:29:25 +02:00
|
|
|
view = view ? view : DST.view_default;
|
2020-08-08 13:29:21 +10:00
|
|
|
memcpy(planes, view->frustum_planes, sizeof(float[6][4]));
|
2018-04-24 12:29:15 +02:00
|
|
|
}
|
|
|
|
|
|
2019-05-20 18:01:42 +02:00
|
|
|
/* Recompute the culling mask of every registered culling state for `view`.
 * Child views delegate to their parent; nothing is done if the view is clean. */
static void draw_compute_culling(DRWView *view)
{
  /* Culling is computed on the parent view when one exists. */
  view = view->parent ? view->parent : view;

  /* TODO(fclem): multi-thread this. */
  /* TODO(fclem): compute all dirty views at once. */
  if (!view->is_dirty) {
    return;
  }

  /* Iterate over every culling state stored in the view memory pool. */
  BLI_memblock_iter iter;
  BLI_memblock_iternew(DST.vmempool->cullstates, &iter);
  DRWCullingState *cull;
  while ((cull = BLI_memblock_iterstep(&iter))) {
    if (cull->bsphere.radius < 0.0) {
      /* Negative radius: culling disabled, never considered culled. */
      cull->mask = 0;
    }
    else {
      bool culled = !draw_culling_sphere_test(
          &view->frustum_bsphere, view->frustum_planes, &cull->bsphere);

#ifdef DRW_DEBUG_CULLING
      /* Debug visualization: red = culled, green = visible. */
      if (G.debug_value != 0) {
        if (culled) {
          DRW_debug_sphere(
              cull->bsphere.center, cull->bsphere.radius, (const float[4]){1, 0, 0, 1});
        }
        else {
          DRW_debug_sphere(
              cull->bsphere.center, cull->bsphere.radius, (const float[4]){0, 1, 0, 1});
        }
      }
#endif

      /* Allow a user callback to override the visibility result. */
      if (view->visibility_fn) {
        culled = !view->visibility_fn(!culled, cull->user_data);
      }

      /* Store the result in this view's bit of the mask. */
      SET_FLAG_FROM_TEST(cull->mask, culled, view->culling_mask);
    }
  }

  view->is_dirty = false;
}
|
|
|
|
|
|
|
|
|
|
/** \} */
|
|
|
|
|
|
|
|
|
|
/* -------------------------------------------------------------------- */
|
|
|
|
|
/** \name Draw (DRW_draw)
|
|
|
|
|
* \{ */
|
|
|
|
|
|
2019-05-31 01:45:41 +02:00
|
|
|
BLI_INLINE void draw_legacy_matrix_update(DRWShadingGroup *shgroup,
|
|
|
|
|
DRWResourceHandle *handle,
|
|
|
|
|
float obmat_loc,
|
2020-06-03 10:58:05 +02:00
|
|
|
float obinv_loc)
|
2019-05-20 18:01:42 +02:00
|
|
|
{
|
2019-05-31 01:45:41 +02:00
|
|
|
/* Still supported for compatibility with gpu_shader_* but should be forbidden. */
|
|
|
|
|
DRWObjectMatrix *ob_mats = DRW_memblock_elem_from_handle(DST.vmempool->obmats, handle);
|
|
|
|
|
if (obmat_loc != -1) {
|
|
|
|
|
GPU_shader_uniform_vector(shgroup->shader, obmat_loc, 16, 1, (float *)ob_mats->model);
|
2019-08-22 14:26:09 +02:00
|
|
|
}
|
2019-05-31 01:45:41 +02:00
|
|
|
if (obinv_loc != -1) {
|
|
|
|
|
GPU_shader_uniform_vector(shgroup->shader, obinv_loc, 16, 1, (float *)ob_mats->modelinverse);
|
2019-05-20 18:01:42 +02:00
|
|
|
}
|
2018-02-28 01:16:23 +01:00
|
|
|
}
|
|
|
|
|
|
2019-05-31 01:45:41 +02:00
|
|
|
/* Bind `geom` with the shading group's shader and cache it in DST.batch so
 * subsequent draws of the same batch can skip re-binding. */
BLI_INLINE void draw_geometry_bind(DRWShadingGroup *shgroup, GPUBatch *geom)
{
  DST.batch = geom;

  GPU_batch_set_shader(geom, shgroup->shader);
}
|
|
|
|
|
|
2019-05-22 13:27:43 +02:00
|
|
|
/* Issue a (possibly instanced) draw of `geom` for the given vertex/instance
 * ranges. `baseinst_loc` is the fallback uniform location used when
 * gl_BaseInstance is unavailable (-1 when not needed). */
BLI_INLINE void draw_geometry_execute(DRWShadingGroup *shgroup,
                                      GPUBatch *geom,
                                      int vert_first,
                                      int vert_count,
                                      int inst_first,
                                      int inst_count,
                                      int baseinst_loc)
{
  /* inst_count can be -1. */
  inst_count = max_ii(0, inst_count);

  if (baseinst_loc != -1) {
    /* Fallback when ARB_shader_draw_parameters is not supported. */
    GPU_shader_uniform_vector_int(shgroup->shader, baseinst_loc, 1, 1, (int *)&inst_first);
    /* Avoids VAO reconfiguration on older hardware. (see GPU_batch_draw_advanced) */
    inst_first = 0;
  }

  /* bind vertex array */
  if (DST.batch != geom) {
    draw_geometry_bind(shgroup, geom);
  }

  GPU_batch_draw_advanced(geom, vert_first, vert_count, inst_first, inst_count);
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2019-05-31 01:45:41 +02:00
|
|
|
/* Flush the pending instanced draw accumulated in `state`, using the
 * multi-draw-indirect list when supported, or a direct instanced draw as
 * fallback. No-op when nothing is pending. */
BLI_INLINE void draw_indirect_call(DRWShadingGroup *shgroup, DRWCommandsState *state)
{
  if (state->inst_count == 0) {
    return;
  }
  if (state->baseinst_loc == -1) {
    /* bind vertex array */
    if (DST.batch != state->batch) {
      /* Batch change: submit what was queued for the previous batch first. */
      GPU_draw_list_submit(DST.draw_list);
      draw_geometry_bind(shgroup, state->batch);
    }
    GPU_draw_list_append(DST.draw_list, state->batch, state->base_inst, state->inst_count);
  }
  /* Fallback when unsupported */
  else {
    draw_geometry_execute(
        shgroup, state->batch, 0, 0, state->base_inst, state->inst_count, state->baseinst_loc);
  }
}
|
|
|
|
|
|
2019-05-31 01:45:41 +02:00
|
|
|
/* Upload / bind every uniform, texture and buffer recorded in the shading
 * group, and record resource locations (chunked UBO slots, legacy matrix
 * locations, base-instance location, ...) into `state` for the per-drawcall
 * binding done later. Sets `*use_tfeedback` when a transform feedback target
 * is encountered. */
static void draw_update_uniforms(DRWShadingGroup *shgroup,
                                 DRWCommandsState *state,
                                 bool *use_tfeedback)
{
#define MAX_UNIFORM_STACK_SIZE 64

  /* Elements of a uniform array are stored as separate entries: gather them
   * into one local buffer so the whole array is uploaded in a single call. */
  int batched_loc = -1;
  int batched_remaining = 0;
  float mat4_buf[4 * 4];

  for (DRWUniformChunk *chunk = shgroup->uniforms; chunk; chunk = chunk->next) {
    /* Entries are stored in reverse order inside each chunk; walk backwards. */
    DRWUniform *u = chunk->uniforms + chunk->uniform_used - 1;

    for (int i = 0; i < chunk->uniform_used; i++, u--) {
      if (u->arraysize > 1 && u->type == DRW_UNIFORM_FLOAT_COPY) {
        /* Only written for mat4 copy for now and is not meant to become generalized. */
        /* TODO(@fclem): Use UBOs/SSBOs instead of inline mat4 copies. */
        BLI_assert(u->arraysize == 4 && u->length == 4);

        if (batched_loc == -1) {
          /* First element of a new array: start gathering. */
          batched_loc = u->location;
          batched_remaining = u->arraysize * u->length;
        }
        /* Every element of one array must share the same location. */
        BLI_assert(batched_loc > -1 && batched_loc == u->location);

        /* Copy this element into its slot of the local buffer. */
        batched_remaining -= u->length;
        memcpy(&mat4_buf[batched_remaining], u->fvalue, sizeof(float) * u->length);

        if (batched_remaining <= 0) {
          /* Whole matrix gathered: flush it to the shader. */
          GPU_shader_uniform_vector(shgroup->shader, u->location, 16, 1, mat4_buf);
          batched_loc = -1;
        }
        continue;
      }

      /* Scalar / single-entry cases. */
      switch (u->type) {
        case DRW_UNIFORM_INT_COPY:
          BLI_assert(u->arraysize == 1);
          if (u->arraysize == 1) {
            GPU_shader_uniform_vector_int(
                shgroup->shader, u->location, u->length, u->arraysize, u->ivalue);
          }
          break;
        case DRW_UNIFORM_INT:
          GPU_shader_uniform_vector_int(
              shgroup->shader, u->location, u->length, u->arraysize, u->pvalue);
          break;
        case DRW_UNIFORM_FLOAT_COPY:
          BLI_assert(u->arraysize == 1);
          if (u->arraysize == 1) {
            GPU_shader_uniform_vector(
                shgroup->shader, u->location, u->length, u->arraysize, u->fvalue);
          }
          break;
        case DRW_UNIFORM_FLOAT:
          GPU_shader_uniform_vector(
              shgroup->shader, u->location, u->length, u->arraysize, u->pvalue);
          break;
        case DRW_UNIFORM_TEXTURE:
          GPU_texture_bind_ex(u->texture, u->sampler_state, u->location, false);
          break;
        case DRW_UNIFORM_TEXTURE_REF:
          GPU_texture_bind_ex(*u->texture_ref, u->sampler_state, u->location, false);
          break;
        case DRW_UNIFORM_IMAGE:
          GPU_texture_image_bind(u->texture, u->location);
          break;
        case DRW_UNIFORM_IMAGE_REF:
          GPU_texture_image_bind(*u->texture_ref, u->location);
          break;
        case DRW_UNIFORM_BLOCK:
          GPU_uniformbuf_bind(u->block, u->location);
          break;
        case DRW_UNIFORM_BLOCK_REF:
          GPU_uniformbuf_bind(*u->block_ref, u->location);
          break;
        case DRW_UNIFORM_STORAGE_BLOCK:
          GPU_storagebuf_bind(u->ssbo, u->location);
          break;
        case DRW_UNIFORM_STORAGE_BLOCK_REF:
          GPU_storagebuf_bind(*u->ssbo_ref, u->location);
          break;
        case DRW_UNIFORM_BLOCK_OBMATS:
          /* Chunked object-matrices UBO: remember the slot, bind chunk 0 now;
           * the matching chunk is rebound per drawcall in
           * draw_call_resource_bind(). */
          state->obmats_loc = u->location;
          GPU_uniformbuf_bind(DST.vmempool->matrices_ubo[0], u->location);
          break;
        case DRW_UNIFORM_BLOCK_OBINFOS:
          state->obinfos_loc = u->location;
          GPU_uniformbuf_bind(DST.vmempool->obinfos_ubo[0], u->location);
          break;
        case DRW_UNIFORM_BLOCK_OBATTRS:
          state->obattrs_loc = u->location;
          state->obattrs_ubo = DRW_uniform_attrs_pool_find_ubo(DST.vmempool->obattrs_ubo_pool,
                                                               u->uniform_attrs);
          DRW_sparse_uniform_buffer_bind(state->obattrs_ubo, 0, u->location);
          break;
        case DRW_UNIFORM_BLOCK_VLATTRS:
          state->vlattrs_loc = u->location;
          GPU_uniformbuf_bind(drw_ensure_layer_attribute_buffer(), u->location);
          break;
        case DRW_UNIFORM_RESOURCE_CHUNK:
          state->chunkid_loc = u->location;
          GPU_shader_uniform_int(shgroup->shader, u->location, 0);
          break;
        case DRW_UNIFORM_RESOURCE_ID:
          state->resourceid_loc = u->location;
          break;
        case DRW_UNIFORM_TFEEDBACK_TARGET:
          BLI_assert(u->pvalue && (*use_tfeedback == false));
          *use_tfeedback = GPU_shader_transform_feedback_enable(shgroup->shader,
                                                                ((GPUVertBuf *)u->pvalue));
          break;
        case DRW_UNIFORM_VERTEX_BUFFER_AS_TEXTURE_REF:
          GPU_vertbuf_bind_as_texture(*u->vertbuf_ref, u->location);
          break;
        case DRW_UNIFORM_VERTEX_BUFFER_AS_TEXTURE:
          GPU_vertbuf_bind_as_texture(u->vertbuf, u->location);
          break;
        case DRW_UNIFORM_VERTEX_BUFFER_AS_STORAGE_REF:
          GPU_vertbuf_bind_as_ssbo(*u->vertbuf_ref, u->location);
          break;
        case DRW_UNIFORM_VERTEX_BUFFER_AS_STORAGE:
          GPU_vertbuf_bind_as_ssbo(u->vertbuf, u->location);
          break;
        /* Legacy/Fallback support. */
        case DRW_UNIFORM_BASE_INSTANCE:
          state->baseinst_loc = u->location;
          break;
        case DRW_UNIFORM_MODEL_MATRIX:
          state->obmat_loc = u->location;
          break;
        case DRW_UNIFORM_MODEL_MATRIX_INVERSE:
          state->obinv_loc = u->location;
          break;
      }
    }
  }
  /* Every gathered uniform array must have been flushed. */
  BLI_assert(batched_remaining == 0);
  BLI_assert(batched_loc == -1);
  UNUSED_VARS_NDEBUG(batched_loc);
}
|
|
|
|
|
|
2019-05-31 01:45:41 +02:00
|
|
|
BLI_INLINE void draw_select_buffer(DRWShadingGroup *shgroup,
|
|
|
|
|
DRWCommandsState *state,
|
|
|
|
|
GPUBatch *batch,
|
|
|
|
|
const DRWResourceHandle *handle)
|
2019-05-31 01:45:41 +02:00
|
|
|
{
|
2019-12-02 01:40:58 +01:00
|
|
|
const bool is_instancing = (batch->inst[0] != NULL);
|
2019-05-31 01:45:41 +02:00
|
|
|
int start = 0;
|
|
|
|
|
int count = 1;
|
2020-09-06 16:40:07 +02:00
|
|
|
int tot = is_instancing ? GPU_vertbuf_get_vertex_len(batch->inst[0]) :
|
|
|
|
|
GPU_vertbuf_get_vertex_len(batch->verts[0]);
|
2021-07-07 12:55:19 +10:00
|
|
|
/* HACK: get VBO data without actually drawing. */
|
2020-09-06 16:40:07 +02:00
|
|
|
int *select_id = (void *)GPU_vertbuf_get_data(state->select_buf);
|
2019-05-31 01:45:41 +02:00
|
|
|
|
|
|
|
|
/* Batching */
|
|
|
|
|
if (!is_instancing) {
|
|
|
|
|
/* FIXME: Meh a bit nasty. */
|
2020-08-08 23:37:43 +02:00
|
|
|
if (batch->prim_type == GPU_PRIM_TRIS) {
|
2019-05-31 01:45:41 +02:00
|
|
|
count = 3;
|
|
|
|
|
}
|
2020-08-08 23:37:43 +02:00
|
|
|
else if (batch->prim_type == GPU_PRIM_LINES) {
|
2019-05-31 01:45:41 +02:00
|
|
|
count = 2;
|
|
|
|
|
}
|
2019-05-31 01:45:41 +02:00
|
|
|
}
|
2019-05-31 01:45:41 +02:00
|
|
|
|
|
|
|
|
while (start < tot) {
|
|
|
|
|
GPU_select_load_id(select_id[start]);
|
|
|
|
|
if (is_instancing) {
|
|
|
|
|
draw_geometry_execute(shgroup, batch, 0, 0, start, count, state->baseinst_loc);
|
|
|
|
|
}
|
|
|
|
|
else {
|
|
|
|
|
draw_geometry_execute(
|
|
|
|
|
shgroup, batch, start, count, DRW_handle_id_get(handle), 0, state->baseinst_loc);
|
2019-05-31 01:45:41 +02:00
|
|
|
}
|
2019-05-31 01:45:41 +02:00
|
|
|
start += count;
|
|
|
|
|
}
|
|
|
|
|
}
|
2019-05-31 01:45:41 +02:00
|
|
|
|
2019-05-31 01:45:41 +02:00
|
|
|
typedef struct DRWCommandIterator {
|
|
|
|
|
int cmd_index;
|
|
|
|
|
DRWCommandChunk *curr_chunk;
|
|
|
|
|
} DRWCommandIterator;
|
|
|
|
|
|
|
|
|
|
static void draw_command_iter_begin(DRWCommandIterator *iter, DRWShadingGroup *shgroup)
|
|
|
|
|
{
|
|
|
|
|
iter->curr_chunk = shgroup->cmd.first;
|
|
|
|
|
iter->cmd_index = 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Return the next command of the iteration (writing its type to `cmd_type`),
 * or NULL when exhausted. Transparently hops to the next chunk. */
static DRWCommand *draw_command_iter_step(DRWCommandIterator *iter, eDRWCommandType *cmd_type)
{
  if (iter->curr_chunk == NULL) {
    return NULL;
  }
  /* Current chunk fully consumed: move to the next one. */
  if (iter->cmd_index == iter->curr_chunk->command_len) {
    iter->curr_chunk = iter->curr_chunk->next;
    iter->cmd_index = 0;
    if (iter->curr_chunk == NULL) {
      return NULL;
    }
  }
  *cmd_type = command_type_get(iter->curr_chunk->command_type, iter->cmd_index);
  if (iter->cmd_index < iter->curr_chunk->command_used) {
    return iter->curr_chunk->commands + iter->cmd_index++;
  }
  return NULL;
}
|
|
|
|
|
|
|
|
|
|
/* Make the GPU state match the resource referenced by `handle`:
 * front-facing flip, resource-chunk UBO slices and the resource-id uniform.
 * Redundant changes are skipped by comparing against `state`. */
static void draw_call_resource_bind(DRWCommandsState *state, const DRWResourceHandle *handle)
{
  /* Front face is not a resource but it is encoded inside the resource handle. */
  const bool neg_scale = DRW_handle_negative_scale_get(handle);
  if (neg_scale != state->neg_scale) {
    state->neg_scale = neg_scale;
    GPU_front_facing(neg_scale != DST.view_active->is_inverted);
  }

  const int chunk = DRW_handle_chunk_get(handle);
  if (state->resource_chunk != chunk) {
    const int prev_chunk = state->resource_chunk;
    if (state->chunkid_loc != -1) {
      GPU_shader_uniform_int(DST.shader, state->chunkid_loc, chunk);
    }
    /* Rebind each chunked buffer to the slice matching the new chunk. */
    if (state->obmats_loc != -1) {
      GPU_uniformbuf_unbind(DST.vmempool->matrices_ubo[prev_chunk]);
      GPU_uniformbuf_bind(DST.vmempool->matrices_ubo[chunk], state->obmats_loc);
    }
    if (state->obinfos_loc != -1) {
      GPU_uniformbuf_unbind(DST.vmempool->obinfos_ubo[prev_chunk]);
      GPU_uniformbuf_bind(DST.vmempool->obinfos_ubo[chunk], state->obinfos_loc);
    }
    if (state->obattrs_loc != -1) {
      DRW_sparse_uniform_buffer_unbind(state->obattrs_ubo, prev_chunk);
      DRW_sparse_uniform_buffer_bind(state->obattrs_ubo, chunk, state->obattrs_loc);
    }
    state->resource_chunk = chunk;
  }

  if (state->resourceid_loc != -1) {
    const int id = DRW_handle_id_get(handle);
    if (state->resource_id != id) {
      GPU_shader_uniform_int(DST.shader, state->resourceid_loc, id);
      state->resource_id = id;
    }
  }
}
|
|
|
|
|
|
|
|
|
|
/* Submit every pending batched drawcall, then reset the accumulation state. */
static void draw_call_batching_flush(DRWShadingGroup *shgroup, DRWCommandsState *state)
{
  draw_indirect_call(shgroup, state);
  GPU_draw_list_submit(DST.draw_list);

  state->batch = NULL;
  state->inst_count = 0;
  state->base_inst = -1;
}
|
|
|
|
|
|
|
|
|
|
static void draw_call_single_do(DRWShadingGroup *shgroup,
|
|
|
|
|
DRWCommandsState *state,
|
|
|
|
|
GPUBatch *batch,
|
|
|
|
|
DRWResourceHandle handle,
|
|
|
|
|
int vert_first,
|
|
|
|
|
int vert_count,
|
2020-03-09 16:27:24 +01:00
|
|
|
int inst_first,
|
2019-12-02 01:40:58 +01:00
|
|
|
int inst_count,
|
|
|
|
|
bool do_base_instance)
|
2019-05-31 01:45:41 +02:00
|
|
|
{
|
|
|
|
|
draw_call_batching_flush(shgroup, state);
|
|
|
|
|
|
|
|
|
|
draw_call_resource_bind(state, &handle);
|
|
|
|
|
|
2021-07-03 23:08:40 +10:00
|
|
|
/* TODO: This is Legacy. Need to be removed. */
|
2020-06-03 10:58:05 +02:00
|
|
|
if (state->obmats_loc == -1 && (state->obmat_loc != -1 || state->obinv_loc != -1)) {
|
|
|
|
|
draw_legacy_matrix_update(shgroup, &handle, state->obmat_loc, state->obinv_loc);
|
2019-05-31 01:45:41 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (G.f & G_FLAG_PICKSEL) {
|
|
|
|
|
if (state->select_buf != NULL) {
|
|
|
|
|
draw_select_buffer(shgroup, state, batch, &handle);
|
|
|
|
|
return;
|
|
|
|
|
}
|
2020-08-07 11:49:59 +02:00
|
|
|
|
|
|
|
|
GPU_select_load_id(state->select_id);
|
2019-05-31 01:45:41 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
draw_geometry_execute(shgroup,
|
|
|
|
|
batch,
|
|
|
|
|
vert_first,
|
|
|
|
|
vert_count,
|
2020-03-09 16:27:24 +01:00
|
|
|
do_base_instance ? DRW_handle_id_get(&handle) : inst_first,
|
2019-05-31 01:45:41 +02:00
|
|
|
inst_count,
|
|
|
|
|
state->baseinst_loc);
|
|
|
|
|
}
|
|
|
|
|
|
2022-07-30 13:30:59 +02:00
|
|
|
/* Not to be mistaken with draw_indirect_call which does batch many drawcalls together. This one
|
|
|
|
|
* only execute an indirect drawcall with user indirect buffer. */
|
|
|
|
|
static void draw_call_indirect(DRWShadingGroup *shgroup,
|
|
|
|
|
DRWCommandsState *state,
|
|
|
|
|
GPUBatch *batch,
|
|
|
|
|
DRWResourceHandle handle,
|
|
|
|
|
GPUStorageBuf *indirect_buf)
|
|
|
|
|
{
|
|
|
|
|
draw_call_batching_flush(shgroup, state);
|
|
|
|
|
draw_call_resource_bind(state, &handle);
|
|
|
|
|
|
|
|
|
|
if (G.f & G_FLAG_PICKSEL) {
|
|
|
|
|
GPU_select_load_id(state->select_id);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
GPU_batch_set_shader(batch, shgroup->shader);
|
2022-08-30 21:57:39 +02:00
|
|
|
GPU_batch_draw_indirect(batch, indirect_buf, 0);
|
2022-07-30 13:30:59 +02:00
|
|
|
}
|
|
|
|
|
|
2019-05-31 01:45:41 +02:00
|
|
|
/* Reset the drawcall-batching state before iterating a shading group's
 * command list. */
static void draw_call_batching_start(DRWCommandsState *state)
{
  state->neg_scale = false;
  state->resource_chunk = 0;
  state->resource_id = -1;
  state->base_inst = 0;
  state->inst_count = 0;
  state->batch = NULL;

  state->select_id = -1;
  state->select_buf = NULL;
}
|
|
|
|
|
|
|
|
|
|
/* NOTE: Does not support batches with instancing VBOs. */
|
|
|
|
|
static void draw_call_batching_do(DRWShadingGroup *shgroup,
|
|
|
|
|
DRWCommandsState *state,
|
|
|
|
|
DRWCommandDraw *call)
|
|
|
|
|
{
|
2021-02-05 16:23:34 +11:00
|
|
|
/* If any condition requires to interrupt the merging. */
|
2019-05-31 01:45:41 +02:00
|
|
|
bool neg_scale = DRW_handle_negative_scale_get(&call->handle);
|
|
|
|
|
int chunk = DRW_handle_chunk_get(&call->handle);
|
|
|
|
|
int id = DRW_handle_id_get(&call->handle);
|
|
|
|
|
if ((state->neg_scale != neg_scale) || /* Need to change state. */
|
|
|
|
|
(state->resource_chunk != chunk) || /* Need to change UBOs. */
|
|
|
|
|
(state->batch != call->batch) /* Need to change VAO. */
|
|
|
|
|
) {
|
|
|
|
|
draw_call_batching_flush(shgroup, state);
|
|
|
|
|
|
|
|
|
|
state->batch = call->batch;
|
|
|
|
|
state->inst_count = 1;
|
|
|
|
|
state->base_inst = id;
|
|
|
|
|
|
|
|
|
|
draw_call_resource_bind(state, &call->handle);
|
|
|
|
|
}
|
|
|
|
|
/* Is the id consecutive? */
|
|
|
|
|
else if (id != state->base_inst + state->inst_count) {
|
|
|
|
|
/* We need to add a draw command for the pending instances. */
|
|
|
|
|
draw_indirect_call(shgroup, state);
|
|
|
|
|
state->inst_count = 1;
|
|
|
|
|
state->base_inst = id;
|
|
|
|
|
}
|
|
|
|
|
/* We avoid a drawcall by merging with the precedent
|
|
|
|
|
* drawcall using instancing. */
|
2019-05-13 17:56:20 +02:00
|
|
|
else {
|
2019-05-31 01:45:41 +02:00
|
|
|
state->inst_count++;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Flush remaining pending drawcalls. */
|
|
|
|
|
static void draw_call_batching_finish(DRWShadingGroup *shgroup, DRWCommandsState *state)
|
|
|
|
|
{
|
|
|
|
|
draw_call_batching_flush(shgroup, state);
|
|
|
|
|
|
|
|
|
|
/* Reset state */
|
|
|
|
|
if (state->neg_scale) {
|
2020-08-19 17:41:23 +02:00
|
|
|
GPU_front_facing(DST.view_active->is_inverted);
|
2019-05-31 01:45:41 +02:00
|
|
|
}
|
|
|
|
|
if (state->obmats_loc != -1) {
|
2020-08-20 23:09:37 +02:00
|
|
|
GPU_uniformbuf_unbind(DST.vmempool->matrices_ubo[state->resource_chunk]);
|
2019-05-31 01:45:41 +02:00
|
|
|
}
|
|
|
|
|
if (state->obinfos_loc != -1) {
|
2020-08-20 23:09:37 +02:00
|
|
|
GPU_uniformbuf_unbind(DST.vmempool->obinfos_ubo[state->resource_chunk]);
|
2019-05-13 17:56:20 +02:00
|
|
|
}
|
Materials: add custom object properties as uniform attributes.
This patch allows the user to type a property name into the
Attribute node, which will then output the value of the property
for each individual object, allowing to e.g. customize shaders
by object without duplicating the shader.
In order to make supporting this easier for Eevee, it is necessary
to explicitly choose whether the attribute is varying or uniform
via a dropdown option of the Attribute node. The dropdown also
allows choosing whether instancing should be taken into account.
The Cycles design treats all attributes as one common namespace,
so the Blender interface converts the enum to a name prefix that
can't be entered using keyboard.
In Eevee, the attributes are provided to the shader via a UBO indexed
with resource_id, similar to the existing Object Info data. Unlike it,
however, it is necessary to maintain a separate buffer for every
requested combination of attributes.
This is done using a hash table with the attribute set as the key,
as it is expected that technically different but similar materials
may use the same set of attributes. In addition, in order to minimize
wasted memory, a sparse UBO pool is implemented, so that chunks that
don't contain any data don't have to be allocated.
The back-end Cycles code is already refactored and committed by Brecht.
Differential Revision: https://developer.blender.org/D2057
2020-08-05 19:14:40 +03:00
|
|
|
if (state->obattrs_loc != -1) {
|
|
|
|
|
DRW_sparse_uniform_buffer_unbind(state->obattrs_ubo, state->resource_chunk);
|
|
|
|
|
}
|
Attribute Node: support accessing attributes of View Layer and Scene.
The attribute node already allows accessing attributes associated
with objects and meshes, which allows changing the behavior of the
same material between different objects or instances. The same idea
can be extended to an even more global level of layers and scenes.
Currently view layers provide an option to replace all materials
with a different one. However, since the same material will be applied
to all objects in the layer, varying the behavior between layers while
preserving distinct materials requires duplicating objects.
Providing access to properties of layers and scenes via the attribute
node enables making materials with built-in switches or settings that
can be controlled globally at the view layer level. This is probably
most useful for complex NPR shading and compositing. Like with objects,
the node can also access built-in scene properties, like render resolution
or FOV of the active camera. Lookup is also attempted in World, similar
to how the Object mode checks the Mesh datablock.
In Cycles this mode is implemented by replacing the attribute node with
the attribute value during sync, allowing constant folding to take the
values into account. This means however that materials that use this
feature have to be re-synced upon any changes to scene, world or camera.
The Eevee version uses a new uniform buffer containing a sorted array
mapping name hashes to values, with binary search lookup. The array
is limited to 512 entries, which is effectively limitless even
considering it is shared by all materials in the scene; it is also
just 16KB of memory so no point trying to optimize further.
The buffer has to be rebuilt when new attributes are detected in a
material, so the draw engine keeps a table of recently seen attribute
names to minimize the chance of extra rebuilds mid-draw.
Differential Revision: https://developer.blender.org/D15941
2022-09-12 00:30:58 +03:00
|
|
|
if (state->vlattrs_loc != -1) {
|
|
|
|
|
GPU_uniformbuf_unbind(DST.vmempool->vlattrs_ubo);
|
|
|
|
|
}
|
2019-05-13 17:56:20 +02:00
|
|
|
}
|
|
|
|
|
|
2018-02-28 01:16:23 +01:00
|
|
|
static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
|
|
|
|
|
{
|
|
|
|
|
BLI_assert(shgroup->shader);
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2019-05-31 01:45:41 +02:00
|
|
|
DRWCommandsState state = {
|
|
|
|
|
.obmats_loc = -1,
|
|
|
|
|
.obinfos_loc = -1,
|
Materials: add custom object properties as uniform attributes.
This patch allows the user to type a property name into the
Attribute node, which will then output the value of the property
for each individual object, allowing to e.g. customize shaders
by object without duplicating the shader.
In order to make supporting this easier for Eevee, it is necessary
to explicitly choose whether the attribute is varying or uniform
via a dropdown option of the Attribute node. The dropdown also
allows choosing whether instancing should be taken into account.
The Cycles design treats all attributes as one common namespace,
so the Blender interface converts the enum to a name prefix that
can't be entered using keyboard.
In Eevee, the attributes are provided to the shader via a UBO indexed
with resource_id, similar to the existing Object Info data. Unlike it,
however, it is necessary to maintain a separate buffer for every
requested combination of attributes.
This is done using a hash table with the attribute set as the key,
as it is expected that technically different but similar materials
may use the same set of attributes. In addition, in order to minimize
wasted memory, a sparse UBO pool is implemented, so that chunks that
don't contain any data don't have to be allocated.
The back-end Cycles code is already refactored and committed by Brecht.
Differential Revision: https://developer.blender.org/D2057
2020-08-05 19:14:40 +03:00
|
|
|
.obattrs_loc = -1,
|
Attribute Node: support accessing attributes of View Layer and Scene.
The attribute node already allows accessing attributes associated
with objects and meshes, which allows changing the behavior of the
same material between different objects or instances. The same idea
can be extended to an even more global level of layers and scenes.
Currently view layers provide an option to replace all materials
with a different one. However, since the same material will be applied
to all objects in the layer, varying the behavior between layers while
preserving distinct materials requires duplicating objects.
Providing access to properties of layers and scenes via the attribute
node enables making materials with built-in switches or settings that
can be controlled globally at the view layer level. This is probably
most useful for complex NPR shading and compositing. Like with objects,
the node can also access built-in scene properties, like render resolution
or FOV of the active camera. Lookup is also attempted in World, similar
to how the Object mode checks the Mesh datablock.
In Cycles this mode is implemented by replacing the attribute node with
the attribute value during sync, allowing constant folding to take the
values into account. This means however that materials that use this
feature have to be re-synced upon any changes to scene, world or camera.
The Eevee version uses a new uniform buffer containing a sorted array
mapping name hashes to values, with binary search lookup. The array
is limited to 512 entries, which is effectively limitless even
considering it is shared by all materials in the scene; it is also
just 16KB of memory so no point trying to optimize further.
The buffer has to be rebuilt when new attributes are detected in a
material, so the draw engine keeps a table of recently seen attribute
names to minimize the chance of extra rebuilds mid-draw.
Differential Revision: https://developer.blender.org/D15941
2022-09-12 00:30:58 +03:00
|
|
|
.vlattrs_loc = -1,
|
2019-05-31 01:45:41 +02:00
|
|
|
.baseinst_loc = -1,
|
|
|
|
|
.chunkid_loc = -1,
|
2020-03-09 16:27:24 +01:00
|
|
|
.resourceid_loc = -1,
|
2019-05-31 01:45:41 +02:00
|
|
|
.obmat_loc = -1,
|
|
|
|
|
.obinv_loc = -1,
|
Materials: add custom object properties as uniform attributes.
This patch allows the user to type a property name into the
Attribute node, which will then output the value of the property
for each individual object, allowing to e.g. customize shaders
by object without duplicating the shader.
In order to make supporting this easier for Eevee, it is necessary
to explicitly choose whether the attribute is varying or uniform
via a dropdown option of the Attribute node. The dropdown also
allows choosing whether instancing should be taken into account.
The Cycles design treats all attributes as one common namespace,
so the Blender interface converts the enum to a name prefix that
can't be entered using keyboard.
In Eevee, the attributes are provided to the shader via a UBO indexed
with resource_id, similar to the existing Object Info data. Unlike it,
however, it is necessary to maintain a separate buffer for every
requested combination of attributes.
This is done using a hash table with the attribute set as the key,
as it is expected that technically different but similar materials
may use the same set of attributes. In addition, in order to minimize
wasted memory, a sparse UBO pool is implemented, so that chunks that
don't contain any data don't have to be allocated.
The back-end Cycles code is already refactored and committed by Brecht.
Differential Revision: https://developer.blender.org/D2057
2020-08-05 19:14:40 +03:00
|
|
|
.obattrs_ubo = NULL,
|
2019-05-31 01:45:41 +02:00
|
|
|
.drw_state_enabled = 0,
|
|
|
|
|
.drw_state_disabled = 0,
|
|
|
|
|
};
|
|
|
|
|
|
2018-03-16 08:43:52 +01:00
|
|
|
const bool shader_changed = (DST.shader != shgroup->shader);
|
2018-05-16 12:47:15 +02:00
|
|
|
bool use_tfeedback = false;
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2018-03-16 08:43:52 +01:00
|
|
|
if (shader_changed) {
|
2019-03-28 01:11:28 +11:00
|
|
|
if (DST.shader) {
|
|
|
|
|
GPU_shader_unbind();
|
2020-06-03 16:23:13 +02:00
|
|
|
|
|
|
|
|
/* Unbinding can be costly. Skip in normal condition. */
|
|
|
|
|
if (G.debug & G_DEBUG_GPU) {
|
|
|
|
|
GPU_texture_unbind_all();
|
2022-08-01 17:58:14 +02:00
|
|
|
GPU_texture_image_unbind_all();
|
2020-08-20 23:09:37 +02:00
|
|
|
GPU_uniformbuf_unbind_all();
|
2022-02-08 23:19:31 +01:00
|
|
|
GPU_storagebuf_unbind_all();
|
2020-06-03 16:23:13 +02:00
|
|
|
}
|
2019-03-28 01:11:28 +11:00
|
|
|
}
|
2018-02-28 01:16:23 +01:00
|
|
|
GPU_shader_bind(shgroup->shader);
|
|
|
|
|
DST.shader = shgroup->shader;
|
2019-05-22 13:27:43 +02:00
|
|
|
DST.batch = NULL;
|
2018-03-09 23:50:30 +01:00
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2019-05-31 01:45:41 +02:00
|
|
|
draw_update_uniforms(shgroup, &state, &use_tfeedback);
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2019-05-31 01:45:41 +02:00
|
|
|
drw_state_set(pass_state);
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2018-02-28 01:16:23 +01:00
|
|
|
/* Rendering Calls */
|
2019-05-13 17:56:20 +02:00
|
|
|
{
|
2019-05-31 01:45:41 +02:00
|
|
|
DRWCommandIterator iter;
|
|
|
|
|
DRWCommand *cmd;
|
|
|
|
|
eDRWCommandType cmd_type;
|
|
|
|
|
|
|
|
|
|
draw_command_iter_begin(&iter, shgroup);
|
|
|
|
|
|
|
|
|
|
draw_call_batching_start(&state);
|
|
|
|
|
|
|
|
|
|
while ((cmd = draw_command_iter_step(&iter, &cmd_type))) {
|
|
|
|
|
|
|
|
|
|
switch (cmd_type) {
|
2022-07-30 13:30:59 +02:00
|
|
|
case DRW_CMD_DRAW_PROCEDURAL:
|
2019-05-31 01:45:41 +02:00
|
|
|
case DRW_CMD_DRWSTATE:
|
|
|
|
|
case DRW_CMD_STENCIL:
|
|
|
|
|
draw_call_batching_flush(shgroup, &state);
|
|
|
|
|
break;
|
|
|
|
|
case DRW_CMD_DRAW:
|
2022-07-30 13:30:59 +02:00
|
|
|
case DRW_CMD_DRAW_INDIRECT:
|
2019-05-31 01:45:41 +02:00
|
|
|
case DRW_CMD_DRAW_INSTANCE:
|
|
|
|
|
if (draw_call_is_culled(&cmd->instance.handle, DST.view_active)) {
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
default:
|
|
|
|
|
break;
|
2019-09-13 23:02:45 +02:00
|
|
|
}
|
|
|
|
|
|
2019-05-31 01:45:41 +02:00
|
|
|
switch (cmd_type) {
|
|
|
|
|
case DRW_CMD_CLEAR:
|
2020-08-29 01:13:54 +02:00
|
|
|
GPU_framebuffer_clear(GPU_framebuffer_active_get(),
|
|
|
|
|
cmd->clear.clear_channels,
|
|
|
|
|
(float[4]){cmd->clear.r / 255.0f,
|
|
|
|
|
cmd->clear.g / 255.0f,
|
|
|
|
|
cmd->clear.b / 255.0f,
|
|
|
|
|
cmd->clear.a / 255.0f},
|
|
|
|
|
cmd->clear.depth,
|
|
|
|
|
cmd->clear.stencil);
|
2019-05-31 01:45:41 +02:00
|
|
|
break;
|
|
|
|
|
case DRW_CMD_DRWSTATE:
|
|
|
|
|
state.drw_state_enabled |= cmd->state.enable;
|
|
|
|
|
state.drw_state_disabled |= cmd->state.disable;
|
|
|
|
|
drw_state_set((pass_state & ~state.drw_state_disabled) | state.drw_state_enabled);
|
|
|
|
|
break;
|
|
|
|
|
case DRW_CMD_STENCIL:
|
2020-03-09 16:27:24 +01:00
|
|
|
drw_stencil_state_set(cmd->stencil.write_mask, cmd->stencil.ref, cmd->stencil.comp_mask);
|
2019-05-31 01:45:41 +02:00
|
|
|
break;
|
|
|
|
|
case DRW_CMD_SELECTID:
|
|
|
|
|
state.select_id = cmd->select_id.select_id;
|
|
|
|
|
state.select_buf = cmd->select_id.select_buf;
|
|
|
|
|
break;
|
|
|
|
|
case DRW_CMD_DRAW:
|
|
|
|
|
if (!USE_BATCHING || state.obmats_loc == -1 || (G.f & G_FLAG_PICKSEL) ||
|
2020-03-10 02:03:26 +01:00
|
|
|
cmd->draw.batch->inst[0]) {
|
2020-03-09 16:27:24 +01:00
|
|
|
draw_call_single_do(
|
|
|
|
|
shgroup, &state, cmd->draw.batch, cmd->draw.handle, 0, 0, 0, 0, true);
|
2019-05-31 01:45:41 +02:00
|
|
|
}
|
|
|
|
|
else {
|
|
|
|
|
draw_call_batching_do(shgroup, &state, &cmd->draw);
|
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
case DRW_CMD_DRAW_PROCEDURAL:
|
|
|
|
|
draw_call_single_do(shgroup,
|
|
|
|
|
&state,
|
|
|
|
|
cmd->procedural.batch,
|
|
|
|
|
cmd->procedural.handle,
|
|
|
|
|
0,
|
|
|
|
|
cmd->procedural.vert_count,
|
2020-03-09 16:27:24 +01:00
|
|
|
0,
|
2019-12-02 01:40:58 +01:00
|
|
|
1,
|
|
|
|
|
true);
|
2019-05-31 01:45:41 +02:00
|
|
|
break;
|
2022-07-30 13:30:59 +02:00
|
|
|
case DRW_CMD_DRAW_INDIRECT:
|
|
|
|
|
draw_call_indirect(shgroup,
|
|
|
|
|
&state,
|
|
|
|
|
cmd->draw_indirect.batch,
|
|
|
|
|
cmd->draw_indirect.handle,
|
|
|
|
|
cmd->draw_indirect.indirect_buf);
|
|
|
|
|
break;
|
2019-05-31 01:45:41 +02:00
|
|
|
case DRW_CMD_DRAW_INSTANCE:
|
|
|
|
|
draw_call_single_do(shgroup,
|
|
|
|
|
&state,
|
|
|
|
|
cmd->instance.batch,
|
|
|
|
|
cmd->instance.handle,
|
|
|
|
|
0,
|
|
|
|
|
0,
|
2020-03-09 16:27:24 +01:00
|
|
|
0,
|
2019-12-02 01:40:58 +01:00
|
|
|
cmd->instance.inst_count,
|
2020-04-03 16:59:34 +11:00
|
|
|
cmd->instance.use_attrs == 0);
|
2019-05-31 01:45:41 +02:00
|
|
|
break;
|
|
|
|
|
case DRW_CMD_DRAW_RANGE:
|
|
|
|
|
draw_call_single_do(shgroup,
|
|
|
|
|
&state,
|
|
|
|
|
cmd->range.batch,
|
2020-03-09 16:27:24 +01:00
|
|
|
cmd->range.handle,
|
2019-05-31 01:45:41 +02:00
|
|
|
cmd->range.vert_first,
|
|
|
|
|
cmd->range.vert_count,
|
2020-03-09 16:27:24 +01:00
|
|
|
0,
|
2019-12-02 01:40:58 +01:00
|
|
|
1,
|
|
|
|
|
true);
|
2019-05-31 01:45:41 +02:00
|
|
|
break;
|
2020-03-09 16:27:24 +01:00
|
|
|
case DRW_CMD_DRAW_INSTANCE_RANGE:
|
|
|
|
|
draw_call_single_do(shgroup,
|
|
|
|
|
&state,
|
|
|
|
|
cmd->instance_range.batch,
|
|
|
|
|
cmd->instance_range.handle,
|
|
|
|
|
0,
|
|
|
|
|
0,
|
|
|
|
|
cmd->instance_range.inst_first,
|
|
|
|
|
cmd->instance_range.inst_count,
|
|
|
|
|
false);
|
|
|
|
|
break;
|
2021-05-28 08:16:26 +02:00
|
|
|
case DRW_CMD_COMPUTE:
|
|
|
|
|
GPU_compute_dispatch(shgroup->shader,
|
|
|
|
|
cmd->compute.groups_x_len,
|
|
|
|
|
cmd->compute.groups_y_len,
|
|
|
|
|
cmd->compute.groups_z_len);
|
|
|
|
|
break;
|
2022-02-04 18:32:40 +01:00
|
|
|
case DRW_CMD_COMPUTE_REF:
|
|
|
|
|
GPU_compute_dispatch(shgroup->shader,
|
|
|
|
|
cmd->compute_ref.groups_ref[0],
|
|
|
|
|
cmd->compute_ref.groups_ref[1],
|
|
|
|
|
cmd->compute_ref.groups_ref[2]);
|
|
|
|
|
break;
|
2022-03-16 09:06:06 +01:00
|
|
|
case DRW_CMD_COMPUTE_INDIRECT:
|
|
|
|
|
GPU_compute_dispatch_indirect(shgroup->shader, cmd->compute_indirect.indirect_buf);
|
|
|
|
|
break;
|
2022-02-04 18:32:40 +01:00
|
|
|
case DRW_CMD_BARRIER:
|
|
|
|
|
GPU_memory_barrier(cmd->barrier.type);
|
|
|
|
|
break;
|
2019-09-13 23:02:45 +02:00
|
|
|
}
|
|
|
|
|
}
|
2019-05-31 01:45:41 +02:00
|
|
|
|
|
|
|
|
draw_call_batching_finish(shgroup, &state);
|
2018-02-28 01:16:23 +01:00
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2018-05-16 12:47:15 +02:00
|
|
|
if (use_tfeedback) {
|
|
|
|
|
GPU_shader_transform_feedback_disable(shgroup->shader);
|
|
|
|
|
}
|
2018-02-28 01:16:23 +01:00
|
|
|
}
|
|
|
|
|
|
2022-10-07 12:21:30 +02:00
|
|
|
/* Upload the active view's data to the GPU and recompute its culling.
 * Called from drw_draw_pass_ex() when the active view changed or was
 * tagged dirty. Order matters: culling runs after the UBO/storage sync. */
static void drw_update_view(void)
{
  /* TODO(fclem): update a big UBO and only bind ranges here. */
  GPU_uniformbuf_update(G_draw.view_ubo, &DST.view_active->storage);
  GPU_uniformbuf_update(G_draw.clipping_ubo, &DST.view_active->clip_planes);

  /* TODO: get rid of this. */
  /* NOTE(review): CPU-side copy of the view storage; its consumers are not
   * visible in this file — presumably read by the drawing code, confirm. */
  DST.view_storage_cpy = DST.view_active->storage;

  /* Refresh visibility/culling results for the active view. */
  draw_compute_culling(DST.view_active);
}
|
|
|
|
|
|
|
|
|
|
static void drw_draw_pass_ex(DRWPass *pass,
|
|
|
|
|
DRWShadingGroup *start_group,
|
|
|
|
|
DRWShadingGroup *end_group)
|
|
|
|
|
{
|
2020-06-02 16:58:07 +02:00
|
|
|
if (pass->original) {
|
|
|
|
|
start_group = pass->original->shgroups.first;
|
|
|
|
|
end_group = pass->original->shgroups.last;
|
|
|
|
|
}
|
|
|
|
|
|
2019-01-25 07:10:13 +11:00
|
|
|
if (start_group == NULL) {
|
2018-04-17 18:26:05 +02:00
|
|
|
return;
|
2019-01-25 07:10:13 +11:00
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2018-03-09 19:52:37 +01:00
|
|
|
DST.shader = NULL;
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2018-02-28 01:16:23 +01:00
|
|
|
BLI_assert(DST.buffer_finish_called &&
|
|
|
|
|
"DRW_render_instance_buffer_finish had not been called before drawing");
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2022-10-07 12:21:30 +02:00
|
|
|
if (DST.view_previous != DST.view_active || DST.view_active->is_dirty) {
|
|
|
|
|
drw_update_view();
|
2019-05-21 22:11:53 +02:00
|
|
|
DST.view_active->is_dirty = false;
|
|
|
|
|
DST.view_previous = DST.view_active;
|
|
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2019-03-06 02:40:36 +01:00
|
|
|
/* GPU_framebuffer_clear calls can change the state outside the DRW module.
|
|
|
|
|
* Force reset the affected states to avoid problems later. */
|
|
|
|
|
drw_state_set(DST.state | DRW_STATE_WRITE_DEPTH | DRW_STATE_WRITE_COLOR);
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2018-02-28 01:16:23 +01:00
|
|
|
drw_state_set(pass->state);
|
2019-06-17 15:18:21 +02:00
|
|
|
drw_state_validate();
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2020-02-19 19:36:12 +01:00
|
|
|
if (DST.view_active->is_inverted) {
|
2020-08-19 17:41:23 +02:00
|
|
|
GPU_front_facing(true);
|
2020-02-19 19:36:12 +01:00
|
|
|
}
|
|
|
|
|
|
2018-02-28 01:16:23 +01:00
|
|
|
DRW_stats_query_start(pass->name);
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2018-02-28 01:16:23 +01:00
|
|
|
for (DRWShadingGroup *shgroup = start_group; shgroup; shgroup = shgroup->next) {
|
|
|
|
|
draw_shgroup(shgroup, pass->state);
|
|
|
|
|
/* break if upper limit */
|
|
|
|
|
if (shgroup == end_group) {
|
|
|
|
|
break;
|
|
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
}
|
|
|
|
|
|
2018-02-28 01:16:23 +01:00
|
|
|
if (DST.shader) {
|
|
|
|
|
GPU_shader_unbind();
|
|
|
|
|
DST.shader = NULL;
|
|
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2019-05-31 01:45:41 +02:00
|
|
|
if (DST.batch) {
|
|
|
|
|
DST.batch = NULL;
|
|
|
|
|
}
|
|
|
|
|
|
2019-09-25 15:37:13 +02:00
|
|
|
/* Fix T67342 for some reason. AMD Pro driver bug. */
|
|
|
|
|
if ((DST.state & DRW_STATE_BLEND_CUSTOM) != 0 &&
|
|
|
|
|
GPU_type_matches(GPU_DEVICE_ATI, GPU_OS_ANY, GPU_DRIVER_OFFICIAL)) {
|
|
|
|
|
drw_state_set(DST.state & ~DRW_STATE_BLEND_CUSTOM);
|
|
|
|
|
}
|
|
|
|
|
|
2019-03-05 16:42:18 +01:00
|
|
|
/* HACK: Rasterized discard can affect clear commands which are not
|
|
|
|
|
* part of a DRWPass (as of now). So disable rasterized discard here
|
|
|
|
|
* if it has been enabled. */
|
|
|
|
|
if ((DST.state & DRW_STATE_RASTERIZER_ENABLED) == 0) {
|
|
|
|
|
drw_state_set((DST.state & ~DRW_STATE_RASTERIZER_ENABLED) | DRW_STATE_DEFAULT);
|
|
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2020-02-19 19:36:12 +01:00
|
|
|
/* Reset default. */
|
|
|
|
|
if (DST.view_active->is_inverted) {
|
2020-08-19 17:41:23 +02:00
|
|
|
GPU_front_facing(false);
|
2020-02-19 19:36:12 +01:00
|
|
|
}
|
|
|
|
|
|
2018-02-28 01:16:23 +01:00
|
|
|
DRW_stats_query_end();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
 * Draw every pass in the linked list starting at \a pass, each one with
 * its complete range of shading groups.
 */
void DRW_draw_pass(DRWPass *pass)
{
  while (pass != NULL) {
    drw_draw_pass_ex(pass, pass->shgroups.first, pass->shgroups.last);
    pass = pass->next;
  }
}
|
|
|
|
|
|
|
|
|
|
/**
 * Draw only the shading groups of \a pass that lie in the inclusive range
 * [\a start_group, \a end_group]. Unlike #DRW_draw_pass, this does not
 * follow the pass's `next` links.
 */
void DRW_draw_pass_subset(DRWPass *pass, DRWShadingGroup *start_group, DRWShadingGroup *end_group)
{
  drw_draw_pass_ex(pass, start_group, end_group);
}
|
|
|
|
|
|
|
|
|
|
/** \} */
|