DRW: Remove draw_manager_exec.cc
This moves the state reset to be done by the `StateSet` command.
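Every call-site hunk below follows the same pattern: a legacy helper from the removed `draw_manager_exec.cc` (`DRW_state_reset()`, `DRW_state_reset_ex()`, or the internal `drw_state_set()`) is replaced by executing a `StateSet` command immediately. A minimal sketch of the substitution, using only calls that appear in this diff:

```cpp
/* Before: legacy state reset, defined in the now-removed draw_manager_exec.cc. */
DRW_state_reset_ex(DRW_STATE_WRITE_COLOR);

/* After: the same GPU state applied through the draw command module. The
 * argument defaults to DRW_STATE_DEFAULT, so a bare StateSet::set() is the
 * drop-in replacement for DRW_state_reset(). */
blender::draw::command::StateSet::set(DRW_STATE_WRITE_COLOR);
```

`StateSet::set()` builds a throwaway `RecordingState` and runs the existing `StateSet::execute()` on it, as the new function added in `draw_command.cc` below shows.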
@@ -85,7 +85,6 @@ set(SRC
  intern/draw_instance_data.cc
  intern/draw_manager.cc
  intern/draw_manager_c.cc
  intern/draw_manager_exec.cc
  intern/draw_manager_profiling.cc
  intern/draw_manager_shader.cc
  intern/draw_manager_text.cc
@@ -33,6 +33,7 @@
#include "RE_engine.h"
#include "RE_pipeline.h"

#include "draw_command.hh"
#include "draw_view.hh"
#include "external_engine.h" /* own include */

@@ -59,7 +60,7 @@ static void external_draw_scene_do_v3d(void *vedata)
  RegionView3D *rv3d = draw_ctx->rv3d;
  ARegion *region = draw_ctx->region;

  DRW_state_reset_ex(DRW_STATE_WRITE_COLOR);
  blender::draw::command::StateSet::set(DRW_STATE_WRITE_COLOR);

  /* The external engine can use the OpenGL rendering API directly, so make sure the state is
   * already applied. */
@@ -159,7 +160,7 @@ static void external_draw_scene_do_image(void * /*vedata*/)
  BLI_assert(re != nullptr);
  BLI_assert(engine != nullptr);

  DRW_state_reset_ex(DRW_STATE_WRITE_COLOR);
  blender::draw::command::StateSet::set(DRW_STATE_WRITE_COLOR);

  /* The external engine can use the OpenGL rendering API directly, so make sure the state is
   * already applied. */
@@ -192,7 +193,7 @@ static void external_draw_scene_do_image(void * /*vedata*/)
  GPU_matrix_pop();
  GPU_matrix_pop_projection();

  DRW_state_reset();
  blender::draw::command::StateSet::set();
  GPU_bgl_end();

  RE_engine_draw_release(re);
@@ -400,23 +400,6 @@ void DRW_draw_pass_subset(DRWPass *pass, DRWShadingGroup *start_group, DRWShadin
void DRW_draw_callbacks_pre_scene();
void DRW_draw_callbacks_post_scene();

/**
 * Reset state to not interfere with other UI draw-call.
 */
void DRW_state_reset_ex(DRWState state);
void DRW_state_reset();
/**
 * Use with care, intended so selection code can override passes depth settings,
 * which is important for selection to work properly.
 *
 * Should be set in main draw loop, cleared afterwards
 */
void DRW_state_lock(DRWState state);

/* Selection. */

void DRW_select_load_id(uint id);

/* Draw State. */

/**
@@ -177,22 +177,5 @@ void DRW_viewport_colormanagement_set(GPUViewport *viewport)
  blender::draw::color_management::viewport_color_management_set(*viewport);
}

void DRW_transform_none(GPUTexture *tex)
{
  drw_state_set(DRW_STATE_WRITE_COLOR);

  GPU_matrix_identity_set();
  GPU_matrix_identity_projection_set();

  /* Draw as texture for final render (without immediate mode). */
  blender::gpu::Batch *geom = DRW_cache_fullscreen_quad_get();
  GPU_batch_program_set_builtin(geom, GPU_SHADER_3D_IMAGE_COLOR);
  GPU_batch_uniform_4f(geom, "color", 1.0f, 1.0f, 1.0f, 1.0f);
  GPU_batch_texture_bind(geom, "image", tex);

  GPU_batch_draw(geom);

  GPU_texture_unbind(tex);
}

/** \} */
@@ -14,5 +14,4 @@ struct GPUViewport;
/**
 * Draw texture to frame-buffer without any color transforms.
 */
void DRW_transform_none(GPUTexture *tex);
void DRW_viewport_colormanagement_set(GPUViewport *viewport);
@@ -344,6 +344,13 @@ void StateSet::execute(RecordingState &recording_state) const
  }
}

/* Set state of the GPU module manually. */
void StateSet::set(DRWState state)
{
  RecordingState recording_state;
  StateSet{state, 0}.execute(recording_state);
}

void StencilSet::execute() const
{
  GPU_stencil_write_mask_set(write_mask);
@@ -463,6 +463,9 @@ struct StateSet {

  void execute(RecordingState &state) const;
  std::string serialize() const;

  /* Set state of the GPU module manually. */
  static void set(DRWState state = DRW_STATE_DEFAULT);
};

struct StencilSet {
@@ -249,7 +249,7 @@ void DebugDraw::display_lines()

  float4x4 persmat = View::default_get().persmat();

  drw_state_set(DRW_STATE_WRITE_COLOR | DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_LESS);
  command::StateSet::set(DRW_STATE_WRITE_COLOR | DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_LESS);

  gpu::Batch *batch = drw_cache_procedural_lines_get();
  GPUShader *shader = DRW_shader_debug_draw_display_get();
@@ -437,7 +436,6 @@ static void drw_viewport_data_reset(DRWData *drw_data)

  BLI_memblock_clear(drw_data->commands, nullptr);
  BLI_memblock_clear(drw_data->commands_small, nullptr);
  BLI_memblock_clear(drw_data->callbuffers, nullptr);
  BLI_memblock_clear(drw_data->obmats, nullptr);
  BLI_memblock_clear(drw_data->obinfos, nullptr);
  BLI_memblock_clear(drw_data->cullstates, nullptr);
@@ -458,7 +457,6 @@ void DRW_viewport_data_free(DRWData *drw_data)

  BLI_memblock_destroy(drw_data->commands, nullptr);
  BLI_memblock_destroy(drw_data->commands_small, nullptr);
  BLI_memblock_destroy(drw_data->callbuffers, nullptr);
  BLI_memblock_destroy(drw_data->obmats, nullptr);
  BLI_memblock_destroy(drw_data->obinfos, nullptr);
  BLI_memblock_destroy(drw_data->cullstates, nullptr);
@@ -1120,7 +1118,7 @@ static void drw_engines_draw_scene()
    PROFILE_END_UPDATE(data->render_time, stime);
  }
  /* Reset state after drawing */
  DRW_state_reset();
  blender::draw::command::StateSet::set();
}

static void drw_engines_draw_text()
@@ -1436,7 +1434,7 @@ void DRW_draw_callbacks_pre_scene()
    ED_region_draw_cb_draw(DST.draw_ctx.evil_C, DST.draw_ctx.region, REGION_DRAW_PRE_VIEW);
    /* Callback can be nasty and do whatever they want with the state.
     * Don't trust them! */
    DRW_state_reset();
    blender::draw::command::StateSet::set();
  }
}

@@ -1452,7 +1450,7 @@ void DRW_draw_callbacks_post_scene()
  if (DST.draw_ctx.evil_C) {
    DefaultFramebufferList *dfbl = DRW_viewport_framebuffer_list_get();

    DRW_state_reset();
    blender::draw::command::StateSet::set();

    GPU_framebuffer_bind(dfbl->overlay_fb);

@@ -1499,7 +1497,7 @@ void DRW_draw_callbacks_post_scene()

    /* Callback can be nasty and do whatever they want with the state.
     * Don't trust them! */
    DRW_state_reset();
    blender::draw::command::StateSet::set();

    /* Needed so gizmo isn't occluded. */
    if ((v3d->gizmo_flag & V3D_GIZMO_HIDE) == 0) {
@@ -1548,7 +1546,7 @@ void DRW_draw_callbacks_post_scene()
    if ((v3d->flag & V3D_XR_SESSION_SURFACE) != 0) {
      DefaultFramebufferList *dfbl = DRW_viewport_framebuffer_list_get();

      DRW_state_reset();
      blender::draw::command::StateSet::set();

      GPU_framebuffer_bind(dfbl->overlay_fb);

@@ -1578,7 +1576,7 @@ void DRW_draw_callbacks_post_scene()
      }
    }

    DRW_state_reset();
    blender::draw::command::StateSet::set();
  }

  GPU_depth_test(GPU_DEPTH_LESS_EQUAL);
@@ -1737,7 +1735,7 @@ void DRW_draw_render_loop_ex(Depsgraph *depsgraph,
  GPU_framebuffer_bind(DST.default_framebuffer);

  /* Start Drawing */
  DRW_state_reset();
  blender::draw::command::StateSet::set();

  GPU_framebuffer_bind(DST.default_framebuffer);
  GPU_framebuffer_clear_depth_stencil(DST.default_framebuffer, 1.0f, 0xFF);
@@ -1768,7 +1766,7 @@ void DRW_draw_render_loop_ex(Depsgraph *depsgraph,
    GPU_framebuffer_restore();
  }

  DRW_state_reset();
  blender::draw::command::StateSet::set();
  drw_engines_disable();

  drw_manager_exit(&DST);
@@ -1940,7 +1938,7 @@ void DRW_render_gpencil(RenderEngine *engine, Depsgraph *depsgraph)
    DRW_render_gpencil_to_image(engine, render_layer, &render_rect);
  }

  DRW_state_reset();
  blender::draw::command::StateSet::set();

  GPU_depth_test(GPU_DEPTH_NONE);

@@ -2000,7 +1998,7 @@ void DRW_render_to_image(RenderEngine *engine, Depsgraph *depsgraph)
  }

  /* Reset state before drawing */
  DRW_state_reset();
  blender::draw::command::StateSet::set();

  /* set default viewport */
  GPU_viewport(0, 0, size[0], size[1]);
@@ -2038,7 +2036,7 @@ void DRW_render_to_image(RenderEngine *engine, Depsgraph *depsgraph)
  DRW_cache_free_old_subdiv();

  /* Reset state after drawing */
  DRW_state_reset();
  blender::draw::command::StateSet::set();

  /* End GPU workload Boundary */
  GPU_render_end();
@@ -2249,7 +2247,7 @@ void DRW_draw_render_loop_2d_ex(Depsgraph *depsgraph,
  GPU_framebuffer_bind(DST.default_framebuffer);

  /* Start Drawing */
  DRW_state_reset();
  blender::draw::command::StateSet::set();

  if (DST.draw_ctx.evil_C) {
    ED_region_draw_cb_draw(DST.draw_ctx.evil_C, DST.draw_ctx.region, REGION_DRAW_PRE_VIEW);
@@ -2264,7 +2262,7 @@ void DRW_draw_render_loop_2d_ex(Depsgraph *depsgraph,

  if (DST.draw_ctx.evil_C) {
    DefaultFramebufferList *dfbl = DRW_viewport_framebuffer_list_get();
    DRW_state_reset();
    blender::draw::command::StateSet::set();

    GPU_framebuffer_bind(dfbl->overlay_fb);

@@ -2280,7 +2278,7 @@ void DRW_draw_render_loop_2d_ex(Depsgraph *depsgraph,
    GPU_matrix_pop_projection();
    /* Callback can be nasty and do whatever they want with the state.
     * Don't trust them! */
    DRW_state_reset();
    blender::draw::command::StateSet::set();

    GPU_depth_test(GPU_DEPTH_NONE);
    drw_engines_draw_text();
@@ -2319,7 +2317,7 @@ void DRW_draw_render_loop_2d_ex(Depsgraph *depsgraph,
    GPU_framebuffer_restore();
  }

  DRW_state_reset();
  blender::draw::command::StateSet::set();
  drw_engines_disable();

  drw_manager_exit(&DST);
@@ -2538,7 +2536,6 @@ void DRW_draw_select_loop(Depsgraph *depsgraph,
      }
    }

    DRW_select_load_id(ob->runtime->select_id);
    DST.dupli_parent = data_.dupli_parent;
    DST.dupli_source = data_.dupli_object_current;
    drw_duplidata_load(ob);
@@ -2564,7 +2561,7 @@ void DRW_draw_select_loop(Depsgraph *depsgraph,
  DRW_viewport_texture_list_get()->depth = g_select_buffer.texture_depth;

  /* Start Drawing */
  DRW_state_reset();
  blender::draw::command::StateSet::set();
  DRW_draw_callbacks_pre_scene();

  DRW_curves_update(*DRW_manager_get());
@@ -2587,7 +2584,7 @@ void DRW_draw_select_loop(Depsgraph *depsgraph,
  /* WORKAROUND: Do not leave ownership to the viewport list. */
  DRW_viewport_texture_list_get()->depth = nullptr;

  DRW_state_reset();
  blender::draw::command::StateSet::set();
  drw_engines_disable();

  drw_manager_exit(&DST);
@@ -2698,7 +2695,7 @@ void DRW_draw_depth_loop(Depsgraph *depsgraph,
  }

  /* Start Drawing */
  DRW_state_reset();
  blender::draw::command::StateSet::set();

  DRW_curves_update(*DRW_manager_get());

@@ -2706,7 +2703,7 @@ void DRW_draw_depth_loop(Depsgraph *depsgraph,

  DRW_smoke_exit(DST.vmempool);

  DRW_state_reset();
  blender::draw::command::StateSet::set();

  /* TODO: Reading depth for operators should be done here. */

@@ -2800,9 +2797,9 @@ void DRW_draw_select_id(Depsgraph *depsgraph, ARegion *region, View3D *v3d)
  }

  /* Start Drawing */
  DRW_state_reset();
  blender::draw::command::StateSet::set();
  drw_engines_draw_scene();
  DRW_state_reset();
  blender::draw::command::StateSet::set();

  drw_engines_disable();

@@ -195,129 +195,6 @@ struct DRWObjectInfos {
BLI_STATIC_ASSERT_ALIGN(DRWObjectMatrix, 16)
BLI_STATIC_ASSERT_ALIGN(DRWObjectInfos, 16)

typedef enum {
  /* Draw Commands */
  DRW_CMD_DRAW = 0, /* Only sortable type. Must be 0. */
  DRW_CMD_DRAW_RANGE = 1,
  DRW_CMD_DRAW_INSTANCE = 2,
  DRW_CMD_DRAW_INSTANCE_RANGE = 3,
  DRW_CMD_DRAW_PROCEDURAL = 4,
  DRW_CMD_DRAW_INDIRECT = 5,

  /* Compute Commands. */
  DRW_CMD_COMPUTE = 8,
  DRW_CMD_COMPUTE_REF = 9,
  DRW_CMD_COMPUTE_INDIRECT = 10,

  /* Other Commands */
  DRW_CMD_BARRIER = 11,
  DRW_CMD_CLEAR = 12,
  DRW_CMD_DRWSTATE = 13,
  DRW_CMD_STENCIL = 14,
  DRW_CMD_SELECTID = 15,
  /* Needs to fit in 4bits */
} eDRWCommandType;

#define DRW_MAX_DRAW_CMD_TYPE DRW_CMD_DRAW_INDIRECT

struct DRWCommandDraw {
  blender::gpu::Batch *batch;
  DRWResourceHandle handle;
};

/* Assume DRWResourceHandle to be 0. */
struct DRWCommandDrawRange {
  blender::gpu::Batch *batch;
  DRWResourceHandle handle;
  uint vert_first;
  uint vert_count;
};

struct DRWCommandDrawInstance {
  blender::gpu::Batch *batch;
  DRWResourceHandle handle;
  uint inst_count;
  uint use_attrs; /* bool */
};

struct DRWCommandDrawInstanceRange {
  blender::gpu::Batch *batch;
  DRWResourceHandle handle;
  uint inst_first;
  uint inst_count;
};

struct DRWCommandDrawIndirect {
  blender::gpu::Batch *batch;
  DRWResourceHandle handle;
  GPUStorageBuf *indirect_buf;
};

struct DRWCommandCompute {
  int groups_x_len;
  int groups_y_len;
  int groups_z_len;
};

struct DRWCommandComputeRef {
  int *groups_ref;
};

struct DRWCommandComputeIndirect {
  GPUStorageBuf *indirect_buf;
};

struct DRWCommandBarrier {
  eGPUBarrier type;
};

struct DRWCommandDrawProcedural {
  blender::gpu::Batch *batch;
  DRWResourceHandle handle;
  uint vert_count;
};

struct DRWCommandSetMutableState {
  /** State changes (or'd or and'd with the pass's state) */
  DRWState enable;
  DRWState disable;
};

struct DRWCommandSetStencil {
  uint write_mask;
  uint comp_mask;
  uint ref;
};

struct DRWCommandSetSelectID {
  blender::gpu::VertBuf *select_buf;
  uint select_id;
};

struct DRWCommandClear {
  eGPUFrameBufferBits clear_channels;
  uchar r, g, b, a; /* [0..1] for each channels. Normalized. */
  float depth;      /* [0..1] for depth. Normalized. */
  uchar stencil;    /* Stencil value [0..255] */
};

union DRWCommand {
  DRWCommandDraw draw;
  DRWCommandDrawRange range;
  DRWCommandDrawInstance instance;
  DRWCommandDrawInstanceRange instance_range;
  DRWCommandDrawProcedural procedural;
  DRWCommandDrawIndirect draw_indirect;
  DRWCommandCompute compute;
  DRWCommandComputeRef compute_ref;
  DRWCommandComputeIndirect compute_indirect;
  DRWCommandBarrier barrier;
  DRWCommandSetMutableState state;
  DRWCommandSetStencil stencil;
  DRWCommandSetSelectID select_id;
  DRWCommandClear clear;
};

/** Used by #DRWUniform.type */
/* TODO(@jbakker): rename to DRW_RESOURCE/DRWResourceType. */
typedef enum {
@@ -470,8 +347,6 @@ struct DRWCommandChunk {
  /* 4bits for each command. */
  uint64_t command_type[6];
  /* -- 64 bytes aligned -- */
  DRWCommand commands[96];
  /* -- 64 bytes aligned -- */
};

struct DRWCommandSmallChunk {
@@ -481,7 +356,6 @@ struct DRWCommandSmallChunk {
  /* 4bits for each command. */
  /* TODO: reduce size of command_type. */
  uint64_t command_type[6];
  DRWCommand commands[6];
};

/* Only true for 64-bit platforms. */
@@ -502,7 +376,6 @@ struct DRWData {
  /** Memory-pools for draw-calls. */
  BLI_memblock *commands;
  BLI_memblock *commands_small;
  BLI_memblock *callbuffers;
  BLI_memblock *obmats;
  BLI_memblock *obinfos;
  BLI_memblock *cullstates;
@@ -653,15 +526,11 @@ extern DRWManager DST; /* TODO: get rid of this and allow multi-threaded renderi

void drw_texture_set_parameters(GPUTexture *tex, DRWTextureFlag flags);

void drw_state_set(DRWState state);

void drw_debug_draw();
void drw_debug_init();
void drw_debug_module_free(DRWDebugModule *module);
GPUStorageBuf *drw_debug_gpu_draw_buf_get();

eDRWCommandType command_type_get(const uint64_t *command_type_bits, int index);

void drw_batch_cache_validate(Object *ob);
void drw_batch_cache_generate_requested(Object *ob);

@@ -1,1049 +0,0 @@
/* SPDX-FileCopyrightText: 2016 Blender Authors
 *
 * SPDX-License-Identifier: GPL-2.0-or-later */

/** \file
 * \ingroup draw
 */

#include "draw_manager_c.hh"

#include "BLI_alloca.h"
#include "BLI_math_bits.h"
#include "BLI_memblock.h"

#include "BKE_global.hh"

#include "GPU_compute.hh"
#include "GPU_platform.hh"
#include "GPU_shader.hh"
#include "GPU_state.hh"

#ifdef USE_GPU_SELECT
#  include "GPU_select.hh"
#endif

void DRW_select_load_id(uint id)
{
#ifdef USE_GPU_SELECT
  BLI_assert(G.f & G_FLAG_PICKSEL);
  DST.select_id = id;
#endif
}

#define DEBUG_UBO_BINDING

struct DRWCommandsState {
  blender::gpu::Batch *batch;
  int resource_chunk;
  int resource_id;
  int base_inst;
  int inst_count;
  bool neg_scale;
  /* Resource location. */
  int obmats_loc;
  int obinfos_loc;
  int obattrs_loc;
  int vlattrs_loc;
  int baseinst_loc;
  int chunkid_loc;
  int resourceid_loc;
  /* Legacy matrix support. */
  int obmat_loc;
  int obinv_loc;
  /* Uniform Attributes. */
  DRWSparseUniformBuf *obattrs_ubo;
  /* Selection ID state. */
  blender::gpu::VertBuf *select_buf;
  uint select_id;
  /* Drawing State */
  DRWState drw_state_enabled;
  DRWState drw_state_disabled;
};

/* -------------------------------------------------------------------- */
/** \name Draw State (DRW_state)
 * \{ */

void drw_state_set(DRWState state)
{
  /* Mask locked state. */
  state = (~DST.state_lock & state) | (DST.state_lock & DST.state);

  if (DST.state == state) {
    return;
  }

  eGPUWriteMask write_mask = eGPUWriteMask(0);
  eGPUBlend blend = eGPUBlend(0);
  eGPUFaceCullTest culling_test = eGPUFaceCullTest(0);
  eGPUDepthTest depth_test = eGPUDepthTest(0);
  eGPUStencilTest stencil_test = eGPUStencilTest(0);
  eGPUStencilOp stencil_op = eGPUStencilOp(0);
  eGPUProvokingVertex provoking_vert = eGPUProvokingVertex(0);

  if (state & DRW_STATE_WRITE_DEPTH) {
    write_mask |= GPU_WRITE_DEPTH;
  }
  if (state & DRW_STATE_WRITE_COLOR) {
    write_mask |= GPU_WRITE_COLOR;
  }
  if (state & DRW_STATE_WRITE_STENCIL_ENABLED) {
    write_mask |= GPU_WRITE_STENCIL;
  }

  switch (state & (DRW_STATE_CULL_BACK | DRW_STATE_CULL_FRONT)) {
    case DRW_STATE_CULL_BACK:
      culling_test = GPU_CULL_BACK;
      break;
    case DRW_STATE_CULL_FRONT:
      culling_test = GPU_CULL_FRONT;
      break;
    default:
      culling_test = GPU_CULL_NONE;
      break;
  }

  switch (state & DRW_STATE_DEPTH_TEST_ENABLED) {
    case DRW_STATE_DEPTH_LESS:
      depth_test = GPU_DEPTH_LESS;
      break;
    case DRW_STATE_DEPTH_LESS_EQUAL:
      depth_test = GPU_DEPTH_LESS_EQUAL;
      break;
    case DRW_STATE_DEPTH_EQUAL:
      depth_test = GPU_DEPTH_EQUAL;
      break;
    case DRW_STATE_DEPTH_GREATER:
      depth_test = GPU_DEPTH_GREATER;
      break;
    case DRW_STATE_DEPTH_GREATER_EQUAL:
      depth_test = GPU_DEPTH_GREATER_EQUAL;
      break;
    case DRW_STATE_DEPTH_ALWAYS:
      depth_test = GPU_DEPTH_ALWAYS;
      break;
    default:
      depth_test = GPU_DEPTH_NONE;
      break;
  }

  switch (state & DRW_STATE_WRITE_STENCIL_ENABLED) {
    case DRW_STATE_WRITE_STENCIL:
      stencil_op = GPU_STENCIL_OP_REPLACE;
      GPU_stencil_write_mask_set(0xFF);
      break;
    case DRW_STATE_WRITE_STENCIL_SHADOW_PASS:
      stencil_op = GPU_STENCIL_OP_COUNT_DEPTH_PASS;
      GPU_stencil_write_mask_set(0xFF);
      break;
    case DRW_STATE_WRITE_STENCIL_SHADOW_FAIL:
      stencil_op = GPU_STENCIL_OP_COUNT_DEPTH_FAIL;
      GPU_stencil_write_mask_set(0xFF);
      break;
    default:
      stencil_op = GPU_STENCIL_OP_NONE;
      GPU_stencil_write_mask_set(0x00);
      break;
  }

  switch (state & DRW_STATE_STENCIL_TEST_ENABLED) {
    case DRW_STATE_STENCIL_ALWAYS:
      stencil_test = GPU_STENCIL_ALWAYS;
      break;
    case DRW_STATE_STENCIL_EQUAL:
      stencil_test = GPU_STENCIL_EQUAL;
      break;
    case DRW_STATE_STENCIL_NEQUAL:
      stencil_test = GPU_STENCIL_NEQUAL;
      break;
    default:
      stencil_test = GPU_STENCIL_NONE;
      break;
  }

  switch (state & DRW_STATE_BLEND_ENABLED) {
    case DRW_STATE_BLEND_ADD:
      blend = GPU_BLEND_ADDITIVE;
      break;
    case DRW_STATE_BLEND_ADD_FULL:
      blend = GPU_BLEND_ADDITIVE_PREMULT;
      break;
    case DRW_STATE_BLEND_ALPHA:
      blend = GPU_BLEND_ALPHA;
      break;
    case DRW_STATE_BLEND_ALPHA_PREMUL:
      blend = GPU_BLEND_ALPHA_PREMULT;
      break;
    case DRW_STATE_BLEND_BACKGROUND:
      blend = GPU_BLEND_BACKGROUND;
      break;
    case DRW_STATE_BLEND_OIT:
      blend = GPU_BLEND_OIT;
      break;
    case DRW_STATE_BLEND_MUL:
      blend = GPU_BLEND_MULTIPLY;
      break;
    case DRW_STATE_BLEND_SUB:
      blend = GPU_BLEND_SUBTRACT;
      break;
    case DRW_STATE_BLEND_CUSTOM:
      blend = GPU_BLEND_CUSTOM;
      break;
    case DRW_STATE_LOGIC_INVERT:
      blend = GPU_BLEND_INVERT;
      break;
    case DRW_STATE_BLEND_ALPHA_UNDER_PREMUL:
      blend = GPU_BLEND_ALPHA_UNDER_PREMUL;
      break;
    default:
      blend = GPU_BLEND_NONE;
      break;
  }

  GPU_state_set(
      write_mask, blend, culling_test, depth_test, stencil_test, stencil_op, provoking_vert);

  if (state & DRW_STATE_SHADOW_OFFSET) {
    GPU_shadow_offset(true);
  }
  else {
    GPU_shadow_offset(false);
  }

  if (state & DRW_STATE_IN_FRONT_SELECT) {
    /* XXX `GPU_depth_range` is not a perfect solution
     * since very distant geometries can still be occluded.
     * Also the depth test precision of these geometries is impaired.
     * However, it solves the selection for the vast majority of cases. */
    GPU_depth_range(0.0f, 0.01f);
  }
  else {
    GPU_depth_range(0.0f, 1.0f);
  }

  if (state & DRW_STATE_PROGRAM_POINT_SIZE) {
    GPU_program_point_size(true);
  }
  else {
    GPU_program_point_size(false);
  }

  if (state & DRW_STATE_FIRST_VERTEX_CONVENTION) {
    GPU_provoking_vertex(GPU_VERTEX_FIRST);
  }
  else {
    GPU_provoking_vertex(GPU_VERTEX_LAST);
  }

  DST.state = state;
}

static void drw_stencil_state_set(uint write_mask, uint reference, uint compare_mask)
{
  /* Reminders:
   * - (compare_mask & reference) is what is tested against (compare_mask & stencil_value)
   *   stencil_value being the value stored in the stencil buffer.
   * - (write-mask & reference) is what gets written if the test condition is fulfilled.
   */
  GPU_stencil_write_mask_set(write_mask);
  GPU_stencil_reference_set(reference);
  GPU_stencil_compare_mask_set(compare_mask);
}

void DRW_state_reset_ex(DRWState state)
{
  DST.state = ~state;
  drw_state_set(state);
}

static void drw_state_validate()
{
  /* Cannot write to stencil buffer without stencil test. */
  if (DST.state & DRW_STATE_WRITE_STENCIL_ENABLED) {
    BLI_assert(DST.state & DRW_STATE_STENCIL_TEST_ENABLED);
  }
  /* Cannot write to depth buffer without depth test. */
  if (DST.state & DRW_STATE_WRITE_DEPTH) {
    BLI_assert(DST.state & DRW_STATE_DEPTH_TEST_ENABLED);
  }
}

void DRW_state_lock(DRWState state)
{
  DST.state_lock = state;

  /* We must get the current state to avoid overriding it. */
  /* Not complete, but that just what we need for now. */
  if (state & DRW_STATE_WRITE_DEPTH) {
    SET_FLAG_FROM_TEST(DST.state, GPU_depth_mask_get(), DRW_STATE_WRITE_DEPTH);
  }
  if (state & DRW_STATE_DEPTH_TEST_ENABLED) {
    DST.state &= ~DRW_STATE_DEPTH_TEST_ENABLED;

    switch (GPU_depth_test_get()) {
      case GPU_DEPTH_ALWAYS:
        DST.state |= DRW_STATE_DEPTH_ALWAYS;
        break;
      case GPU_DEPTH_LESS:
        DST.state |= DRW_STATE_DEPTH_LESS;
        break;
      case GPU_DEPTH_LESS_EQUAL:
        DST.state |= DRW_STATE_DEPTH_LESS_EQUAL;
        break;
      case GPU_DEPTH_EQUAL:
        DST.state |= DRW_STATE_DEPTH_EQUAL;
        break;
      case GPU_DEPTH_GREATER:
        DST.state |= DRW_STATE_DEPTH_GREATER;
        break;
      case GPU_DEPTH_GREATER_EQUAL:
        DST.state |= DRW_STATE_DEPTH_GREATER_EQUAL;
        break;
      default:
        break;
    }
  }
}

void DRW_state_reset()
{
  DRW_state_reset_ex(DRW_STATE_DEFAULT);

  GPU_texture_unbind_all();
  GPU_texture_image_unbind_all();
  GPU_uniformbuf_debug_unbind_all();
  GPU_storagebuf_debug_unbind_all();

  /* Should stay constant during the whole rendering. */
  GPU_point_size(5);
  GPU_line_smooth(false);
  /* Bypass #U.pixelsize factor by using a factor of 0.0f. Will be clamped to 1.0f. */
  GPU_line_width(0.0f);
}

/** \} */

/* -------------------------------------------------------------------- */
/** \name Draw (DRW_draw)
 * \{ */

BLI_INLINE void draw_legacy_matrix_update(DRWShadingGroup *shgroup,
                                          DRWResourceHandle *handle,
                                          float obmat_loc,
                                          float obinv_loc)
{
  /* Still supported for compatibility with gpu_shader_* but should be forbidden. */
  DRWObjectMatrix *ob_mats = static_cast<DRWObjectMatrix *>(
      DRW_memblock_elem_from_handle(DST.vmempool->obmats, handle));
  if (obmat_loc != -1) {
    GPU_shader_uniform_float_ex(shgroup->shader, obmat_loc, 16, 1, (float *)ob_mats->model);
  }
  if (obinv_loc != -1) {
    GPU_shader_uniform_float_ex(shgroup->shader, obinv_loc, 16, 1, (float *)ob_mats->modelinverse);
  }
}

BLI_INLINE void draw_geometry_bind(DRWShadingGroup *shgroup, blender::gpu::Batch *geom)
{
  DST.batch = geom;

  GPU_batch_set_shader(geom, shgroup->shader);
}

BLI_INLINE void draw_geometry_execute(DRWShadingGroup *shgroup,
                                      blender::gpu::Batch *geom,
                                      int vert_first,
                                      int vert_count,
                                      int inst_first,
                                      int inst_count,
                                      int baseinst_loc)
{
  /* inst_count can be -1. */
  inst_count = max_ii(0, inst_count);

  if (baseinst_loc != -1) {
    /* Fallback when ARB_shader_draw_parameters is not supported. */
    GPU_shader_uniform_int_ex(shgroup->shader, baseinst_loc, 1, 1, (int *)&inst_first);
    /* Avoids VAO reconfiguration on older hardware. (see GPU_batch_draw_advanced) */
    inst_first = 0;
  }

  /* bind vertex array */
  if (DST.batch != geom) {
    draw_geometry_bind(shgroup, geom);
  }

  GPU_batch_draw_advanced(geom, vert_first, vert_count, inst_first, inst_count);
}

BLI_INLINE void draw_indirect_call(DRWShadingGroup *shgroup, DRWCommandsState *state)
{
  if (state->inst_count == 0) {
    return;
  }
  if (state->baseinst_loc == -1) {
    /* bind vertex array */
    if (DST.batch != state->batch) {
      GPU_draw_list_submit(DST.draw_list);
      draw_geometry_bind(shgroup, state->batch);
    }
    GPU_draw_list_append(DST.draw_list, state->batch, state->base_inst, state->inst_count);
  }
  /* Fallback when unsupported */
  else {
    draw_geometry_execute(
        shgroup, state->batch, 0, 0, state->base_inst, state->inst_count, state->baseinst_loc);
  }
}

static void draw_update_uniforms(DRWShadingGroup *shgroup,
                                 DRWCommandsState *state,
                                 bool *use_tfeedback)
{
#define MAX_UNIFORM_STACK_SIZE 64

  /* Uniform array elements stored as separate entries. We need to batch these together */
  int array_uniform_loc = -1;
  int array_index = 0;
  float mat4_stack[4 * 4];

  /* Loop through uniforms in reverse order. */
  for (DRWUniformChunk *unichunk = shgroup->uniforms; unichunk; unichunk = unichunk->next) {
    DRWUniform *uni = unichunk->uniforms + unichunk->uniform_used - 1;

    for (int i = 0; i < unichunk->uniform_used; i++, uni--) {
      /* For uniform array copies, copy per-array-element data into local buffer before upload. */
      if (uni->arraysize > 1 && uni->type == DRW_UNIFORM_FLOAT_COPY) {
        /* Only written for mat4 copy for now and is not meant to become generalized. */
        /* TODO(@fclem): Use UBOs/SSBOs instead of inline mat4 copies. */
        BLI_assert(uni->arraysize == 4 && uni->length == 4);
        /* Begin copying uniform array. */
        if (array_uniform_loc == -1) {
          array_uniform_loc = uni->location;
          array_index = uni->arraysize * uni->length;
        }
        /* Debug check same array loc. */
        BLI_assert(array_uniform_loc > -1 && array_uniform_loc == uni->location);
        /* Copy array element data to local buffer. */
        array_index -= uni->length;
        memcpy(&mat4_stack[array_index], uni->fvalue, sizeof(float) * uni->length);
        /* Flush array data to shader. */
        if (array_index <= 0) {
          GPU_shader_uniform_float_ex(shgroup->shader, uni->location, 16, 1, mat4_stack);
          array_uniform_loc = -1;
        }
        continue;
      }

      /* Handle standard cases. */
      switch (uni->type) {
        case DRW_UNIFORM_INT_COPY:
          BLI_assert(uni->arraysize == 1);
          if (uni->arraysize == 1) {
            GPU_shader_uniform_int_ex(
                shgroup->shader, uni->location, uni->length, uni->arraysize, uni->ivalue);
          }
          break;
        case DRW_UNIFORM_INT:
          GPU_shader_uniform_int_ex(shgroup->shader,
                                    uni->location,
                                    uni->length,
                                    uni->arraysize,
                                    static_cast<const int *>(uni->pvalue));
          break;
        case DRW_UNIFORM_FLOAT_COPY:
          BLI_assert(uni->arraysize == 1);
          if (uni->arraysize == 1) {
            GPU_shader_uniform_float_ex(
                shgroup->shader, uni->location, uni->length, uni->arraysize, uni->fvalue);
          }
          break;
        case DRW_UNIFORM_FLOAT:
          GPU_shader_uniform_float_ex(shgroup->shader,
                                      uni->location,
                                      uni->length,
                                      uni->arraysize,
                                      static_cast<const float *>(uni->pvalue));
          break;
        case DRW_UNIFORM_TEXTURE:
          GPU_texture_bind_ex(uni->texture, uni->sampler_state, uni->location);
          break;
        case DRW_UNIFORM_TEXTURE_REF:
          GPU_texture_bind_ex(*uni->texture_ref, uni->sampler_state, uni->location);
          break;
        case DRW_UNIFORM_IMAGE:
          GPU_texture_image_bind(uni->texture, uni->location);
          break;
        case DRW_UNIFORM_IMAGE_REF:
          GPU_texture_image_bind(*uni->texture_ref, uni->location);
          break;
        case DRW_UNIFORM_BLOCK:
          GPU_uniformbuf_bind(uni->block, uni->location);
          break;
        case DRW_UNIFORM_BLOCK_REF:
          GPU_uniformbuf_bind(*uni->block_ref, uni->location);
          break;
        case DRW_UNIFORM_STORAGE_BLOCK:
          GPU_storagebuf_bind(uni->ssbo, uni->location);
          break;
        case DRW_UNIFORM_STORAGE_BLOCK_REF:
          GPU_storagebuf_bind(*uni->ssbo_ref, uni->location);
          break;
        case DRW_UNIFORM_BLOCK_OBMATS:
          state->obmats_loc = uni->location;
          GPU_uniformbuf_bind(DST.vmempool->matrices_ubo[0], uni->location);
          break;
        case DRW_UNIFORM_BLOCK_OBINFOS:
          state->obinfos_loc = uni->location;
          GPU_uniformbuf_bind(DST.vmempool->obinfos_ubo[0], uni->location);
          break;
        case DRW_UNIFORM_BLOCK_OBATTRS:
          state->obattrs_loc = uni->location;
          state->obattrs_ubo = DRW_uniform_attrs_pool_find_ubo(DST.vmempool->obattrs_ubo_pool,
                                                               uni->uniform_attrs);
          DRW_sparse_uniform_buffer_bind(state->obattrs_ubo, 0, uni->location);
          break;
        case DRW_UNIFORM_BLOCK_VLATTRS:
          state->vlattrs_loc = uni->location;
          GPU_uniformbuf_bind(drw_ensure_layer_attribute_buffer(), uni->location);
          break;
        case DRW_UNIFORM_RESOURCE_CHUNK: {
          state->chunkid_loc = uni->location;
          int zero = 0;
          GPU_shader_uniform_int_ex(shgroup->shader, uni->location, 1, 1, &zero);
          break;
        }
        case DRW_UNIFORM_RESOURCE_ID:
          state->resourceid_loc = uni->location;
          break;
        case DRW_UNIFORM_TFEEDBACK_TARGET:
          BLI_assert(uni->pvalue && (*use_tfeedback == false));
          *use_tfeedback = GPU_shader_transform_feedback_enable(
              shgroup->shader, ((blender::gpu::VertBuf *)uni->pvalue));
          break;
        case DRW_UNIFORM_VERTEX_BUFFER_AS_TEXTURE_REF:
          GPU_vertbuf_bind_as_texture(*uni->vertbuf_ref, uni->location);
          break;
        case DRW_UNIFORM_VERTEX_BUFFER_AS_TEXTURE:
          GPU_vertbuf_bind_as_texture(uni->vertbuf, uni->location);
          break;
        case DRW_UNIFORM_VERTEX_BUFFER_AS_STORAGE_REF:
          GPU_vertbuf_bind_as_ssbo(*uni->vertbuf_ref, uni->location);
          break;
        case DRW_UNIFORM_VERTEX_BUFFER_AS_STORAGE:
          GPU_vertbuf_bind_as_ssbo(uni->vertbuf, uni->location);
          break;
        /* Legacy/Fallback support. */
        case DRW_UNIFORM_BASE_INSTANCE:
          state->baseinst_loc = uni->location;
          break;
        case DRW_UNIFORM_MODEL_MATRIX:
          state->obmat_loc = uni->location;
          break;
        case DRW_UNIFORM_MODEL_MATRIX_INVERSE:
          state->obinv_loc = uni->location;
          break;
      }
    }
  }
  /* Ensure uniform arrays copied. */
  BLI_assert(array_index == 0);
  BLI_assert(array_uniform_loc == -1);
  UNUSED_VARS_NDEBUG(array_uniform_loc);
}

BLI_INLINE void draw_select_buffer(DRWShadingGroup *shgroup,
                                   DRWCommandsState *state,
                                   blender::gpu::Batch *batch,
                                   const DRWResourceHandle *handle)
{
  const bool is_instancing = (batch->inst[0] != nullptr);
  int start = 0;
  int count = 1;
  int tot = is_instancing ? GPU_vertbuf_get_vertex_len(batch->inst[0]) :
                            GPU_vertbuf_get_vertex_len(batch->verts[0]);
  /* HACK: get VBO data without actually drawing. */
  int *select_id = state->select_buf->data<int>().data();

  /* Batching */
  if (!is_instancing) {
    /* FIXME: Meh a bit nasty. */
    if (batch->prim_type == GPU_PRIM_TRIS) {
      count = 3;
    }
    else if (batch->prim_type == GPU_PRIM_LINES) {
      count = 2;
    }
  }

  while (start < tot) {
    GPU_select_load_id(select_id[start]);
    if (is_instancing) {
      draw_geometry_execute(shgroup, batch, 0, 0, start, count, state->baseinst_loc);
    }
    else {
      draw_geometry_execute(
          shgroup, batch, start, count, DRW_handle_id_get(handle), 0, state->baseinst_loc);
    }
    start += count;
  }
}

struct DRWCommandIterator {
  int cmd_index;
  DRWCommandChunk *curr_chunk;
};

static void draw_command_iter_begin(DRWCommandIterator *iter, DRWShadingGroup *shgroup)
{
  iter->curr_chunk = shgroup->cmd.first;
  iter->cmd_index = 0;
}

static DRWCommand *draw_command_iter_step(DRWCommandIterator *iter, eDRWCommandType * /*cmd_type*/)
{
  if (iter->curr_chunk) {
    if (iter->cmd_index == iter->curr_chunk->command_len) {
      iter->curr_chunk = iter->curr_chunk->next;
      iter->cmd_index = 0;
    }
    if (iter->curr_chunk) {
      if (iter->cmd_index < iter->curr_chunk->command_used) {
        return iter->curr_chunk->commands + iter->cmd_index++;
      }
    }
  }
  return nullptr;
}

static void draw_call_resource_bind(DRWCommandsState *state, const DRWResourceHandle *handle)
{
  /* Front face is not a resource but it is inside the resource handle. */
  bool neg_scale = DRW_handle_negative_scale_get(handle);
  if (neg_scale != state->neg_scale) {
    state->neg_scale = neg_scale;
  }

  int chunk = DRW_handle_chunk_get(handle);
  if (state->resource_chunk != chunk) {
    if (state->chunkid_loc != -1) {
      GPU_shader_uniform_int_ex(DST.shader, state->chunkid_loc, 1, 1, &chunk);
    }
    if (state->obmats_loc != -1) {
      GPU_uniformbuf_unbind(DST.vmempool->matrices_ubo[state->resource_chunk]);
      GPU_uniformbuf_bind(DST.vmempool->matrices_ubo[chunk], state->obmats_loc);
    }
    if (state->obinfos_loc != -1) {
      GPU_uniformbuf_unbind(DST.vmempool->obinfos_ubo[state->resource_chunk]);
      GPU_uniformbuf_bind(DST.vmempool->obinfos_ubo[chunk], state->obinfos_loc);
    }
    if (state->obattrs_loc != -1) {
      DRW_sparse_uniform_buffer_unbind(state->obattrs_ubo, state->resource_chunk);
      DRW_sparse_uniform_buffer_bind(state->obattrs_ubo, chunk, state->obattrs_loc);
    }
    state->resource_chunk = chunk;
  }

  if (state->resourceid_loc != -1) {
    int id = DRW_handle_id_get(handle);
    if (state->resource_id != id) {
      GPU_shader_uniform_int_ex(DST.shader, state->resourceid_loc, 1, 1, &id);
      state->resource_id = id;
    }
  }
}

static void draw_call_batching_flush(DRWShadingGroup *shgroup, DRWCommandsState *state)
{
  draw_indirect_call(shgroup, state);
  GPU_draw_list_submit(DST.draw_list);

  state->batch = nullptr;
  state->inst_count = 0;
  state->base_inst = -1;
}

static void draw_call_single_do(DRWShadingGroup *shgroup,
                                DRWCommandsState *state,
                                blender::gpu::Batch *batch,
                                DRWResourceHandle handle,
                                int vert_first,
                                int vert_count,
                                int inst_first,
                                int inst_count,
                                bool do_base_instance)
{
  draw_call_batching_flush(shgroup, state);

  draw_call_resource_bind(state, &handle);

  /* TODO: This is Legacy. Need to be removed. */
  if (state->obmats_loc == -1 && (state->obmat_loc != -1 || state->obinv_loc != -1)) {
    draw_legacy_matrix_update(shgroup, &handle, state->obmat_loc, state->obinv_loc);
  }

  if (G.f & G_FLAG_PICKSEL) {
    if (state->select_buf != nullptr) {
      draw_select_buffer(shgroup, state, batch, &handle);
      return;
    }

    GPU_select_load_id(state->select_id);
  }

  draw_geometry_execute(shgroup,
                        batch,
                        vert_first,
                        vert_count,
                        do_base_instance ? DRW_handle_id_get(&handle) : inst_first,
                        inst_count,
                        state->baseinst_loc);
}

/* Not to be mistaken with draw_indirect_call which does batch many drawcalls together. This one
 * only execute an indirect drawcall with user indirect buffer. */
static void draw_call_indirect(DRWShadingGroup *shgroup,
                               DRWCommandsState *state,
                               blender::gpu::Batch *batch,
                               DRWResourceHandle handle,
                               GPUStorageBuf *indirect_buf)
{
  draw_call_batching_flush(shgroup, state);
  draw_call_resource_bind(state, &handle);

  if (G.f & G_FLAG_PICKSEL) {
    GPU_select_load_id(state->select_id);
  }

  GPU_batch_set_shader(batch, shgroup->shader);
  GPU_batch_draw_indirect(batch, indirect_buf, 0);
}

static void draw_call_batching_start(DRWCommandsState *state)
{
  state->neg_scale = false;
  state->resource_chunk = 0;
  state->resource_id = -1;
  state->base_inst = 0;
  state->inst_count = 0;
  state->batch = nullptr;

  state->select_id = -1;
  state->select_buf = nullptr;
}

/* NOTE: Does not support batches with instancing VBOs. */
static void draw_call_batching_do(DRWShadingGroup *shgroup,
                                  DRWCommandsState *state,
                                  DRWCommandDraw *call)
{
  /* If any condition requires to interrupt the merging. */
  bool neg_scale = DRW_handle_negative_scale_get(&call->handle);
  int chunk = DRW_handle_chunk_get(&call->handle);
  int id = DRW_handle_id_get(&call->handle);
  if ((state->neg_scale != neg_scale) ||  /* Need to change state. */
      (state->resource_chunk != chunk) || /* Need to change UBOs. */
      (state->batch != call->batch)       /* Need to change VAO. */
  )
  {
    draw_call_batching_flush(shgroup, state);

    state->batch = call->batch;
    state->inst_count = 1;
    state->base_inst = id;

    draw_call_resource_bind(state, &call->handle);
  }
  /* Is the id consecutive? */
  else if (id != state->base_inst + state->inst_count) {
    /* We need to add a draw command for the pending instances. */
    draw_indirect_call(shgroup, state);
    state->inst_count = 1;
    state->base_inst = id;
  }
  /* We avoid a drawcall by merging with the precedent
   * drawcall using instancing. */
  else {
    state->inst_count++;
  }
}

/* Flush remaining pending drawcalls. */
static void draw_call_batching_finish(DRWShadingGroup *shgroup, DRWCommandsState *state)
{
  draw_call_batching_flush(shgroup, state);

  /* Reset state */
  if (state->neg_scale) {
  }
  if (state->obmats_loc != -1) {
    GPU_uniformbuf_unbind(DST.vmempool->matrices_ubo[state->resource_chunk]);
  }
  if (state->obinfos_loc != -1) {
    GPU_uniformbuf_unbind(DST.vmempool->obinfos_ubo[state->resource_chunk]);
  }
  if (state->obattrs_loc != -1) {
    DRW_sparse_uniform_buffer_unbind(state->obattrs_ubo, state->resource_chunk);
  }
  if (state->vlattrs_loc != -1) {
    GPU_uniformbuf_unbind(DST.vmempool->vlattrs_ubo);
  }
}

static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
{
  BLI_assert(shgroup->shader);

  DRWCommandsState state{};
  state.obmats_loc = -1;
  state.obinfos_loc = -1;
  state.obattrs_loc = -1;
  state.vlattrs_loc = -1;
  state.baseinst_loc = -1;
  state.chunkid_loc = -1;
  state.resourceid_loc = -1;
  state.obmat_loc = -1;
  state.obinv_loc = -1;
  state.obattrs_ubo = nullptr;
  state.drw_state_enabled = DRWState(0);
  state.drw_state_disabled = DRWState(0);

  const bool shader_changed = (DST.shader != shgroup->shader);
  bool use_tfeedback = false;

  if (shader_changed) {
    if (DST.shader) {
      GPU_shader_unbind();

      /* Unbinding can be costly. Skip in normal condition. */
      if (G.debug & G_DEBUG_GPU) {
        GPU_texture_unbind_all();
        GPU_texture_image_unbind_all();
        GPU_uniformbuf_debug_unbind_all();
        GPU_storagebuf_debug_unbind_all();
      }
    }
    GPU_shader_bind(shgroup->shader);
    DST.shader = shgroup->shader;
    DST.batch = nullptr;
  }

  draw_update_uniforms(shgroup, &state, &use_tfeedback);

  drw_state_set(pass_state);

  /* Rendering Calls */
  {
    DRWCommandIterator iter;
    DRWCommand *cmd;
    eDRWCommandType cmd_type;

    draw_command_iter_begin(&iter, shgroup);

    draw_call_batching_start(&state);

    while ((cmd = draw_command_iter_step(&iter, &cmd_type))) {

      switch (cmd_type) {
        case DRW_CMD_DRAW_PROCEDURAL:
        case DRW_CMD_DRWSTATE:
        case DRW_CMD_STENCIL:
          draw_call_batching_flush(shgroup, &state);
          break;
        case DRW_CMD_DRAW:
        case DRW_CMD_DRAW_INDIRECT:
        case DRW_CMD_DRAW_INSTANCE:
          break;
        default:
          break;
      }

      switch (cmd_type) {
        case DRW_CMD_CLEAR:
          GPU_framebuffer_clear(GPU_framebuffer_active_get(),
                                cmd->clear.clear_channels,
                                blender::float4{cmd->clear.r / 255.0f,
                                                cmd->clear.g / 255.0f,
                                                cmd->clear.b / 255.0f,
                                                cmd->clear.a / 255.0f},
                                cmd->clear.depth,
                                cmd->clear.stencil);
          break;
        case DRW_CMD_DRWSTATE:
          state.drw_state_enabled |= cmd->state.enable;
          state.drw_state_disabled |= cmd->state.disable;
          drw_state_set((pass_state & ~state.drw_state_disabled) | state.drw_state_enabled);
          break;
        case DRW_CMD_STENCIL:
          drw_stencil_state_set(cmd->stencil.write_mask, cmd->stencil.ref, cmd->stencil.comp_mask);
          break;
        case DRW_CMD_SELECTID:
          state.select_id = cmd->select_id.select_id;
          state.select_buf = cmd->select_id.select_buf;
          break;
        case DRW_CMD_DRAW:
          if (!USE_BATCHING || state.obmats_loc == -1 || (G.f & G_FLAG_PICKSEL) ||
              cmd->draw.batch->inst[0])
          {
            draw_call_single_do(
                shgroup, &state, cmd->draw.batch, cmd->draw.handle, 0, 0, 0, 0, true);
          }
          else {
            draw_call_batching_do(shgroup, &state, &cmd->draw);
          }
          break;
        case DRW_CMD_DRAW_PROCEDURAL:
          draw_call_single_do(shgroup,
                              &state,
                              cmd->procedural.batch,
                              cmd->procedural.handle,
                              0,
                              cmd->procedural.vert_count,
                              0,
                              1,
                              true);
          break;
        case DRW_CMD_DRAW_INDIRECT:
          draw_call_indirect(shgroup,
                             &state,
                             cmd->draw_indirect.batch,
                             cmd->draw_indirect.handle,
                             cmd->draw_indirect.indirect_buf);
          break;
        case DRW_CMD_DRAW_INSTANCE:
          draw_call_single_do(shgroup,
                              &state,
                              cmd->instance.batch,
                              cmd->instance.handle,
                              0,
                              0,
                              0,
                              cmd->instance.inst_count,
                              cmd->instance.use_attrs == 0);
          break;
        case DRW_CMD_DRAW_RANGE:
          draw_call_single_do(shgroup,
                              &state,
                              cmd->range.batch,
                              cmd->range.handle,
                              cmd->range.vert_first,
                              cmd->range.vert_count,
                              0,
                              1,
                              true);
          break;
        case DRW_CMD_DRAW_INSTANCE_RANGE:
          draw_call_single_do(shgroup,
                              &state,
                              cmd->instance_range.batch,
                              cmd->instance_range.handle,
                              0,
                              0,
                              cmd->instance_range.inst_first,
                              cmd->instance_range.inst_count,
                              false);
          break;
        case DRW_CMD_COMPUTE:
          GPU_compute_dispatch(shgroup->shader,
                               cmd->compute.groups_x_len,
                               cmd->compute.groups_y_len,
                               cmd->compute.groups_z_len);
          break;
        case DRW_CMD_COMPUTE_REF:
          GPU_compute_dispatch(shgroup->shader,
                               cmd->compute_ref.groups_ref[0],
                               cmd->compute_ref.groups_ref[1],
                               cmd->compute_ref.groups_ref[2]);
          break;
        case DRW_CMD_COMPUTE_INDIRECT:
          GPU_compute_dispatch_indirect(shgroup->shader, cmd->compute_indirect.indirect_buf);
          break;
        case DRW_CMD_BARRIER:
          GPU_memory_barrier(cmd->barrier.type);
          break;
      }
    }

    draw_call_batching_finish(shgroup, &state);
  }

  if (use_tfeedback) {
    GPU_shader_transform_feedback_disable(shgroup->shader);
  }
}

static void drw_draw_pass_ex(DRWPass *pass,
                             DRWShadingGroup *start_group,
                             DRWShadingGroup *end_group)
{
  if (pass->original) {
    start_group = pass->original->shgroups.first;
    end_group = pass->original->shgroups.last;
  }

  if (start_group == nullptr) {
    return;
  }

  DST.shader = nullptr;

  BLI_assert_msg(DST.buffer_finish_called,
                 "DRW_render_instance_buffer_finish had not been called before drawing");

  /* GPU_framebuffer_clear calls can change the state outside the DRW module.
   * Force reset the affected states to avoid problems later. */
  drw_state_set(DST.state | DRW_STATE_WRITE_DEPTH | DRW_STATE_WRITE_COLOR);

  drw_state_set(pass->state);
  drw_state_validate();

  DRW_stats_query_start(pass->name);

  for (DRWShadingGroup *shgroup = start_group; shgroup; shgroup = shgroup->next) {
    draw_shgroup(shgroup, pass->state);
    /* break if upper limit */
    if (shgroup == end_group) {
      break;
    }
  }

  if (DST.shader) {
    GPU_shader_unbind();
    DST.shader = nullptr;
  }

  if (DST.batch) {
    DST.batch = nullptr;
  }

  /* Fix #67342 for some reason. AMD Pro driver bug. */
  if ((DST.state & DRW_STATE_BLEND_CUSTOM) != 0 &&
      GPU_type_matches(GPU_DEVICE_ATI, GPU_OS_ANY, GPU_DRIVER_OFFICIAL))
  {
    drw_state_set(DST.state & ~DRW_STATE_BLEND_CUSTOM);
  }

  /* HACK: Rasterized discard can affect clear commands which are not
   * part of a DRWPass (as of now). So disable rasterized discard here
   * if it has been enabled. */
  if ((DST.state & DRW_STATE_RASTERIZER_ENABLED) == 0) {
    drw_state_set((DST.state & ~DRW_STATE_RASTERIZER_ENABLED) | DRW_STATE_DEFAULT);
  }

  DRW_stats_query_end();
}

void DRW_draw_pass(DRWPass *pass)
{
  for (; pass; pass = pass->next) {
    drw_draw_pass_ex(pass, pass->shgroups.first, pass->shgroups.last);
  }
}

void DRW_draw_pass_subset(DRWPass *pass, DRWShadingGroup *start_group, DRWShadingGroup *end_group)
{
  drw_draw_pass_ex(pass, start_group, end_group);
}

/** \} */