DRW/GPU: move .c files to C++
Also see #103343. Pull Request: https://projects.blender.org/blender/blender/pulls/110509
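The conversion is mostly mechanical, and the same handful of adjustments recur throughout the diff below: NULL becomes nullptr, void * results from the MEM_* allocators get an explicit static_cast, integer expressions assigned to enum types are wrapped in the enum's constructor (e.g. DRWState(...)), UNUSED(arg) parameters become commented-out parameter names, and symbols generated from GLSL sources are re-declared with extern "C" linkage. A minimal illustrative sketch of the allocation/NULL/unused-parameter pattern, using hypothetical names (ExampleData, example_create) rather than code from this commit:

#include <cstdlib>

struct ExampleData {
  int value;
};

/* C accepts the implicit conversion from void * and the NULL macro, and requires
 * parameters to be named (hence the UNUSED() macro); C++ needs the explicit cast,
 * prefers nullptr, and lets the unused parameter name simply be commented out. */
static ExampleData *example_create(int /*unused_flag*/)
{
  ExampleData *data = static_cast<ExampleData *>(calloc(1, sizeof(ExampleData)));
  if (data == nullptr) {
    return nullptr;
  }
  data->value = 1;
  return data;
}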
@@ -20,6 +20,7 @@
# include "BLI_shared_cache.hh"
# include "BLI_vector.hh"

# include "DNA_customdata_types.h"
# include "DNA_meshdata_types.h"

struct BVHCache;
@@ -102,7 +103,7 @@ struct MeshRuntime {
std::mutex render_mutex;

/** Implicit sharing user count for #Mesh::face_offset_indices. */
const ImplicitSharingInfoHandle *face_offsets_sharing_info;
const ImplicitSharingInfo *face_offsets_sharing_info;

/**
* A cache of bounds shared between data-blocks with unchanged positions. When changing positions

@@ -118,7 +118,7 @@ typedef unsigned int BLI_bitmap;
#define BLI_BITMAP_RESIZE(_bitmap, _num) \
{ \
CHECK_TYPE(_bitmap, BLI_bitmap *); \
(_bitmap) = MEM_recallocN(_bitmap, BLI_BITMAP_SIZE(_num)); \
(_bitmap) = (unsigned int *)MEM_recallocN(_bitmap, BLI_BITMAP_SIZE(_num)); \
} \
(void)0
@@ -33,7 +33,7 @@ set(INC
)

set(SRC
intern/draw_cache.c
intern/draw_cache.cc
intern/draw_cache_extract_mesh.cc
intern/draw_cache_extract_mesh_render_data.cc
intern/mesh_extractors/extract_mesh.cc
@@ -69,72 +69,72 @@ set(SRC
intern/draw_cache_impl_curves.cc
intern/draw_cache_impl_gpencil_legacy.cc
intern/draw_cache_impl_grease_pencil.cc
intern/draw_cache_impl_lattice.c
intern/draw_cache_impl_lattice.cc
intern/draw_cache_impl_mesh.cc
intern/draw_cache_impl_particles.c
intern/draw_cache_impl_particles.cc
intern/draw_cache_impl_pointcloud.cc
intern/draw_cache_impl_subdivision.cc
intern/draw_cache_impl_volume.cc
intern/draw_color_management.cc
intern/draw_command.cc
intern/draw_common.c
intern/draw_common.cc
intern/draw_curves.cc
intern/draw_debug.cc
intern/draw_fluid.c
intern/draw_fluid.cc
intern/draw_hair.cc
intern/draw_instance_data.c
intern/draw_manager.c
intern/draw_instance_data.cc
intern/draw_manager.cc
intern/draw_manager_c.cc
intern/draw_manager_data.cc
intern/draw_manager_exec.c
intern/draw_manager_exec.cc
intern/draw_manager_profiling.cc
intern/draw_manager_shader.c
intern/draw_manager_shader.cc
intern/draw_manager_text.cc
intern/draw_manager_texture.c
intern/draw_manager_texture.cc
intern/draw_pbvh.cc
intern/draw_pointcloud.cc
intern/draw_resource.cc
intern/draw_sculpt.cc
intern/draw_select_buffer.c
intern/draw_select_buffer.cc
intern/draw_shader.cc
intern/draw_texture_pool.cc
intern/draw_view.c
intern/draw_view.cc
intern/draw_view_c.cc
intern/draw_view_data.cc
intern/draw_volume.cc
engines/basic/basic_engine.c
engines/basic/basic_shader.c
engines/basic/basic_engine.cc
engines/basic/basic_shader.cc
engines/compositor/compositor_engine.cc
engines/image/image_engine.cc
engines/image/image_shader.cc
engines/eevee/eevee_bloom.c
engines/eevee/eevee_cryptomatte.c
engines/eevee/eevee_data.c
engines/eevee/eevee_depth_of_field.c
engines/eevee/eevee_effects.c
engines/eevee/eevee_engine.c
engines/eevee/eevee_lightcache.c
engines/eevee/eevee_lightprobes.c
engines/eevee/eevee_lights.c
engines/eevee/eevee_lookdev.c
engines/eevee/eevee_lut.c
engines/eevee/eevee_lut_gen.c
engines/eevee/eevee_materials.c
engines/eevee/eevee_mist.c
engines/eevee/eevee_motion_blur.c
engines/eevee/eevee_occlusion.c
engines/eevee/eevee_render.c
engines/eevee/eevee_renderpasses.c
engines/eevee/eevee_sampling.c
engines/eevee/eevee_screen_raytrace.c
engines/eevee/eevee_bloom.cc
engines/eevee/eevee_cryptomatte.cc
engines/eevee/eevee_data.cc
engines/eevee/eevee_depth_of_field.cc
engines/eevee/eevee_effects.cc
engines/eevee/eevee_engine.cc
engines/eevee/eevee_lightcache.cc
engines/eevee/eevee_lightprobes.cc
engines/eevee/eevee_lights.cc
engines/eevee/eevee_lookdev.cc
engines/eevee/eevee_lut.cc
engines/eevee/eevee_lut_gen.cc
engines/eevee/eevee_materials.cc
engines/eevee/eevee_mist.cc
engines/eevee/eevee_motion_blur.cc
engines/eevee/eevee_occlusion.cc
engines/eevee/eevee_render.cc
engines/eevee/eevee_renderpasses.cc
engines/eevee/eevee_sampling.cc
engines/eevee/eevee_screen_raytrace.cc
engines/eevee/eevee_shaders.cc
engines/eevee/eevee_shaders_extra.cc
engines/eevee/eevee_shadows.c
engines/eevee/eevee_shadows_cascade.c
engines/eevee/eevee_shadows_cube.c
engines/eevee/eevee_subsurface.c
engines/eevee/eevee_temporal_sampling.c
engines/eevee/eevee_volumes.c
engines/eevee/eevee_shadows.cc
engines/eevee/eevee_shadows_cascade.cc
engines/eevee/eevee_shadows_cube.cc
engines/eevee/eevee_subsurface.cc
engines/eevee/eevee_temporal_sampling.cc
engines/eevee/eevee_volumes.cc
engines/eevee_next/eevee_ambient_occlusion.cc
engines/eevee_next/eevee_camera.cc
engines/eevee_next/eevee_cryptomatte.cc
@@ -161,43 +161,43 @@ set(SRC
engines/eevee_next/eevee_velocity.cc
engines/eevee_next/eevee_view.cc
engines/eevee_next/eevee_world.cc
engines/workbench/workbench_data.c
engines/workbench/workbench_effect_antialiasing.c
engines/workbench/workbench_data.cc
engines/workbench/workbench_effect_antialiasing.cc
engines/workbench/workbench_effect_cavity.c
engines/workbench/workbench_effect_antialiasing_c.cc
engines/workbench/workbench_effect_cavity.cc
engines/workbench/workbench_effect_dof.c
engines/workbench/workbench_effect_cavity_c.cc
engines/workbench/workbench_effect_dof.cc
engines/workbench/workbench_effect_outline.c
engines/workbench/workbench_effect_dof_c.cc
engines/workbench/workbench_effect_outline.cc
engines/workbench/workbench_engine.c
engines/workbench/workbench_effect_outline_c.cc
engines/workbench/workbench_engine.cc
engines/workbench/workbench_engine_c.cc
engines/workbench/workbench_materials.cc
engines/workbench/workbench_materials_next.cc
engines/workbench/workbench_mesh_passes.cc
engines/workbench/workbench_opaque.c
engines/workbench/workbench_render.c
engines/workbench/workbench_opaque.cc
engines/workbench/workbench_render.cc
engines/workbench/workbench_resources.cc
engines/workbench/workbench_shader.cc
engines/workbench/workbench_shader_cache.cc
engines/workbench/workbench_shadow.c
engines/workbench/workbench_shadow.cc
engines/workbench/workbench_shadow_c.cc
engines/workbench/workbench_state.cc
engines/workbench/workbench_transparent.c
engines/workbench/workbench_volume.c
engines/workbench/workbench_transparent.cc
engines/workbench/workbench_volume.cc
engines/workbench/workbench_volume_next.cc
engines/external/external_engine.cc
engines/gpencil/gpencil_antialiasing.c
engines/gpencil/gpencil_cache_utils.c
engines/gpencil/gpencil_draw_data.c
engines/gpencil/gpencil_engine.c
engines/gpencil/gpencil_antialiasing.cc
engines/gpencil/gpencil_cache_utils.cc
engines/gpencil/gpencil_draw_data.cc
engines/gpencil/gpencil_engine.cc
engines/gpencil/gpencil_render.c
engines/gpencil/gpencil_shader.c
engines/gpencil/gpencil_engine_c.cc
engines/gpencil/gpencil_render.cc
engines/gpencil/gpencil_shader.cc
engines/gpencil/gpencil_shader_fx.c
engines/select/select_draw_utils.c
engines/select/select_engine.c
engines/gpencil/gpencil_shader_c.cc
engines/gpencil/gpencil_shader_fx.cc
engines/select/select_draw_utils.cc
engines/select/select_engine.cc
engines/select/select_instance.cc
engines/overlay/overlay_antialiasing.cc
engines/overlay/overlay_armature.cc
@@ -837,7 +837,7 @@ list(APPEND INC

if(WITH_DRAW_DEBUG)
list(APPEND SRC
engines/select/select_debug_engine.c
engines/select/select_debug_engine.cc
)
add_definitions(-DWITH_DRAW_DEBUG)
endif()
@@ -17,6 +17,10 @@

#include "BKE_ccg.h"

#ifdef __cplusplus
extern "C" {
#endif

struct PBVHAttrReq;
struct GPUBatch;
struct PBVHNode;
@@ -91,3 +95,7 @@ GPUBatch *DRW_pbvh_lines_get(PBVHBatches *batches,
PBVH_GPU_Args *args,
int *r_prim_count,
bool do_coarse_grids);

#ifdef __cplusplus
}
#endif
@@ -31,34 +31,34 @@

/* GPUViewport.storage
* Is freed every time the viewport engine changes. */
typedef struct BASIC_StorageList {
struct BASIC_StorageList {
struct BASIC_PrivateData *g_data;
} BASIC_StorageList;
};

typedef struct BASIC_PassList {
struct BASIC_PassList {
DRWPass *depth_pass[2];
DRWPass *depth_pass_pointcloud[2];
DRWPass *depth_pass_cull[2];
} BASIC_PassList;
};

typedef struct BASIC_Data {
struct BASIC_Data {
void *engine_type;
DRWViewportEmptyList *fbl;
DRWViewportEmptyList *txl;
BASIC_PassList *psl;
BASIC_StorageList *stl;
} BASIC_Data;
};

/* *********** STATIC *********** */

typedef struct BASIC_PrivateData {
struct BASIC_PrivateData {
DRWShadingGroup *depth_shgrp[2];
DRWShadingGroup *depth_shgrp_cull[2];
DRWShadingGroup *depth_hair_shgrp[2];
DRWShadingGroup *depth_curves_shgrp[2];
DRWShadingGroup *depth_pointcloud_shgrp[2];
bool use_material_slot_selection;
} BASIC_PrivateData; /* Transient data */
}; /* Transient data */

static void basic_cache_init(void *vedata)
{
@@ -70,15 +70,17 @@ static void basic_cache_init(void *vedata)

if (!stl->g_data) {
/* Alloc transient pointers */
stl->g_data = MEM_callocN(sizeof(*stl->g_data), __func__);
stl->g_data = static_cast<BASIC_PrivateData *>(MEM_callocN(sizeof(*stl->g_data), __func__));
}

stl->g_data->use_material_slot_selection = DRW_state_is_material_select();

/* Twice for normal and in front objects. */
for (int i = 0; i < 2; i++) {
DRWState clip_state = (draw_ctx->sh_cfg == GPU_SHADER_CFG_CLIPPED) ? DRW_STATE_CLIP_PLANES : 0;
DRWState infront_state = (DRW_state_is_select() && (i == 1)) ? DRW_STATE_IN_FRONT_SELECT : 0;
DRWState clip_state = DRWState(
(draw_ctx->sh_cfg == GPU_SHADER_CFG_CLIPPED) ? DRW_STATE_CLIP_PLANES : 0);
DRWState infront_state = DRWState(
(DRW_state_is_select() && (i == 1)) ? DRW_STATE_IN_FRONT_SELECT : 0);
DRWState state = DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_LESS_EQUAL;

GPUShader *sh = DRW_state_is_select() ?
@@ -114,7 +116,7 @@ static void basic_cache_init(void *vedata)
}
}

/* TODO(fclem): DRW_cache_object_surface_material_get needs a refactor to allow passing NULL
/* TODO(fclem): DRW_cache_object_surface_material_get needs a refactor to allow passing nullptr
* instead of gpumat_array. Avoiding all this boilerplate code. */
static struct GPUBatch **basic_object_surface_material_get(Object *ob)
{
@@ -129,19 +131,22 @@ static void basic_cache_populate_particles(void *vedata, Object *ob)
{
const bool do_in_front = (ob->dtx & OB_DRAW_IN_FRONT) != 0;
BASIC_StorageList *stl = ((BASIC_Data *)vedata)->stl;
for (ParticleSystem *psys = ob->particlesystem.first; psys != NULL; psys = psys->next) {
for (ParticleSystem *psys = static_cast<ParticleSystem *>(ob->particlesystem.first);
psys != nullptr;
psys = psys->next)
{
if (!DRW_object_is_visible_psys_in_active_context(ob, psys)) {
continue;
}
ParticleSettings *part = psys->part;
const int draw_as = (part->draw_as == PART_DRAW_REND) ? part->ren_as : part->draw_as;
if (draw_as == PART_DRAW_PATH) {
struct GPUBatch *hairs = DRW_cache_particles_get_hair(ob, psys, NULL);
struct GPUBatch *hairs = DRW_cache_particles_get_hair(ob, psys, nullptr);
if (stl->g_data->use_material_slot_selection) {
const short material_slot = part->omat;
DRW_select_load_id(ob->runtime.select_id | (material_slot << 16));
}
DRW_shgroup_call(stl->g_data->depth_hair_shgrp[do_in_front], hairs, NULL);
DRW_shgroup_call(stl->g_data->depth_hair_shgrp[do_in_front], hairs, nullptr);
}
}
}
@@ -163,11 +168,12 @@ static void basic_cache_populate(void *vedata, Object *ob)

const bool do_in_front = (ob->dtx & OB_DRAW_IN_FRONT) != 0;
if (ob->type == OB_CURVES) {
DRW_shgroup_curves_create_sub(ob, stl->g_data->depth_curves_shgrp[do_in_front], NULL);
DRW_shgroup_curves_create_sub(ob, stl->g_data->depth_curves_shgrp[do_in_front], nullptr);
}

if (ob->type == OB_POINTCLOUD) {
DRW_shgroup_pointcloud_create_sub(ob, stl->g_data->depth_pointcloud_shgrp[do_in_front], NULL);
DRW_shgroup_pointcloud_create_sub(
ob, stl->g_data->depth_pointcloud_shgrp[do_in_front], nullptr);
return;
}

@@ -207,7 +213,7 @@ static void basic_cache_populate(void *vedata, Object *ob)
if (geoms) {
const int materials_len = DRW_cache_object_material_count_get(ob);
for (int i = 0; i < materials_len; i++) {
if (geoms[i] == NULL) {
if (geoms[i] == nullptr) {
continue;
}
const short material_slot_select_id = i + 1;
@@ -258,21 +264,21 @@ static void basic_engine_free(void)
static const DrawEngineDataSize basic_data_size = DRW_VIEWPORT_DATA_SIZE(BASIC_Data);

DrawEngineType draw_engine_basic_type = {
/*next*/ NULL,
/*prev*/ NULL,
/*next*/ nullptr,
/*prev*/ nullptr,
/*idname*/ N_("Basic"),
/*vedata_size*/ &basic_data_size,
/*engine_init*/ NULL,
/*engine_init*/ nullptr,
/*engine_free*/ &basic_engine_free,
/*instance_free*/ /*instance_free*/ NULL,
/*instance_free*/ /*instance_free*/ nullptr,
/*cache_init*/ &basic_cache_init,
/*cache_populate*/ &basic_cache_populate,
/*cache_finish*/ &basic_cache_finish,
/*draw_scene*/ &basic_draw_scene,
/*view_update*/ NULL,
/*id_update*/ NULL,
/*render_to_image*/ NULL,
/*store_metadata*/ NULL,
/*view_update*/ nullptr,
/*id_update*/ nullptr,
/*render_to_image*/ nullptr,
/*store_metadata*/ nullptr,
};

#undef BASIC_ENGINE
@@ -8,4 +8,12 @@

#pragma once

#ifdef __cplusplus
extern "C" {
#endif

extern DrawEngineType draw_engine_basic_type;

#ifdef __cplusplus
}
#endif
@@ -12,32 +12,32 @@

#include "basic_private.h"

extern char datatoc_basic_depth_frag_glsl[];
extern char datatoc_basic_depth_vert_glsl[];
extern char datatoc_basic_conservative_depth_geom_glsl[];
extern "C" char datatoc_basic_depth_frag_glsl[];
extern "C" char datatoc_basic_depth_vert_glsl[];
extern "C" char datatoc_basic_conservative_depth_geom_glsl[];

extern char datatoc_common_view_lib_glsl[];
extern char datatoc_common_pointcloud_lib_glsl[];
extern "C" char datatoc_common_view_lib_glsl[];
extern "C" char datatoc_common_pointcloud_lib_glsl[];

/* Shaders */

typedef struct BASIC_Shaders {
struct BASIC_Shaders {
/* Depth Pre Pass */
GPUShader *depth;
GPUShader *pointcloud_depth;
GPUShader *curves_depth;
GPUShader *depth_conservative;
GPUShader *pointcloud_depth_conservative;
} BASIC_Shaders;
};

static struct {
BASIC_Shaders sh_data[GPU_SHADER_CFG_LEN];
} e_data = {{{NULL}}}; /* Engine data */
} e_data = {{{nullptr}}}; /* Engine data */

GPUShader *BASIC_shaders_depth_sh_get(eGPUShaderConfig config)
{
BASIC_Shaders *sh_data = &e_data.sh_data[config];
if (sh_data->depth == NULL) {
if (sh_data->depth == nullptr) {
sh_data->depth = GPU_shader_create_from_info_name(
config == GPU_SHADER_CFG_CLIPPED ? "basic_depth_mesh_clipped" : "basic_depth_mesh");
}
@@ -47,7 +47,7 @@ GPUShader *BASIC_shaders_depth_sh_get(eGPUShaderConfig config)
GPUShader *BASIC_shaders_pointcloud_depth_sh_get(eGPUShaderConfig config)
{
BASIC_Shaders *sh_data = &e_data.sh_data[config];
if (sh_data->pointcloud_depth == NULL) {
if (sh_data->pointcloud_depth == nullptr) {
sh_data->pointcloud_depth = GPU_shader_create_from_info_name(
config == GPU_SHADER_CFG_CLIPPED ? "basic_depth_pointcloud_clipped" :
"basic_depth_pointcloud");
@@ -58,7 +58,7 @@ GPUShader *BASIC_shaders_pointcloud_depth_sh_get(eGPUShaderConfig config)
GPUShader *BASIC_shaders_curves_depth_sh_get(eGPUShaderConfig config)
{
BASIC_Shaders *sh_data = &e_data.sh_data[config];
if (sh_data->curves_depth == NULL) {
if (sh_data->curves_depth == nullptr) {
sh_data->curves_depth = GPU_shader_create_from_info_name(
config == GPU_SHADER_CFG_CLIPPED ? "basic_depth_curves_clipped" : "basic_depth_curves");
}
@@ -68,7 +68,7 @@ GPUShader *BASIC_shaders_curves_depth_sh_get(eGPUShaderConfig config)
GPUShader *BASIC_shaders_depth_conservative_sh_get(eGPUShaderConfig config)
{
BASIC_Shaders *sh_data = &e_data.sh_data[config];
if (sh_data->depth_conservative == NULL) {
if (sh_data->depth_conservative == nullptr) {
sh_data->depth_conservative = GPU_shader_create_from_info_name(
config == GPU_SHADER_CFG_CLIPPED ? "basic_depth_mesh_conservative_clipped" :
"basic_depth_mesh_conservative");
@@ -79,7 +79,7 @@ GPUShader *BASIC_shaders_depth_conservative_sh_get(eGPUShaderConfig config)
GPUShader *BASIC_shaders_pointcloud_depth_conservative_sh_get(eGPUShaderConfig config)
{
BASIC_Shaders *sh_data = &e_data.sh_data[config];
if (sh_data->pointcloud_depth_conservative == NULL) {
if (sh_data->pointcloud_depth_conservative == nullptr) {
sh_data->pointcloud_depth_conservative = GPU_shader_create_from_info_name(
config == GPU_SHADER_CFG_CLIPPED ? "basic_depth_pointcloud_conservative_clipped" :
"basic_depth_pointcloud_conservative");
@@ -18,7 +18,7 @@

static const bool use_highres = true;

int EEVEE_bloom_init(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Data *vedata)
int EEVEE_bloom_init(EEVEE_ViewLayerData * /*sldata*/, EEVEE_Data *vedata)
{
EEVEE_StorageList *stl = vedata->stl;
EEVEE_FramebufferList *fbl = vedata->fbl;
@@ -142,7 +142,7 @@ static DRWShadingGroup *eevee_create_bloom_pass(const char *name,
*pass = DRW_pass_create(name, DRW_STATE_WRITE_COLOR);

DRWShadingGroup *grp = DRW_shgroup_create(sh, *pass);
DRW_shgroup_call(grp, quad, NULL);
DRW_shgroup_call(grp, quad, nullptr);
DRW_shgroup_uniform_texture_ref(grp, "sourceBuffer", &effects->unf_source_buffer);
DRW_shgroup_uniform_vec2(grp, "sourceBufferTexelSize", effects->unf_source_texel_size, 1);
if (upsample) {
@@ -157,13 +157,13 @@ static DRWShadingGroup *eevee_create_bloom_pass(const char *name,
return grp;
}

void EEVEE_bloom_cache_init(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Data *vedata)
void EEVEE_bloom_cache_init(EEVEE_ViewLayerData * /*sldata*/, EEVEE_Data *vedata)
{
EEVEE_PassList *psl = vedata->psl;
EEVEE_StorageList *stl = vedata->stl;
EEVEE_EffectsInfo *effects = stl->effects;

psl->bloom_accum_ps = NULL;
psl->bloom_accum_ps = nullptr;

if ((effects->enabled_effects & EFFECT_BLOOM) != 0) {
/**
@@ -302,9 +302,9 @@ void EEVEE_bloom_draw(EEVEE_Data *vedata)
}
}

void EEVEE_bloom_output_init(EEVEE_ViewLayerData *UNUSED(sldata),
void EEVEE_bloom_output_init(EEVEE_ViewLayerData * /*sldata*/,
EEVEE_Data *vedata,
uint UNUSED(tot_samples))
uint /*tot_samples*/)
{
EEVEE_FramebufferList *fbl = vedata->fbl;
EEVEE_TextureList *txl = vedata->txl;
@@ -313,7 +313,7 @@ void EEVEE_bloom_output_init(EEVEE_ViewLayerData *UNUSED(sldata),
EEVEE_EffectsInfo *effects = stl->effects;

/* Create FrameBuffer. */
DRW_texture_ensure_fullscreen_2d(&txl->bloom_accum, GPU_R11F_G11F_B10F, 0);
DRW_texture_ensure_fullscreen_2d(&txl->bloom_accum, GPU_R11F_G11F_B10F, DRWTextureFlag(0));

GPU_framebuffer_ensure_config(&fbl->bloom_pass_accum_fb,
{GPU_ATTACHMENT_NONE, GPU_ATTACHMENT_TEXTURE(txl->bloom_accum)});
@@ -328,7 +328,7 @@ void EEVEE_bloom_output_init(EEVEE_ViewLayerData *UNUSED(sldata),
false);
}

void EEVEE_bloom_output_accumulate(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Data *vedata)
void EEVEE_bloom_output_accumulate(EEVEE_ViewLayerData * /*sldata*/, EEVEE_Data *vedata)
{
EEVEE_FramebufferList *fbl = vedata->fbl;
EEVEE_PassList *psl = vedata->psl;
@@ -56,8 +56,8 @@

BLI_INLINE eViewLayerCryptomatteFlags eevee_cryptomatte_active_layers(const ViewLayer *view_layer)
{
const eViewLayerCryptomatteFlags cryptomatte_layers = view_layer->cryptomatte_flag &
VIEW_LAYER_CRYPTOMATTE_ALL;
const eViewLayerCryptomatteFlags cryptomatte_layers = eViewLayerCryptomatteFlags(
view_layer->cryptomatte_flag & VIEW_LAYER_CRYPTOMATTE_ALL);
return cryptomatte_layers;
}

@@ -125,15 +125,16 @@ void EEVEE_cryptomatte_renderpasses_init(EEVEE_Data *vedata)
}
g_data->cryptomatte_session = session;

g_data->render_passes |= EEVEE_RENDER_PASS_CRYPTOMATTE | EEVEE_RENDER_PASS_VOLUME_LIGHT;
g_data->render_passes = eViewLayerEEVEEPassType(
g_data->render_passes | EEVEE_RENDER_PASS_CRYPTOMATTE | EEVEE_RENDER_PASS_VOLUME_LIGHT);
g_data->cryptomatte_accurate_mode = (view_layer->cryptomatte_flag &
VIEW_LAYER_CRYPTOMATTE_ACCURATE) != 0;
}
}

void EEVEE_cryptomatte_output_init(EEVEE_ViewLayerData *UNUSED(sldata),
void EEVEE_cryptomatte_output_init(EEVEE_ViewLayerData * /*sldata*/,
EEVEE_Data *vedata,
int UNUSED(tot_samples))
int /*tot_samples*/)
{
EEVEE_FramebufferList *fbl = vedata->fbl;
EEVEE_TextureList *txl = vedata->txl;
@@ -151,14 +152,14 @@ void EEVEE_cryptomatte_output_init(EEVEE_ViewLayerData *UNUSED(sldata),
const float *viewport_size = DRW_viewport_size_get();
const int buffer_size = viewport_size[0] * viewport_size[1];

if (g_data->cryptomatte_accum_buffer == NULL) {
g_data->cryptomatte_accum_buffer = MEM_calloc_arrayN(
buffer_size * eevee_cryptomatte_pixel_stride(view_layer),
sizeof(EEVEE_CryptomatteSample),
__func__);
if (g_data->cryptomatte_accum_buffer == nullptr) {
g_data->cryptomatte_accum_buffer = static_cast<EEVEE_CryptomatteSample *>(
MEM_calloc_arrayN(buffer_size * eevee_cryptomatte_pixel_stride(view_layer),
sizeof(EEVEE_CryptomatteSample),
__func__));
/* Download buffer should store a float per active cryptomatte layer. */
g_data->cryptomatte_download_buffer = MEM_malloc_arrayN(
buffer_size * num_cryptomatte_layers, sizeof(float), __func__);
g_data->cryptomatte_download_buffer = static_cast<float *>(
MEM_malloc_arrayN(buffer_size * num_cryptomatte_layers, sizeof(float), __func__));
}
else {
/* During multiview rendering the `cryptomatte_accum_buffer` is deallocated after all views
@@ -169,7 +170,7 @@ void EEVEE_cryptomatte_output_init(EEVEE_ViewLayerData *UNUSED(sldata),
sizeof(EEVEE_CryptomatteSample));
}

DRW_texture_ensure_fullscreen_2d(&txl->cryptomatte, format, 0);
DRW_texture_ensure_fullscreen_2d(&txl->cryptomatte, format, DRWTextureFlag(0));
GPU_framebuffer_ensure_config(&fbl->cryptomatte_fb,
{
GPU_ATTACHMENT_TEXTURE(dtxl->depth),
@@ -183,7 +184,7 @@ void EEVEE_cryptomatte_output_init(EEVEE_ViewLayerData *UNUSED(sldata),
/** \name Populate Cache
* \{ */

void EEVEE_cryptomatte_cache_init(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Data *vedata)
void EEVEE_cryptomatte_cache_init(EEVEE_ViewLayerData * /*sldata*/, EEVEE_Data *vedata)
{
EEVEE_PassList *psl = vedata->psl;
if ((vedata->stl->g_data->render_passes & EEVEE_RENDER_PASS_CRYPTOMATTE) != 0) {
@@ -242,7 +243,7 @@ static void eevee_cryptomatte_curves_cache_populate(EEVEE_Data *vedata,
{
DRWShadingGroup *grp = eevee_cryptomatte_shading_group_create(
vedata, sldata, ob, material, true);
DRW_shgroup_hair_create_sub(ob, psys, md, grp, NULL);
DRW_shgroup_hair_create_sub(ob, psys, md, grp, nullptr);
}

void EEVEE_cryptomatte_object_curves_cache_populate(EEVEE_Data *vedata,
@@ -253,7 +254,7 @@ void EEVEE_cryptomatte_object_curves_cache_populate(EEVEE_Data *vedata,
Material *material = BKE_object_material_get_eval(ob, CURVES_MATERIAL_NR);
DRWShadingGroup *grp = eevee_cryptomatte_shading_group_create(
vedata, sldata, ob, material, true);
DRW_shgroup_curves_create_sub(ob, grp, NULL);
DRW_shgroup_curves_create_sub(ob, grp, nullptr);
}

void EEVEE_cryptomatte_particle_hair_cache_populate(EEVEE_Data *vedata,
@@ -299,7 +300,7 @@ void EEVEE_cryptomatte_cache_populate(EEVEE_Data *vedata, EEVEE_ViewLayerData *s
if (geoms) {
for (int i = 0; i < materials_len; i++) {
GPUBatch *geom = geoms[i];
if (geom == NULL) {
if (geom == nullptr) {
continue;
}
Material *material = BKE_object_material_get_eval(ob, i + 1);
@@ -313,7 +314,7 @@ void EEVEE_cryptomatte_cache_populate(EEVEE_Data *vedata, EEVEE_ViewLayerData *s
GPUBatch *geom = DRW_cache_object_surface_get(ob);
if (geom) {
DRWShadingGroup *grp = eevee_cryptomatte_shading_group_create(
vedata, sldata, ob, NULL, false);
vedata, sldata, ob, nullptr, false);
DRW_shgroup_call(grp, geom, ob);
}
}
@@ -387,7 +388,7 @@ static void eevee_cryptomatte_download_buffer(EEVEE_Data *vedata, GPUFrameBuffer
}
}

void EEVEE_cryptomatte_output_accumulate(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Data *vedata)
void EEVEE_cryptomatte_output_accumulate(EEVEE_ViewLayerData * /*sldata*/, EEVEE_Data *vedata)
{
EEVEE_FramebufferList *fbl = vedata->fbl;
EEVEE_StorageList *stl = vedata->stl;
@@ -462,8 +463,8 @@ void EEVEE_cryptomatte_update_passes(RenderEngine *engine, Scene *scene, ViewLay
* beginning of the list. */
static int eevee_cryptomatte_sample_cmp_reverse(const void *a_, const void *b_)
{
const EEVEE_CryptomatteSample *a = a_;
const EEVEE_CryptomatteSample *b = b_;
const EEVEE_CryptomatteSample *a = static_cast<const EEVEE_CryptomatteSample *>(a_);
const EEVEE_CryptomatteSample *b = static_cast<const EEVEE_CryptomatteSample *>(b_);
if (a->weight < b->weight) {
return 1;
}
@@ -491,10 +492,10 @@ static void eevee_cryptomatte_postprocess_weights(EEVEE_Data *vedata)

EEVEE_CryptomatteSample *accum_buffer = g_data->cryptomatte_accum_buffer;
BLI_assert(accum_buffer);
float *volumetric_transmittance_buffer = NULL;
float *volumetric_transmittance_buffer = nullptr;
if ((effects->enabled_effects & EFFECT_VOLUMETRIC) != 0) {
volumetric_transmittance_buffer = GPU_texture_read(
txl->volume_transmittance_accum, GPU_DATA_FLOAT, 0);
volumetric_transmittance_buffer = static_cast<float *>(
GPU_texture_read(txl->volume_transmittance_accum, GPU_DATA_FLOAT, 0));
}
const int num_samples = effects->taa_current_sample - 1;

@@ -505,7 +506,7 @@ static void eevee_cryptomatte_postprocess_weights(EEVEE_Data *vedata)
pixel_index++, accum_pixel_index += accum_pixel_stride)
{
float coverage = 1.0f;
if (volumetric_transmittance_buffer != NULL) {
if (volumetric_transmittance_buffer != nullptr) {
coverage = (volumetric_transmittance_buffer[pixel_index * 4] +
volumetric_transmittance_buffer[pixel_index * 4 + 1] +
volumetric_transmittance_buffer[pixel_index * 4 + 2]) /
@@ -612,13 +613,13 @@ void EEVEE_cryptomatte_render_result(RenderLayer *rl,
const char *viewname,
const rcti *rect,
EEVEE_Data *vedata,
EEVEE_ViewLayerData *UNUSED(sldata))
EEVEE_ViewLayerData * /*sldata*/)
{
EEVEE_PrivateData *g_data = vedata->stl->g_data;
const DRWContextState *draw_ctx = DRW_context_state_get();
const ViewLayer *view_layer = draw_ctx->view_layer;
const eViewLayerCryptomatteFlags cryptomatte_layers = view_layer->cryptomatte_flag &
VIEW_LAYER_CRYPTOMATTE_ALL;
const eViewLayerCryptomatteFlags cryptomatte_layers = eViewLayerCryptomatteFlags(
view_layer->cryptomatte_flag & VIEW_LAYER_CRYPTOMATTE_ALL);

eevee_cryptomatte_postprocess_weights(vedata);

@@ -708,6 +709,6 @@ void EEVEE_cryptomatte_free(EEVEE_Data *vedata)
MEM_SAFE_FREE(g_data->cryptomatte_download_buffer);
if (g_data->cryptomatte_session) {
BKE_cryptomatte_free(g_data->cryptomatte_session);
g_data->cryptomatte_session = NULL;
g_data->cryptomatte_session = nullptr;
}
}
@@ -29,10 +29,10 @@
static void eevee_motion_blur_mesh_data_free(void *val)
{
EEVEE_ObjectMotionData *mb_data = (EEVEE_ObjectMotionData *)val;
if (mb_data->hair_data != NULL) {
if (mb_data->hair_data != nullptr) {
MEM_freeN(mb_data->hair_data);
}
if (mb_data->geometry_data != NULL) {
if (mb_data->geometry_data != nullptr) {
MEM_freeN(mb_data->geometry_data);
}
MEM_freeN(val);
@@ -81,15 +81,15 @@ void EEVEE_motion_hair_step_free(EEVEE_HairMotionStepData *step_data)

void EEVEE_motion_blur_data_init(EEVEE_MotionBlurData *mb)
{
if (mb->object == NULL) {
if (mb->object == nullptr) {
mb->object = BLI_ghash_new(eevee_object_key_hash, eevee_object_key_cmp, "EEVEE Object Motion");
}
for (int i = 0; i < 2; i++) {
if (mb->position_vbo_cache[i] == NULL) {
if (mb->position_vbo_cache[i] == nullptr) {
mb->position_vbo_cache[i] = BLI_ghash_new(
BLI_ghashutil_ptrhash, BLI_ghashutil_ptrcmp, "EEVEE duplicate vbo cache");
}
if (mb->hair_motion_step_cache[i] == NULL) {
if (mb->hair_motion_step_cache[i] == nullptr) {
mb->hair_motion_step_cache[i] = BLI_ghash_new(
BLI_ghashutil_ptrhash, BLI_ghashutil_ptrcmp, "EEVEE hair motion step cache");
}
@@ -100,17 +100,17 @@ void EEVEE_motion_blur_data_free(EEVEE_MotionBlurData *mb)
{
if (mb->object) {
BLI_ghash_free(mb->object, MEM_freeN, eevee_motion_blur_mesh_data_free);
mb->object = NULL;
mb->object = nullptr;
}
for (int i = 0; i < 2; i++) {
if (mb->position_vbo_cache[i]) {
BLI_ghash_free(mb->position_vbo_cache[i], NULL, (GHashValFreeFP)GPU_vertbuf_discard);
mb->position_vbo_cache[i] = NULL;
BLI_ghash_free(mb->position_vbo_cache[i], nullptr, (GHashValFreeFP)GPU_vertbuf_discard);
mb->position_vbo_cache[i] = nullptr;
}
if (mb->hair_motion_step_cache[i]) {
BLI_ghash_free(
mb->hair_motion_step_cache[i], NULL, (GHashValFreeFP)EEVEE_motion_hair_step_free);
mb->hair_motion_step_cache[i] = NULL;
mb->hair_motion_step_cache[i], nullptr, (GHashValFreeFP)EEVEE_motion_hair_step_free);
mb->hair_motion_step_cache[i] = nullptr;
}
}
}
@@ -119,8 +119,8 @@ EEVEE_ObjectMotionData *EEVEE_motion_blur_object_data_get(EEVEE_MotionBlurData *
Object *ob,
bool is_psys)
{
if (mb->object == NULL) {
return NULL;
if (mb->object == nullptr) {
return nullptr;
}

EEVEE_ObjectKey key, *key_p;
@@ -139,12 +139,14 @@ EEVEE_ObjectMotionData *EEVEE_motion_blur_object_data_get(EEVEE_MotionBlurData *
memset(key.id, 0, sizeof(key.id));
}

EEVEE_ObjectMotionData *ob_step = BLI_ghash_lookup(mb->object, &key);
if (ob_step == NULL) {
key_p = MEM_mallocN(sizeof(*key_p), __func__);
EEVEE_ObjectMotionData *ob_step = static_cast<EEVEE_ObjectMotionData *>(
BLI_ghash_lookup(mb->object, &key));
if (ob_step == nullptr) {
key_p = static_cast<EEVEE_ObjectKey *>(MEM_mallocN(sizeof(*key_p), __func__));
memcpy(key_p, &key, sizeof(*key_p));

ob_step = MEM_callocN(sizeof(EEVEE_ObjectMotionData), __func__);
ob_step = static_cast<EEVEE_ObjectMotionData *>(
MEM_callocN(sizeof(EEVEE_ObjectMotionData), __func__));

BLI_ghash_insert(mb->object, key_p, ob_step);
}
@@ -153,8 +155,9 @@ EEVEE_ObjectMotionData *EEVEE_motion_blur_object_data_get(EEVEE_MotionBlurData *

EEVEE_GeometryMotionData *EEVEE_motion_blur_geometry_data_get(EEVEE_ObjectMotionData *mb_data)
{
if (mb_data->geometry_data == NULL) {
EEVEE_GeometryMotionData *geom_step = MEM_callocN(sizeof(EEVEE_GeometryMotionData), __func__);
if (mb_data->geometry_data == nullptr) {
EEVEE_GeometryMotionData *geom_step = static_cast<EEVEE_GeometryMotionData *>(
MEM_callocN(sizeof(EEVEE_GeometryMotionData), __func__));
geom_step->type = EEVEE_MOTION_DATA_MESH;
mb_data->geometry_data = geom_step;
}
@@ -163,11 +166,11 @@ EEVEE_GeometryMotionData *EEVEE_motion_blur_geometry_data_get(EEVEE_ObjectMotion

EEVEE_HairMotionData *EEVEE_motion_blur_hair_data_get(EEVEE_ObjectMotionData *mb_data, Object *ob)
{
if (mb_data->hair_data == NULL) {
if (mb_data->hair_data == nullptr) {
/* Ugly, we allocate for each modifiers and just fill based on modifier index in the list. */
int psys_len = BLI_listbase_count(&ob->modifiers);
EEVEE_HairMotionData *hair_step = MEM_callocN(
sizeof(EEVEE_HairMotionData) + sizeof(hair_step->psys[0]) * psys_len, __func__);
EEVEE_HairMotionData *hair_step = static_cast<EEVEE_HairMotionData *>(MEM_callocN(
sizeof(EEVEE_HairMotionData) + sizeof(hair_step->psys[0]) * psys_len, __func__));
hair_step->psys_len = psys_len;
hair_step->type = EEVEE_MOTION_DATA_HAIR;
mb_data->hair_data = hair_step;
@@ -177,9 +180,9 @@ EEVEE_HairMotionData *EEVEE_motion_blur_hair_data_get(EEVEE_ObjectMotionData *mb

EEVEE_HairMotionData *EEVEE_motion_blur_curves_data_get(EEVEE_ObjectMotionData *mb_data)
{
if (mb_data->hair_data == NULL) {
EEVEE_HairMotionData *hair_step = MEM_callocN(
sizeof(EEVEE_HairMotionData) + sizeof(hair_step->psys[0]), __func__);
if (mb_data->hair_data == nullptr) {
EEVEE_HairMotionData *hair_step = static_cast<EEVEE_HairMotionData *>(
MEM_callocN(sizeof(EEVEE_HairMotionData) + sizeof(hair_step->psys[0]), __func__));
hair_step->psys_len = 1;
hair_step->type = EEVEE_MOTION_DATA_HAIR;
mb_data->hair_data = hair_step;
@@ -207,7 +210,7 @@ void EEVEE_view_layer_data_free(void *storage)

if (sldata->fallback_lightcache) {
EEVEE_lightcache_free(sldata->fallback_lightcache);
sldata->fallback_lightcache = NULL;
sldata->fallback_lightcache = nullptr;
}

/* Probes */
@@ -229,8 +232,8 @@ void EEVEE_view_layer_data_free(void *storage)
}

if (sldata->material_cache) {
BLI_memblock_destroy(sldata->material_cache, NULL);
sldata->material_cache = NULL;
BLI_memblock_destroy(sldata->material_cache, nullptr);
sldata->material_cache = nullptr;
}
}

@@ -249,8 +252,9 @@ EEVEE_ViewLayerData *EEVEE_view_layer_data_ensure_ex(ViewLayer *view_layer)
EEVEE_ViewLayerData **sldata = (EEVEE_ViewLayerData **)DRW_view_layer_engine_data_ensure_ex(
view_layer, &draw_engine_eevee_type, &EEVEE_view_layer_data_free);

if (*sldata == NULL) {
*sldata = MEM_callocN(sizeof(**sldata), "EEVEE_ViewLayerData");
if (*sldata == nullptr) {
*sldata = static_cast<EEVEE_ViewLayerData *>(
MEM_callocN(sizeof(**sldata), "EEVEE_ViewLayerData"));
eevee_view_layer_init(*sldata);
}

@@ -262,8 +266,9 @@ EEVEE_ViewLayerData *EEVEE_view_layer_data_ensure(void)
EEVEE_ViewLayerData **sldata = (EEVEE_ViewLayerData **)DRW_view_layer_engine_data_ensure(
&draw_engine_eevee_type, &EEVEE_view_layer_data_free);

if (*sldata == NULL) {
*sldata = MEM_callocN(sizeof(**sldata), "EEVEE_ViewLayerData");
if (*sldata == nullptr) {
*sldata = static_cast<EEVEE_ViewLayerData *>(
MEM_callocN(sizeof(**sldata), "EEVEE_ViewLayerData"));
eevee_view_layer_init(*sldata);
}

@@ -283,7 +288,7 @@ static void eevee_object_data_init(DrawData *dd)
EEVEE_ObjectEngineData *EEVEE_object_data_get(Object *ob)
{
if (ELEM(ob->type, OB_LIGHTPROBE, OB_LAMP)) {
return NULL;
return nullptr;
}
return (EEVEE_ObjectEngineData *)DRW_drawdata_get(&ob->id, &draw_engine_eevee_type);
}
@@ -295,7 +300,7 @@ EEVEE_ObjectEngineData *EEVEE_object_data_ensure(Object *ob)
&draw_engine_eevee_type,
sizeof(EEVEE_ObjectEngineData),
eevee_object_data_init,
NULL);
nullptr);
}

/* Light probe data. */
@@ -309,7 +314,7 @@ static void eevee_lightprobe_data_init(DrawData *dd)
EEVEE_LightProbeEngineData *EEVEE_lightprobe_data_get(Object *ob)
{
if (ob->type != OB_LIGHTPROBE) {
return NULL;
return nullptr;
}
return (EEVEE_LightProbeEngineData *)DRW_drawdata_get(&ob->id, &draw_engine_eevee_type);
}
@@ -321,7 +326,7 @@ EEVEE_LightProbeEngineData *EEVEE_lightprobe_data_ensure(Object *ob)
&draw_engine_eevee_type,
sizeof(EEVEE_LightProbeEngineData),
eevee_lightprobe_data_init,
NULL);
nullptr);
}

/* Light data. */
@@ -335,7 +340,7 @@ static void eevee_light_data_init(DrawData *dd)
EEVEE_LightEngineData *EEVEE_light_data_get(Object *ob)
{
if (ob->type != OB_LAMP) {
return NULL;
return nullptr;
}
return (EEVEE_LightEngineData *)DRW_drawdata_get(&ob->id, &draw_engine_eevee_type);
}
@@ -347,7 +352,7 @@ EEVEE_LightEngineData *EEVEE_light_data_ensure(Object *ob)
&draw_engine_eevee_type,
sizeof(EEVEE_LightEngineData),
eevee_light_data_init,
NULL);
nullptr);
}

/* World data. */
@@ -369,5 +374,5 @@ EEVEE_WorldEngineData *EEVEE_world_data_ensure(World *wo)
&draw_engine_eevee_type,
sizeof(EEVEE_WorldEngineData),
eevee_world_data_init,
NULL);
nullptr);
}
@@ -153,7 +153,7 @@ int EEVEE_depth_of_field_sample_count_get(EEVEE_EffectsInfo *effects,
|
||||
int *r_ring_count)
|
||||
{
|
||||
if (effects->dof_jitter_radius == 0.0f) {
|
||||
if (r_ring_count != NULL) {
|
||||
if (r_ring_count != nullptr) {
|
||||
*r_ring_count = 0;
|
||||
}
|
||||
return 1;
|
||||
@@ -172,15 +172,13 @@ int EEVEE_depth_of_field_sample_count_get(EEVEE_EffectsInfo *effects,
|
||||
|
||||
sample_count = dof_jitter_total_sample_count(CAMERA_JITTER_RING_DENSITY, ring_count);
|
||||
|
||||
if (r_ring_count != NULL) {
|
||||
if (r_ring_count != nullptr) {
|
||||
*r_ring_count = ring_count;
|
||||
}
|
||||
return sample_count;
|
||||
}
|
||||
|
||||
int EEVEE_depth_of_field_init(EEVEE_ViewLayerData *UNUSED(sldata),
|
||||
EEVEE_Data *vedata,
|
||||
Object *camera)
|
||||
int EEVEE_depth_of_field_init(EEVEE_ViewLayerData * /*sldata*/, EEVEE_Data *vedata, Object *camera)
|
||||
{
|
||||
EEVEE_TextureList *txl = vedata->txl;
|
||||
EEVEE_StorageList *stl = vedata->stl;
|
||||
@@ -190,7 +188,8 @@ int EEVEE_depth_of_field_init(EEVEE_ViewLayerData *UNUSED(sldata),
|
||||
const DRWContextState *draw_ctx = DRW_context_state_get();
|
||||
const Scene *scene_eval = DEG_get_evaluated_scene(draw_ctx->depsgraph);
|
||||
|
||||
Camera *cam = (camera != NULL && camera->type == OB_CAMERA) ? camera->data : NULL;
|
||||
Camera *cam = static_cast<Camera *>(
|
||||
(camera != nullptr && camera->type == OB_CAMERA) ? camera->data : nullptr);
|
||||
|
||||
if (cam && (cam->dof.flag & CAM_DOF_ENABLED)) {
|
||||
RegionView3D *rv3d = draw_ctx->rv3d;
|
||||
@@ -225,7 +224,7 @@ int EEVEE_depth_of_field_init(EEVEE_ViewLayerData *UNUSED(sldata),
|
||||
float focal_len_scaled = scale_camera * focal_len;
|
||||
float sensor_scaled = scale_camera * sensor;
|
||||
|
||||
if (rv3d != NULL) {
|
||||
if (rv3d != nullptr) {
|
||||
sensor_scaled *= rv3d->viewcamtexcofac[0];
|
||||
}
|
||||
|
||||
@@ -348,9 +347,9 @@ static void dof_bokeh_pass_init(EEVEE_FramebufferList *fbl,
|
||||
if ((fx->dof_bokeh_aniso[0] == 1.0f) && (fx->dof_bokeh_aniso[1] == 1.0f) &&
|
||||
(fx->dof_bokeh_blades == 0.0))
|
||||
{
|
||||
fx->dof_bokeh_gather_lut_tx = NULL;
|
||||
fx->dof_bokeh_scatter_lut_tx = NULL;
|
||||
fx->dof_bokeh_resolve_lut_tx = NULL;
|
||||
fx->dof_bokeh_gather_lut_tx = nullptr;
|
||||
fx->dof_bokeh_scatter_lut_tx = nullptr;
|
||||
fx->dof_bokeh_resolve_lut_tx = nullptr;
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -364,15 +363,15 @@ static void dof_bokeh_pass_init(EEVEE_FramebufferList *fbl,
|
||||
DRW_shgroup_uniform_float_copy(grp, "bokehSides", fx->dof_bokeh_blades);
|
||||
DRW_shgroup_uniform_float_copy(grp, "bokehRotation", fx->dof_bokeh_rotation);
|
||||
DRW_shgroup_uniform_vec2_copy(grp, "bokehAnisotropyInv", fx->dof_bokeh_aniso_inv);
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
|
||||
|
||||
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_ATTACHMENT;
|
||||
fx->dof_bokeh_gather_lut_tx = DRW_texture_pool_query_2d_ex(
|
||||
UNPACK2(res), GPU_RG16F, usage, owner);
|
||||
UNPACK2(res), GPU_RG16F, usage, static_cast<DrawEngineType *>(owner));
|
||||
fx->dof_bokeh_scatter_lut_tx = DRW_texture_pool_query_2d_ex(
|
||||
UNPACK2(res), GPU_R16F, usage, owner);
|
||||
UNPACK2(res), GPU_R16F, usage, static_cast<DrawEngineType *>(owner));
|
||||
fx->dof_bokeh_resolve_lut_tx = DRW_texture_pool_query_2d_ex(
|
||||
UNPACK2(res), GPU_R16F, usage, owner);
|
||||
UNPACK2(res), GPU_R16F, usage, static_cast<DrawEngineType *>(owner));
|
||||
|
||||
GPU_framebuffer_ensure_config(&fbl->dof_bokeh_fb,
|
||||
{
|
||||
@@ -394,7 +393,7 @@ static void dof_setup_pass_init(EEVEE_FramebufferList *fbl,
|
||||
|
||||
void *owner = (void *)&EEVEE_depth_of_field_init;
|
||||
const float *fullres = DRW_viewport_size_get();
|
||||
int res[2] = {divide_ceil_u(fullres[0], 2), divide_ceil_u(fullres[1], 2)};
|
||||
int res[2] = {int(divide_ceil_u(fullres[0], 2)), int(divide_ceil_u(fullres[1], 2))};
|
||||
|
||||
DRW_PASS_CREATE(psl->dof_setup, DRW_STATE_WRITE_COLOR);
|
||||
|
||||
@@ -404,12 +403,13 @@ static void dof_setup_pass_init(EEVEE_FramebufferList *fbl,
|
||||
DRW_shgroup_uniform_texture_ref_ex(grp, "depthBuffer", &dtxl->depth, NO_FILTERING);
|
||||
DRW_shgroup_uniform_vec4_copy(grp, "cocParams", fx->dof_coc_params);
|
||||
DRW_shgroup_uniform_float_copy(grp, "bokehMaxSize", fx->dof_bokeh_max_size);
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
|
||||
|
||||
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_ATTACHMENT;
|
||||
fx->dof_half_res_color_tx = DRW_texture_pool_query_2d_ex(
|
||||
UNPACK2(res), COLOR_FORMAT, usage, owner);
|
||||
fx->dof_half_res_coc_tx = DRW_texture_pool_query_2d_ex(UNPACK2(res), GPU_RG16F, usage, owner);
|
||||
UNPACK2(res), COLOR_FORMAT, usage, static_cast<DrawEngineType *>(owner));
|
||||
fx->dof_half_res_coc_tx = DRW_texture_pool_query_2d_ex(
|
||||
UNPACK2(res), GPU_RG16F, usage, static_cast<DrawEngineType *>(owner));
|
||||
|
||||
GPU_framebuffer_ensure_config(&fbl->dof_setup_fb,
|
||||
{
|
||||
@@ -428,8 +428,8 @@ static void dof_flatten_tiles_pass_init(EEVEE_FramebufferList *fbl,
|
||||
{
|
||||
void *owner = (void *)&EEVEE_depth_of_field_init;
|
||||
const float *fullres = DRW_viewport_size_get();
|
||||
int res[2] = {divide_ceil_u(fullres[0], DOF_TILE_DIVISOR),
|
||||
divide_ceil_u(fullres[1], DOF_TILE_DIVISOR)};
|
||||
int res[2] = {int(divide_ceil_u(fullres[0], DOF_TILE_DIVISOR)),
|
||||
int(divide_ceil_u(fullres[1], DOF_TILE_DIVISOR))};
|
||||
|
||||
DRW_PASS_CREATE(psl->dof_flatten_tiles, DRW_STATE_WRITE_COLOR);
|
||||
|
||||
@@ -437,13 +437,13 @@ static void dof_flatten_tiles_pass_init(EEVEE_FramebufferList *fbl,
|
||||
DRWShadingGroup *grp = DRW_shgroup_create(sh, psl->dof_flatten_tiles);
|
||||
DRW_shgroup_uniform_texture_ref_ex(
|
||||
grp, "halfResCocBuffer", &fx->dof_half_res_coc_tx, NO_FILTERING);
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
|
||||
|
||||
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_ATTACHMENT;
|
||||
fx->dof_coc_tiles_fg_tx = DRW_texture_pool_query_2d_ex(
|
||||
UNPACK2(res), FG_TILE_FORMAT, usage, owner);
|
||||
UNPACK2(res), FG_TILE_FORMAT, usage, static_cast<DrawEngineType *>(owner));
|
||||
fx->dof_coc_tiles_bg_tx = DRW_texture_pool_query_2d_ex(
|
||||
UNPACK2(res), BG_TILE_FORMAT, usage, owner);
|
||||
UNPACK2(res), BG_TILE_FORMAT, usage, static_cast<DrawEngineType *>(owner));
|
||||
|
||||
GPU_framebuffer_ensure_config(&fbl->dof_flatten_tiles_fb,
|
||||
{
|
||||
@@ -464,8 +464,8 @@ static void dof_dilate_tiles_pass_init(EEVEE_FramebufferList *fbl,
|
||||
{
|
||||
void *owner = (void *)&EEVEE_depth_of_field_init;
|
||||
const float *fullres = DRW_viewport_size_get();
|
||||
int res[2] = {divide_ceil_u(fullres[0], DOF_TILE_DIVISOR),
|
||||
divide_ceil_u(fullres[1], DOF_TILE_DIVISOR)};
|
||||
int res[2] = {int(divide_ceil_u(fullres[0], DOF_TILE_DIVISOR)),
|
||||
int(divide_ceil_u(fullres[1], DOF_TILE_DIVISOR))};
|
||||
|
||||
DRW_PASS_CREATE(psl->dof_dilate_tiles_minmax, DRW_STATE_WRITE_COLOR);
|
||||
DRW_PASS_CREATE(psl->dof_dilate_tiles_minabs, DRW_STATE_WRITE_COLOR);
|
||||
@@ -479,13 +479,13 @@ static void dof_dilate_tiles_pass_init(EEVEE_FramebufferList *fbl,
|
||||
DRW_shgroup_uniform_bool(grp, "dilateSlightFocus", &fx->dof_dilate_slight_focus, 1);
|
||||
DRW_shgroup_uniform_int(grp, "ringCount", &fx->dof_dilate_ring_count, 1);
|
||||
DRW_shgroup_uniform_int(grp, "ringWidthMultiplier", &fx->dof_dilate_ring_width_multiplier, 1);
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
|
||||
}
|
||||
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_ATTACHMENT;
|
||||
fx->dof_coc_dilated_tiles_fg_tx = DRW_texture_pool_query_2d_ex(
|
||||
UNPACK2(res), FG_TILE_FORMAT, usage, owner);
|
||||
UNPACK2(res), FG_TILE_FORMAT, usage, static_cast<DrawEngineType *>(owner));
|
||||
fx->dof_coc_dilated_tiles_bg_tx = DRW_texture_pool_query_2d_ex(
|
||||
UNPACK2(res), BG_TILE_FORMAT, usage, owner);
|
||||
UNPACK2(res), BG_TILE_FORMAT, usage, static_cast<DrawEngineType *>(owner));
|
||||
|
||||
GPU_framebuffer_ensure_config(&fbl->dof_dilate_tiles_fb,
|
||||
{
|
||||
@@ -558,10 +558,10 @@ static void dof_reduce_pass_init(EEVEE_FramebufferList *fbl,
|
||||
/* This ensure the mipmaps are aligned for the needed 4 mip levels.
|
||||
* Starts at 2 because already at half resolution. */
|
||||
int multiple = 2 << (mip_count - 1);
|
||||
int res[2] = {(multiple * divide_ceil_u(fullres[0], multiple)) / 2,
|
||||
(multiple * divide_ceil_u(fullres[1], multiple)) / 2};
|
||||
int res[2] = {(multiple * int(divide_ceil_u(fullres[0], multiple))) / 2,
|
||||
(multiple * int(divide_ceil_u(fullres[1], multiple))) / 2};
|
||||
|
||||
int quater_res[2] = {divide_ceil_u(fullres[0], 4), divide_ceil_u(fullres[1], 4)};
|
||||
int quater_res[2] = {int(divide_ceil_u(fullres[0], 4)), int(divide_ceil_u(fullres[1], 4))};
|
||||
|
||||
/* TODO(fclem): Make this dependent of the quality of the gather pass. */
|
||||
fx->dof_scatter_coc_threshold = 4.0f;
|
||||
@@ -575,13 +575,13 @@ static void dof_reduce_pass_init(EEVEE_FramebufferList *fbl,
|
||||
grp, "colorBuffer", &fx->dof_reduce_input_color_tx, NO_FILTERING);
|
||||
DRW_shgroup_uniform_texture_ref_ex(
|
||||
grp, "cocBuffer", &fx->dof_reduce_input_coc_tx, NO_FILTERING);
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
|
||||
|
||||
void *owner = (void *)&EEVEE_depth_of_field_init;
|
||||
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_ATTACHMENT |
|
||||
GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW;
|
||||
fx->dof_downsample_tx = DRW_texture_pool_query_2d_ex(
|
||||
UNPACK2(quater_res), COLOR_FORMAT, usage, owner);
|
||||
UNPACK2(quater_res), COLOR_FORMAT, usage, static_cast<DrawEngineType *>(owner));
|
||||
|
||||
GPU_framebuffer_ensure_config(&fbl->dof_downsample_fb,
|
||||
{
|
||||
@@ -608,12 +608,12 @@ static void dof_reduce_pass_init(EEVEE_FramebufferList *fbl,
|
||||
DRW_shgroup_uniform_float_copy(grp, "scatterCocThreshold", fx->dof_scatter_coc_threshold);
|
||||
DRW_shgroup_uniform_float_copy(grp, "colorNeighborClamping", fx->dof_denoise_factor);
|
||||
DRW_shgroup_uniform_vec2_copy(grp, "bokehAnisotropy", fx->dof_bokeh_aniso);
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
|
||||
|
||||
void *owner = (void *)&EEVEE_depth_of_field_init;
|
||||
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_ATTACHMENT;
|
||||
fx->dof_scatter_src_tx = DRW_texture_pool_query_2d_ex(
|
||||
UNPACK2(res), GPU_R11F_G11F_B10F, usage, owner);
|
||||
UNPACK2(res), GPU_R11F_G11F_B10F, usage, static_cast<DrawEngineType *>(owner));
|
||||
}
|
||||
|
||||
{
|
||||
@@ -626,7 +626,7 @@ static void dof_reduce_pass_init(EEVEE_FramebufferList *fbl,
|
||||
grp, "colorBuffer", &fx->dof_reduce_input_color_tx, NO_FILTERING);
|
||||
DRW_shgroup_uniform_texture_ref_ex(
|
||||
grp, "cocBuffer", &fx->dof_reduce_input_coc_tx, NO_FILTERING);
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
|
||||
}
|
||||
|
||||
if (txl->dof_reduced_color) {
|
||||
@@ -640,15 +640,15 @@ static void dof_reduce_pass_init(EEVEE_FramebufferList *fbl,
|
||||
}
|
||||
}
|
||||
|
||||
if (txl->dof_reduced_color == NULL) {
|
||||
if (txl->dof_reduced_color == nullptr) {
|
||||
/* Color needs to be signed format here. See note in shader for explanation. */
|
||||
/* Do not use texture pool because of needs mipmaps. */
|
||||
eGPUTextureUsage tex_flags = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_ATTACHMENT |
|
||||
GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW;
|
||||
txl->dof_reduced_color = GPU_texture_create_2d(
|
||||
"dof_reduced_color", UNPACK2(res), mip_count, GPU_RGBA16F, tex_flags, NULL);
|
||||
"dof_reduced_color", UNPACK2(res), mip_count, GPU_RGBA16F, tex_flags, nullptr);
|
||||
txl->dof_reduced_coc = GPU_texture_create_2d(
|
||||
"dof_reduced_coc", UNPACK2(res), mip_count, GPU_R16F, tex_flags, NULL);
|
||||
"dof_reduced_coc", UNPACK2(res), mip_count, GPU_R16F, tex_flags, nullptr);
|
||||
}
|
||||
|
||||
GPU_framebuffer_ensure_config(&fbl->dof_reduce_fb,
|
||||
@@ -678,12 +678,12 @@ static void dof_gather_pass_init(EEVEE_FramebufferList *fbl,
|
||||
{
|
||||
void *owner = (void *)&EEVEE_depth_of_field_init;
|
||||
const float *fullres = DRW_viewport_size_get();
|
||||
int res[2] = {divide_ceil_u(fullres[0], 2), divide_ceil_u(fullres[1], 2)};
|
||||
int res[2] = {int(divide_ceil_u(fullres[0], 2)), int(divide_ceil_u(fullres[1], 2))};
|
||||
int input_size[2];
|
||||
GPU_texture_get_mipmap_size(txl->dof_reduced_color, 0, input_size);
|
||||
float uv_correction_fac[2] = {res[0] / (float)input_size[0], res[1] / (float)input_size[1]};
|
||||
float output_texel_size[2] = {1.0f / res[0], 1.0f / res[1]};
|
||||
const bool use_bokeh_tx = (fx->dof_bokeh_gather_lut_tx != NULL);
|
||||
const bool use_bokeh_tx = (fx->dof_bokeh_gather_lut_tx != nullptr);
|
||||
|
||||
{
|
||||
DRW_PASS_CREATE(psl->dof_gather_fg_holefill, DRW_STATE_WRITE_COLOR);
|
||||
@@ -699,7 +699,7 @@ static void dof_gather_pass_init(EEVEE_FramebufferList *fbl,
|
||||
DRW_shgroup_uniform_texture(grp, "utilTex", EEVEE_materials_get_util_tex());
|
||||
DRW_shgroup_uniform_vec2_copy(grp, "gatherInputUvCorrection", uv_correction_fac);
|
||||
DRW_shgroup_uniform_vec2_copy(grp, "gatherOutputTexelSize", output_texel_size);
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
|
||||
|
||||
/* Reuse textures from the setup pass. */
|
||||
/* NOTE: We could use the texture pool do that for us but it does not track usage and it might
|
||||
@@ -707,7 +707,7 @@ static void dof_gather_pass_init(EEVEE_FramebufferList *fbl,
|
||||
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_ATTACHMENT;
|
||||
fx->dof_fg_holefill_color_tx = fx->dof_half_res_color_tx;
|
||||
fx->dof_fg_holefill_weight_tx = DRW_texture_pool_query_2d_ex(
|
||||
UNPACK2(res), GPU_R16F, usage, owner);
|
||||
UNPACK2(res), GPU_R16F, usage, static_cast<DrawEngineType *>(owner));
|
||||
|
||||
GPU_framebuffer_ensure_config(&fbl->dof_gather_fg_holefill_fb,
|
||||
{
|
||||
@@ -738,10 +738,12 @@ static void dof_gather_pass_init(EEVEE_FramebufferList *fbl,
|
||||
/* Restore. */
|
||||
negate_v2(fx->dof_bokeh_aniso);
|
||||
}
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
|
||||
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_ATTACHMENT;
|
||||
fx->dof_fg_color_tx = DRW_texture_pool_query_2d_ex(UNPACK2(res), COLOR_FORMAT, usage, owner);
|
||||
fx->dof_fg_weight_tx = DRW_texture_pool_query_2d_ex(UNPACK2(res), GPU_R16F, usage, owner);
|
||||
fx->dof_fg_color_tx = DRW_texture_pool_query_2d_ex(
|
||||
UNPACK2(res), COLOR_FORMAT, usage, static_cast<DrawEngineType *>(owner));
|
||||
fx->dof_fg_weight_tx = DRW_texture_pool_query_2d_ex(
|
||||
UNPACK2(res), GPU_R16F, usage, static_cast<DrawEngineType *>(owner));
/* Reuse textures from the setup pass. */
/* NOTE: We could use the texture pool do that for us but it does not track usage and it might
* backfire (it does in practice). */
@@ -775,11 +777,13 @@ static void dof_gather_pass_init(EEVEE_FramebufferList *fbl,
DRW_shgroup_uniform_vec2_copy(grp, "bokehAnisotropy", fx->dof_bokeh_aniso);
DRW_shgroup_uniform_texture_ref(grp, "bokehLut", &fx->dof_bokeh_gather_lut_tx);
}
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);

eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_ATTACHMENT;
fx->dof_bg_color_tx = DRW_texture_pool_query_2d_ex(UNPACK2(res), COLOR_FORMAT, usage, owner);
fx->dof_bg_weight_tx = DRW_texture_pool_query_2d_ex(UNPACK2(res), GPU_R16F, usage, owner);
fx->dof_bg_color_tx = DRW_texture_pool_query_2d_ex(
UNPACK2(res), COLOR_FORMAT, usage, static_cast<DrawEngineType *>(owner));
fx->dof_bg_weight_tx = DRW_texture_pool_query_2d_ex(
UNPACK2(res), GPU_R16F, usage, static_cast<DrawEngineType *>(owner));
/* Reuse, since only used for scatter. Foreground is processed before background. */
fx->dof_bg_occlusion_tx = fx->dof_fg_occlusion_tx;

@@ -812,7 +816,7 @@ static void dof_filter_pass_init(EEVEE_FramebufferList *fbl,
grp, "colorBuffer", &fx->dof_fg_holefill_color_tx, NO_FILTERING);
DRW_shgroup_uniform_texture_ref_ex(
grp, "weightBuffer", &fx->dof_fg_holefill_weight_tx, NO_FILTERING);
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);

GPU_framebuffer_ensure_config(&fbl->dof_filter_fg_fb,
{
@@ -844,7 +848,7 @@ static void dof_scatter_pass_init(EEVEE_FramebufferList *fbl,
/* Draw a sprite for every four half-res pixels. */
int sprite_count = (input_size[0] / 2) * (input_size[1] / 2);
float target_texel_size[2] = {1.0f / target_size[0], 1.0f / target_size[1]};
const bool use_bokeh_tx = (fx->dof_bokeh_gather_lut_tx != NULL);
const bool use_bokeh_tx = (fx->dof_bokeh_gather_lut_tx != nullptr);

{
DRW_PASS_CREATE(psl->dof_scatter_fg, DRW_STATE_WRITE_COLOR | DRW_STATE_BLEND_ADD_FULL);
@@ -866,7 +870,7 @@ static void dof_scatter_pass_init(EEVEE_FramebufferList *fbl,
/* Restore. */
negate_v2(fx->dof_bokeh_aniso_inv);
}
DRW_shgroup_call_procedural_triangles(grp, NULL, sprite_count);
DRW_shgroup_call_procedural_triangles(grp, nullptr, sprite_count);

GPU_framebuffer_ensure_config(&fbl->dof_scatter_fg_fb,
{
@@ -890,7 +894,7 @@ static void dof_scatter_pass_init(EEVEE_FramebufferList *fbl,
DRW_shgroup_uniform_vec2_copy(grp, "bokehAnisotropyInv", fx->dof_bokeh_aniso_inv);
DRW_shgroup_uniform_texture_ref(grp, "bokehLut", &fx->dof_bokeh_scatter_lut_tx);
}
DRW_shgroup_call_procedural_triangles(grp, NULL, sprite_count);
DRW_shgroup_call_procedural_triangles(grp, nullptr, sprite_count);

GPU_framebuffer_ensure_config(&fbl->dof_scatter_bg_fb,
{
@@ -904,12 +908,12 @@ static void dof_scatter_pass_init(EEVEE_FramebufferList *fbl,
* Recombine the result of the foreground and background processing. Also perform a slight out of
* focus blur to improve geometric continuity.
*/
static void dof_recombine_pass_init(EEVEE_FramebufferList *UNUSED(fbl),
static void dof_recombine_pass_init(EEVEE_FramebufferList * /*fbl*/,
EEVEE_PassList *psl,
EEVEE_EffectsInfo *fx)
{
DefaultTextureList *dtxl = DRW_viewport_texture_list_get();
const bool use_bokeh_tx = (fx->dof_bokeh_gather_lut_tx != NULL);
const bool use_bokeh_tx = (fx->dof_bokeh_gather_lut_tx != nullptr);

DRW_PASS_CREATE(psl->dof_resolve, DRW_STATE_WRITE_COLOR);

@@ -932,10 +936,10 @@ static void dof_recombine_pass_init(EEVEE_FramebufferList *UNUSED(fbl),
DRW_shgroup_uniform_vec2_copy(grp, "bokehAnisotropyInv", fx->dof_bokeh_aniso_inv);
DRW_shgroup_uniform_texture_ref(grp, "bokehLut", &fx->dof_bokeh_resolve_lut_tx);
}
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
}

void EEVEE_depth_of_field_cache_init(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Data *vedata)
void EEVEE_depth_of_field_cache_init(EEVEE_ViewLayerData * /*sldata*/, EEVEE_Data *vedata)
{
EEVEE_TextureList *txl = vedata->txl;
EEVEE_FramebufferList *fbl = vedata->fbl;
@@ -961,7 +965,7 @@ void EEVEE_depth_of_field_cache_init(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_
}
}

static void dof_recursive_reduce(void *vedata, int UNUSED(level))
static void dof_recursive_reduce(void *vedata, int /*level*/)
{
EEVEE_PassList *psl = ((EEVEE_Data *)vedata)->psl;
EEVEE_TextureList *txl = ((EEVEE_Data *)vedata)->txl;
@@ -986,7 +990,7 @@ void EEVEE_depth_of_field_draw(EEVEE_Data *vedata)
if ((effects->enabled_effects & EFFECT_DOF) != 0) {
DRW_stats_group_start("Depth of Field");

if (fx->dof_bokeh_gather_lut_tx != NULL) {
if (fx->dof_bokeh_gather_lut_tx != nullptr) {
GPU_framebuffer_bind(fbl->dof_bokeh_fb);
DRW_draw_pass(psl->dof_bokeh);
}
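Note: the `UNUSED(x)` macro was the C way to silence `-Wunused-parameter`; in C++ a parameter can simply be left unnamed, and the convention in these hunks keeps the name as a comment. A short sketch, illustrative only:

/* C (old):  static void demo_cb(void *UNUSED(user_data), int value) { ... }      */
/* C++ (new): the name is omitted and kept as a comment for readability.          */
static void demo_cb(void * /*user_data*/, int value)
{
  /* Only `value` is used; the first parameter needs no name in C++. */
  (void)value;
}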
@@ -26,7 +26,7 @@ static struct {
int depth_src_layer;
/* Size can be vec3. But we only use 2 components in the shader. */
float texel_size[2];
} e_data = {NULL}; /* Engine data */
} e_data = {nullptr}; /* Engine data */

#define SETUP_BUFFER(tex, fb, fb_color) \
{ \
@@ -70,7 +70,8 @@ void EEVEE_effects_init(EEVEE_ViewLayerData *sldata,
const int size_fs[2] = {(int)viewport_size[0], (int)viewport_size[1]};

if (!stl->effects) {
stl->effects = MEM_callocN(sizeof(EEVEE_EffectsInfo), "EEVEE_EffectsInfo");
stl->effects = static_cast<EEVEE_EffectsInfo *>(
MEM_callocN(sizeof(EEVEE_EffectsInfo), "EEVEE_EffectsInfo"));
stl->effects->taa_render_sample = 1;
}
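Note: `MEM_callocN` returns `void *`, which no longer converts implicitly in C++, so every allocation into a typed pointer gains a `static_cast`. A minimal self-contained sketch; `calloc` stands in for `MEM_callocN` so it compiles without Blender headers:

#include <cstdlib>

struct EffectsInfoDemo {
  int taa_render_sample;
};

static EffectsInfoDemo *effects_alloc_demo()
{
  /* C:   EffectsInfoDemo *fx = MEM_callocN(...);      implicit void * conversion. */
  /* C++: the void * result must be cast explicitly.                               */
  EffectsInfoDemo *fx = static_cast<EffectsInfoDemo *>(calloc(1, sizeof(EffectsInfoDemo)));
  fx->taa_render_sample = 1;
  return fx;
}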

@@ -84,14 +85,14 @@ void EEVEE_effects_init(EEVEE_ViewLayerData *sldata,
effects->hiz_size[0] = divide_ceil_u(size_fs[0], div) * div;
effects->hiz_size[1] = divide_ceil_u(size_fs[1], div) * div;

effects->enabled_effects = 0;
effects->enabled_effects |= (G.debug_value == 9) ? EFFECT_VELOCITY_BUFFER : 0;
effects->enabled_effects |= EEVEE_motion_blur_init(sldata, vedata);
effects->enabled_effects |= EEVEE_bloom_init(sldata, vedata);
effects->enabled_effects |= EEVEE_depth_of_field_init(sldata, vedata, camera);
effects->enabled_effects |= EEVEE_temporal_sampling_init(sldata, vedata);
effects->enabled_effects |= EEVEE_occlusion_init(sldata, vedata);
effects->enabled_effects |= EEVEE_screen_raytrace_init(sldata, vedata);
effects->enabled_effects = EEVEE_EffectsFlag(0);
effects->enabled_effects |= (G.debug_value == 9) ? EFFECT_VELOCITY_BUFFER : EEVEE_EffectsFlag(0);
effects->enabled_effects |= EEVEE_EffectsFlag(EEVEE_motion_blur_init(sldata, vedata));
effects->enabled_effects |= EEVEE_EffectsFlag(EEVEE_bloom_init(sldata, vedata));
effects->enabled_effects |= EEVEE_EffectsFlag(EEVEE_depth_of_field_init(sldata, vedata, camera));
effects->enabled_effects |= EEVEE_EffectsFlag(EEVEE_temporal_sampling_init(sldata, vedata));
effects->enabled_effects |= EEVEE_EffectsFlag(EEVEE_occlusion_init(sldata, vedata));
effects->enabled_effects |= EEVEE_EffectsFlag(EEVEE_screen_raytrace_init(sldata, vedata));
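Note: `enabled_effects` is typed as an enum flag and C++ does not implicitly convert `int` to an enum type, so every accumulation wraps the value in an explicit `EEVEE_EffectsFlag(...)` construction; the `DRWTextureFlag(...)` wrappers further down follow the same rule. A sketch with stand-in types, assuming the real enum defines its bitwise operators the way Blender flag enums usually do; illustrative only:

enum DemoFlag { DEMO_NONE = 0, DEMO_A = 1 << 0, DEMO_B = 1 << 1 };

inline DemoFlag operator|(DemoFlag a, DemoFlag b) { return DemoFlag(int(a) | int(b)); }
inline DemoFlag &operator|=(DemoFlag &a, DemoFlag b) { return a = a | b; }

int demo_init() { return DEMO_B; }  /* stand-in for an EEVEE_*_init() returning an int mask */

void flags_demo()
{
  DemoFlag flags = DemoFlag(0);    /* plain `= 0` would not compile: int -> enum is not implicit */
  /* flags |= demo_init();            error: no implicit int -> DemoFlag conversion.             */
  flags |= DemoFlag(demo_init());  /* OK: the conversion is spelled out.                         */
  (void)flags;
}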

/* Update matrices here because EEVEE_screen_raytrace_init can have reset the
* taa_current_sample. (See #66811) */
@@ -133,7 +134,7 @@ void EEVEE_effects_init(EEVEE_ViewLayerData *sldata,
});
}

if (fbl->downsample_fb == NULL) {
if (fbl->downsample_fb == nullptr) {
fbl->downsample_fb = GPU_framebuffer_create("downsample_fb");
}

@@ -158,7 +159,7 @@ void EEVEE_effects_init(EEVEE_ViewLayerData *sldata,
UNPACK2(effects->hiz_size),
GPU_R11F_G11F_B10F,
usage,
DRW_TEX_FILTER | DRW_TEX_MIPMAP);
DRWTextureFlag(DRW_TEX_FILTER | DRW_TEX_MIPMAP));

GPU_framebuffer_ensure_config(&fbl->radiance_filtered_fb,
{
@@ -182,7 +183,7 @@ void EEVEE_effects_init(EEVEE_ViewLayerData *sldata,
GPU_framebuffer_texture_attach(fbl->main_fb, effects->ssr_normal_input, 1, 0);
}
else {
effects->ssr_normal_input = NULL;
effects->ssr_normal_input = nullptr;
}

/**
@@ -204,14 +205,15 @@ void EEVEE_effects_init(EEVEE_ViewLayerData *sldata,
{GPU_ATTACHMENT_NONE, GPU_ATTACHMENT_TEXTURE(effects->velocity_tx)});
}
else {
effects->velocity_tx = NULL;
effects->velocity_tx = nullptr;
}

/**
* Setup depth double buffer.
*/
if ((effects->enabled_effects & EFFECT_DEPTH_DOUBLE_BUFFER) != 0) {
DRW_texture_ensure_fullscreen_2d(&txl->depth_double_buffer, GPU_DEPTH24_STENCIL8, 0);
DRW_texture_ensure_fullscreen_2d(
&txl->depth_double_buffer, GPU_DEPTH24_STENCIL8, DRWTextureFlag(0));

GPU_framebuffer_ensure_config(&fbl->double_buffer_depth_fb,
{GPU_ATTACHMENT_TEXTURE(txl->depth_double_buffer)});
@@ -250,16 +252,17 @@ void EEVEE_effects_cache_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
if (effects->enabled_effects & EFFECT_RADIANCE_BUFFER) {
DRW_PASS_CREATE(psl->color_copy_ps, DRW_STATE_WRITE_COLOR);
grp = DRW_shgroup_create(EEVEE_shaders_effect_color_copy_sh_get(), psl->color_copy_ps);
DRW_shgroup_uniform_texture_ref_ex(grp, "source", &e_data.color_src, GPU_SAMPLER_DEFAULT);
DRW_shgroup_uniform_texture_ref_ex(
grp, "source", &e_data.color_src, GPUSamplerState::default_sampler());
DRW_shgroup_uniform_float(grp, "fireflyFactor", &sldata->common_data.ssr_firefly_fac, 1);
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);

DRW_PASS_CREATE(psl->color_downsample_ps, DRW_STATE_WRITE_COLOR);
grp = DRW_shgroup_create(EEVEE_shaders_effect_downsample_sh_get(), psl->color_downsample_ps);
const GPUSamplerState sampler_state = {GPU_SAMPLER_FILTERING_LINEAR};
DRW_shgroup_uniform_texture_ex(grp, "source", txl->filtered_radiance, sampler_state);
DRW_shgroup_uniform_vec2(grp, "texelSize", e_data.texel_size, 1);
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
}
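Note: the sampler argument changes from the plain `GPU_SAMPLER_DEFAULT` enum to a `GPUSamplerState` value, created either through `GPUSamplerState::default_sampler()` or a braced initializer as in the `GPU_SAMPLER_FILTERING_LINEAR` case above. A rough stand-in sketch of that shape (the real struct has more fields); illustrative only:

enum DemoFiltering { DEMO_FILTERING_NONE = 0, DEMO_FILTERING_LINEAR = 1 };

struct DemoSamplerState {
  DemoFiltering filtering;
  static DemoSamplerState default_sampler() { return {DEMO_FILTERING_NONE}; }
};

void sampler_demo()
{
  /* Old C call sites passed a bare enum value; new C++ call sites pass a state struct: */
  DemoSamplerState a = DemoSamplerState::default_sampler();   /* the named default */
  const DemoSamplerState b = {DEMO_FILTERING_LINEAR};         /* or a braced initializer */
  (void)a;
  (void)b;
}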

{
@@ -269,29 +272,32 @@ void EEVEE_effects_cache_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
DRW_shgroup_uniform_texture_ref(grp, "source", &e_data.color_src);
DRW_shgroup_uniform_float(grp, "texelSize", e_data.texel_size, 1);
DRW_shgroup_uniform_int_copy(grp, "Layer", 0);
DRW_shgroup_call_instances(grp, NULL, quad, 6);
DRW_shgroup_call_instances(grp, nullptr, quad, 6);
}

{
/* Perform min/max down-sample. */
DRW_PASS_CREATE(psl->maxz_downlevel_ps, downsample_write);
grp = DRW_shgroup_create(EEVEE_shaders_effect_maxz_downlevel_sh_get(), psl->maxz_downlevel_ps);
DRW_shgroup_uniform_texture_ref_ex(grp, "depthBuffer", &txl->maxzbuffer, GPU_SAMPLER_DEFAULT);
DRW_shgroup_uniform_texture_ref_ex(
grp, "depthBuffer", &txl->maxzbuffer, GPUSamplerState::default_sampler());
DRW_shgroup_uniform_vec2(grp, "texelSize", e_data.texel_size, 1);
DRW_shgroup_call(grp, quad, NULL);
DRW_shgroup_call(grp, quad, nullptr);

/* Copy depth buffer to top level of HiZ */
DRW_PASS_CREATE(psl->maxz_copydepth_ps, downsample_write);
grp = DRW_shgroup_create(EEVEE_shaders_effect_maxz_copydepth_sh_get(), psl->maxz_copydepth_ps);
DRW_shgroup_uniform_texture_ref_ex(grp, "depthBuffer", &e_data.depth_src, GPU_SAMPLER_DEFAULT);
DRW_shgroup_call(grp, quad, NULL);
DRW_shgroup_uniform_texture_ref_ex(
grp, "depthBuffer", &e_data.depth_src, GPUSamplerState::default_sampler());
DRW_shgroup_call(grp, quad, nullptr);

DRW_PASS_CREATE(psl->maxz_copydepth_layer_ps, downsample_write);
grp = DRW_shgroup_create(EEVEE_shaders_effect_maxz_copydepth_layer_sh_get(),
psl->maxz_copydepth_layer_ps);
DRW_shgroup_uniform_texture_ref_ex(grp, "depthBuffer", &e_data.depth_src, GPU_SAMPLER_DEFAULT);
DRW_shgroup_uniform_texture_ref_ex(
grp, "depthBuffer", &e_data.depth_src, GPUSamplerState::default_sampler());
DRW_shgroup_uniform_int(grp, "depthLayer", &e_data.depth_src_layer, 1);
DRW_shgroup_call(grp, quad, NULL);
DRW_shgroup_call(grp, quad, nullptr);
}

if ((effects->enabled_effects & EFFECT_VELOCITY_BUFFER) != 0) {
@@ -307,11 +313,11 @@ void EEVEE_effects_cache_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
DRW_shgroup_uniform_mat4(grp, "prevViewProjMatrix", mb_data->camera[MB_PREV].persmat);
DRW_shgroup_uniform_mat4(grp, "currViewProjMatrixInv", mb_data->camera[MB_CURR].persinv);
DRW_shgroup_uniform_mat4(grp, "nextViewProjMatrix", mb_data->camera[MB_NEXT].persmat);
DRW_shgroup_call(grp, quad, NULL);
DRW_shgroup_call(grp, quad, nullptr);
}
}

void EEVEE_effects_draw_init(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Data *vedata)
void EEVEE_effects_draw_init(EEVEE_ViewLayerData * /*sldata*/, EEVEE_Data *vedata)
{
EEVEE_FramebufferList *fbl = vedata->fbl;
EEVEE_TextureList *txl = vedata->txl;
@@ -339,7 +345,7 @@ void EEVEE_effects_draw_init(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Data *ve
}

#if 0 /* Not required for now */
static void min_downsample_cb(void *vedata, int UNUSED(level))
static void min_downsample_cb(void *vedata, int /*level*/)
{
EEVEE_PassList *psl = ((EEVEE_Data *)vedata)->psl;
DRW_draw_pass(psl->minz_downlevel_ps);
@@ -517,7 +523,7 @@ void EEVEE_draw_effects(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)

/* Update double buffer status if render mode. */
if (DRW_state_is_image_render()) {
stl->g_data->valid_double_buffer = (txl->color_double_buffer != NULL);
stl->g_data->valid_taa_history = (txl->taa_history != NULL);
stl->g_data->valid_double_buffer = (txl->color_double_buffer != nullptr);
stl->g_data->valid_taa_history = (txl->taa_history != nullptr);
}
}
@@ -44,17 +44,17 @@ static void eevee_engine_init(void *ved)
const DRWContextState *draw_ctx = DRW_context_state_get();
View3D *v3d = draw_ctx->v3d;
RegionView3D *rv3d = draw_ctx->rv3d;
Object *camera = (rv3d->persp == RV3D_CAMOB) ? v3d->camera : NULL;
Object *camera = (rv3d->persp == RV3D_CAMOB) ? v3d->camera : nullptr;

if (!stl->g_data) {
/* Alloc transient pointers */
stl->g_data = MEM_callocN(sizeof(*stl->g_data), __func__);
stl->g_data = static_cast<EEVEE_PrivateData *>(MEM_callocN(sizeof(*stl->g_data), __func__));
}
stl->g_data->use_color_render_settings = USE_SCENE_LIGHT(v3d) ||
!LOOK_DEV_STUDIO_LIGHT_ENABLED(v3d);
stl->g_data->background_alpha = DRW_state_draw_background() ? 1.0f : 0.0f;
stl->g_data->valid_double_buffer = (txl->color_double_buffer != NULL);
stl->g_data->valid_taa_history = (txl->taa_history != NULL);
stl->g_data->valid_double_buffer = (txl->color_double_buffer != nullptr);
stl->g_data->valid_taa_history = (txl->taa_history != nullptr);
stl->g_data->queued_shaders_count = 0;
stl->g_data->queued_optimise_shaders_count = 0;
stl->g_data->render_timesteps = 1;
@@ -88,18 +88,18 @@ static void eevee_cache_init(void *vedata)
{
EEVEE_ViewLayerData *sldata = EEVEE_view_layer_data_ensure();

EEVEE_bloom_cache_init(sldata, vedata);
EEVEE_depth_of_field_cache_init(sldata, vedata);
EEVEE_effects_cache_init(sldata, vedata);
EEVEE_lightprobes_cache_init(sldata, vedata);
EEVEE_lights_cache_init(sldata, vedata);
EEVEE_materials_cache_init(sldata, vedata);
EEVEE_motion_blur_cache_init(sldata, vedata);
EEVEE_occlusion_cache_init(sldata, vedata);
EEVEE_screen_raytrace_cache_init(sldata, vedata);
EEVEE_subsurface_cache_init(sldata, vedata);
EEVEE_temporal_sampling_cache_init(sldata, vedata);
EEVEE_volumes_cache_init(sldata, vedata);
EEVEE_bloom_cache_init(sldata, static_cast<EEVEE_Data *>(vedata));
EEVEE_depth_of_field_cache_init(sldata, static_cast<EEVEE_Data *>(vedata));
EEVEE_effects_cache_init(sldata, static_cast<EEVEE_Data *>(vedata));
EEVEE_lightprobes_cache_init(sldata, static_cast<EEVEE_Data *>(vedata));
EEVEE_lights_cache_init(sldata, static_cast<EEVEE_Data *>(vedata));
EEVEE_materials_cache_init(sldata, static_cast<EEVEE_Data *>(vedata));
EEVEE_motion_blur_cache_init(sldata, static_cast<EEVEE_Data *>(vedata));
EEVEE_occlusion_cache_init(sldata, static_cast<EEVEE_Data *>(vedata));
EEVEE_screen_raytrace_cache_init(sldata, static_cast<EEVEE_Data *>(vedata));
EEVEE_subsurface_cache_init(sldata, static_cast<EEVEE_Data *>(vedata));
EEVEE_temporal_sampling_cache_init(sldata, static_cast<EEVEE_Data *>(vedata));
EEVEE_volumes_cache_init(sldata, static_cast<EEVEE_Data *>(vedata));
}
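Note: the engine callbacks receive an opaque `void *vedata` (the signature is dictated by the draw-manager callback table), and in C++ each forward to a typed `EEVEE_*` function needs an explicit cast, which is why `static_cast<EEVEE_Data *>(vedata)` repeats throughout these hunks. A sketch with stand-in names, illustrative only:

struct EeveeDataDemo {
  int dummy;
};

static void demo_submodule_init(EeveeDataDemo *data)
{
  data->dummy = 1;
}

static void demo_cache_init(void *vedata)  /* signature fixed by the callback table */
{
  /* demo_submodule_init(vedata);              OK in C, error in C++. */
  demo_submodule_init(static_cast<EeveeDataDemo *>(vedata));
}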
|
||||
|
||||
void EEVEE_cache_populate(void *vedata, Object *ob)
|
||||
@@ -111,18 +111,21 @@ void EEVEE_cache_populate(void *vedata, Object *ob)
|
||||
bool cast_shadow = false;
|
||||
|
||||
if (ob_visibility & OB_VISIBLE_PARTICLES) {
|
||||
EEVEE_particle_hair_cache_populate(vedata, sldata, ob, &cast_shadow);
|
||||
EEVEE_particle_hair_cache_populate(
|
||||
static_cast<EEVEE_Data *>(vedata), sldata, ob, &cast_shadow);
|
||||
}
|
||||
|
||||
if (DRW_object_is_renderable(ob) && (ob_visibility & OB_VISIBLE_SELF)) {
|
||||
if (ob->type == OB_MESH) {
|
||||
EEVEE_materials_cache_populate(vedata, sldata, ob, &cast_shadow);
|
||||
EEVEE_materials_cache_populate(static_cast<EEVEE_Data *>(vedata), sldata, ob, &cast_shadow);
|
||||
}
|
||||
else if (ob->type == OB_CURVES) {
|
||||
EEVEE_object_curves_cache_populate(vedata, sldata, ob, &cast_shadow);
|
||||
EEVEE_object_curves_cache_populate(
|
||||
static_cast<EEVEE_Data *>(vedata), sldata, ob, &cast_shadow);
|
||||
}
|
||||
else if (ob->type == OB_VOLUME) {
|
||||
EEVEE_volumes_cache_object_add(sldata, vedata, draw_ctx->scene, ob);
|
||||
EEVEE_volumes_cache_object_add(
|
||||
sldata, static_cast<EEVEE_Data *>(vedata), draw_ctx->scene, ob);
|
||||
}
|
||||
else if (!USE_SCENE_LIGHT(draw_ctx->v3d)) {
|
||||
/* do not add any scene light sources to the cache */
|
||||
@@ -132,7 +135,7 @@ void EEVEE_cache_populate(void *vedata, Object *ob)
|
||||
/* TODO: Special case for dupli objects because we cannot save the object pointer. */
|
||||
}
|
||||
else {
|
||||
EEVEE_lightprobes_cache_add(sldata, vedata, ob);
|
||||
EEVEE_lightprobes_cache_add(sldata, static_cast<EEVEE_Data *>(vedata), ob);
|
||||
}
|
||||
}
|
||||
else if (ob->type == OB_LAMP) {
|
||||
@@ -154,15 +157,15 @@ static void eevee_cache_finish(void *vedata)
|
||||
const DRWContextState *draw_ctx = DRW_context_state_get();
|
||||
const Scene *scene_eval = DEG_get_evaluated_scene(draw_ctx->depsgraph);
|
||||
|
||||
EEVEE_volumes_cache_finish(sldata, vedata);
|
||||
EEVEE_materials_cache_finish(sldata, vedata);
|
||||
EEVEE_lights_cache_finish(sldata, vedata);
|
||||
EEVEE_lightprobes_cache_finish(sldata, vedata);
|
||||
EEVEE_renderpasses_cache_finish(sldata, vedata);
|
||||
EEVEE_volumes_cache_finish(sldata, static_cast<EEVEE_Data *>(vedata));
|
||||
EEVEE_materials_cache_finish(sldata, static_cast<EEVEE_Data *>(vedata));
|
||||
EEVEE_lights_cache_finish(sldata, static_cast<EEVEE_Data *>(vedata));
|
||||
EEVEE_lightprobes_cache_finish(sldata, static_cast<EEVEE_Data *>(vedata));
|
||||
EEVEE_renderpasses_cache_finish(sldata, static_cast<EEVEE_Data *>(vedata));
|
||||
|
||||
EEVEE_subsurface_draw_init(sldata, vedata);
|
||||
EEVEE_effects_draw_init(sldata, vedata);
|
||||
EEVEE_volumes_draw_init(sldata, vedata);
|
||||
EEVEE_subsurface_draw_init(sldata, static_cast<EEVEE_Data *>(vedata));
|
||||
EEVEE_effects_draw_init(sldata, static_cast<EEVEE_Data *>(vedata));
|
||||
EEVEE_volumes_draw_init(sldata, static_cast<EEVEE_Data *>(vedata));
|
||||
|
||||
uint tot_samples = scene_eval->eevee.taa_render_samples;
|
||||
if (tot_samples == 0) {
|
||||
@@ -170,13 +173,13 @@ static void eevee_cache_finish(void *vedata)
|
||||
* will have the highest possible precision. */
|
||||
tot_samples = 1024;
|
||||
}
|
||||
EEVEE_renderpasses_output_init(sldata, vedata, tot_samples);
|
||||
EEVEE_renderpasses_output_init(sldata, static_cast<EEVEE_Data *>(vedata), tot_samples);
|
||||
|
||||
/* Restart TAA if a shader has finish compiling. */
|
||||
/* HACK: We should use notification of some sort from the compilation job instead. */
|
||||
if (g_data->queued_shaders_count != g_data->queued_shaders_count_prev) {
|
||||
g_data->queued_shaders_count_prev = g_data->queued_shaders_count;
|
||||
EEVEE_temporal_sampling_reset(vedata);
|
||||
EEVEE_temporal_sampling_reset(static_cast<EEVEE_Data *>(vedata));
|
||||
}
|
||||
|
||||
if (g_data->queued_shaders_count > 0) {
|
||||
@@ -238,7 +241,7 @@ static void eevee_draw_scene(void *vedata)
|
||||
BLI_halton_3d(primes, offset, samp, r);
|
||||
EEVEE_update_noise(psl, fbl, r);
|
||||
EEVEE_volumes_set_jitter(sldata, samp - 1);
|
||||
EEVEE_materials_init(sldata, vedata, stl, fbl);
|
||||
EEVEE_materials_init(sldata, static_cast<EEVEE_Data *>(vedata), stl, fbl);
|
||||
}
|
||||
/* Copy previous persmat to UBO data */
|
||||
copy_m4_m4(sldata->common_data.prev_persmat, stl->effects->prev_persmat);
|
||||
@@ -246,14 +249,14 @@ static void eevee_draw_scene(void *vedata)
|
||||
/* Refresh Probes
|
||||
* Shadows needs to be updated for correct probes */
|
||||
DRW_stats_group_start("Probes Refresh");
|
||||
EEVEE_shadows_update(sldata, vedata);
|
||||
EEVEE_lightprobes_refresh(sldata, vedata);
|
||||
EEVEE_lightprobes_refresh_planar(sldata, vedata);
|
||||
EEVEE_shadows_update(sldata, static_cast<EEVEE_Data *>(vedata));
|
||||
EEVEE_lightprobes_refresh(sldata, static_cast<EEVEE_Data *>(vedata));
|
||||
EEVEE_lightprobes_refresh_planar(sldata, static_cast<EEVEE_Data *>(vedata));
|
||||
DRW_stats_group_end();
|
||||
|
||||
/* Refresh shadows */
|
||||
DRW_stats_group_start("Shadows");
|
||||
EEVEE_shadows_draw(sldata, vedata, stl->effects->taa_view);
|
||||
EEVEE_shadows_draw(sldata, static_cast<EEVEE_Data *>(vedata), stl->effects->taa_view);
|
||||
DRW_stats_group_end();
|
||||
|
||||
if (((stl->effects->enabled_effects & EFFECT_TAA) != 0) &&
|
||||
@@ -268,7 +271,7 @@ static void eevee_draw_scene(void *vedata)
|
||||
else if (((stl->effects->enabled_effects & EFFECT_TAA) != 0) &&
|
||||
(stl->effects->taa_current_sample > 1) && DRW_state_is_image_render())
|
||||
{
|
||||
EEVEE_temporal_sampling_update_matrices(vedata);
|
||||
EEVEE_temporal_sampling_update_matrices(static_cast<EEVEE_Data *>(vedata));
|
||||
}
|
||||
|
||||
/* Set ray type. */
|
||||
@@ -293,11 +296,11 @@ static void eevee_draw_scene(void *vedata)
|
||||
|
||||
/* Create minmax texture */
|
||||
DRW_stats_group_start("Main MinMax buffer");
|
||||
EEVEE_create_minmax_buffer(vedata, dtxl->depth, -1);
|
||||
EEVEE_create_minmax_buffer(static_cast<EEVEE_Data *>(vedata), dtxl->depth, -1);
|
||||
DRW_stats_group_end();
|
||||
|
||||
EEVEE_occlusion_compute(sldata, vedata);
|
||||
EEVEE_volumes_compute(sldata, vedata);
|
||||
EEVEE_occlusion_compute(sldata, static_cast<EEVEE_Data *>(vedata));
|
||||
EEVEE_volumes_compute(sldata, static_cast<EEVEE_Data *>(vedata));
|
||||
|
||||
/* Shading pass */
|
||||
DRW_stats_group_start("Shading");
|
||||
@@ -305,17 +308,17 @@ static void eevee_draw_scene(void *vedata)
|
||||
DRW_draw_pass(psl->background_ps);
|
||||
}
|
||||
DRW_draw_pass(psl->material_ps);
|
||||
EEVEE_subsurface_data_render(sldata, vedata);
|
||||
EEVEE_subsurface_data_render(sldata, static_cast<EEVEE_Data *>(vedata));
|
||||
DRW_stats_group_end();
|
||||
|
||||
/* Effects pre-transparency */
|
||||
EEVEE_subsurface_compute(sldata, vedata);
|
||||
EEVEE_reflection_compute(sldata, vedata);
|
||||
EEVEE_occlusion_draw_debug(sldata, vedata);
|
||||
EEVEE_subsurface_compute(sldata, static_cast<EEVEE_Data *>(vedata));
|
||||
EEVEE_reflection_compute(sldata, static_cast<EEVEE_Data *>(vedata));
|
||||
EEVEE_occlusion_draw_debug(sldata, static_cast<EEVEE_Data *>(vedata));
|
||||
if (psl->probe_display) {
|
||||
DRW_draw_pass(psl->probe_display);
|
||||
}
|
||||
EEVEE_refraction_compute(sldata, vedata);
|
||||
EEVEE_refraction_compute(sldata, static_cast<EEVEE_Data *>(vedata));
|
||||
|
||||
/* Opaque refraction */
|
||||
DRW_stats_group_start("Opaque Refraction");
|
||||
@@ -324,13 +327,13 @@ static void eevee_draw_scene(void *vedata)
|
||||
DRW_stats_group_end();
|
||||
|
||||
/* Volumetrics Resolve Opaque */
|
||||
EEVEE_volumes_resolve(sldata, vedata);
|
||||
EEVEE_volumes_resolve(sldata, static_cast<EEVEE_Data *>(vedata));
|
||||
|
||||
/* Render-passes. */
|
||||
EEVEE_renderpasses_output_accumulate(sldata, vedata, false);
|
||||
EEVEE_renderpasses_output_accumulate(sldata, static_cast<EEVEE_Data *>(vedata), false);
|
||||
|
||||
/* Transparent */
|
||||
EEVEE_material_transparent_output_accumulate(vedata);
|
||||
EEVEE_material_transparent_output_accumulate(static_cast<EEVEE_Data *>(vedata));
|
||||
/* TODO(@fclem): should be its own Frame-buffer.
|
||||
* This is needed because dual-source blending only works with 1 color buffer. */
|
||||
GPU_framebuffer_texture_attach(fbl->main_color_fb, dtxl->depth, 0, 0);
|
||||
@@ -341,10 +344,10 @@ static void eevee_draw_scene(void *vedata)
|
||||
|
||||
/* Post Process */
|
||||
DRW_stats_group_start("Post FX");
|
||||
EEVEE_draw_effects(sldata, vedata);
|
||||
EEVEE_draw_effects(sldata, static_cast<EEVEE_Data *>(vedata));
|
||||
DRW_stats_group_end();
|
||||
|
||||
DRW_view_set_active(NULL);
|
||||
DRW_view_set_active(nullptr);
|
||||
|
||||
if (DRW_state_is_image_render() && (stl->effects->enabled_effects & EFFECT_SSR) &&
|
||||
!stl->effects->ssr_was_valid_double_buffer)
|
||||
@@ -353,7 +356,7 @@ static void eevee_draw_scene(void *vedata)
|
||||
loop_len++;
|
||||
/* Reset sampling (and accumulation) after the first sample to avoid
|
||||
* washed out first bounce for SSR. */
|
||||
EEVEE_temporal_sampling_reset(vedata);
|
||||
EEVEE_temporal_sampling_reset(static_cast<EEVEE_Data *>(vedata));
|
||||
stl->effects->ssr_was_valid_double_buffer = stl->g_data->valid_double_buffer;
|
||||
}
|
||||
|
||||
@@ -374,7 +377,7 @@ static void eevee_draw_scene(void *vedata)
|
||||
DRW_transform_none(stl->effects->final_tx);
|
||||
}
|
||||
else {
|
||||
EEVEE_renderpasses_draw(sldata, vedata);
|
||||
EEVEE_renderpasses_draw(sldata, static_cast<EEVEE_Data *>(vedata));
|
||||
}
|
||||
|
||||
if (stl->effects->bypass_drawing) {
|
||||
@@ -382,11 +385,11 @@ static void eevee_draw_scene(void *vedata)
|
||||
GPU_framebuffer_blit(fbl->double_buffer_depth_fb, 0, dfbl->default_fb, 0, GPU_DEPTH_BIT);
|
||||
}
|
||||
|
||||
EEVEE_renderpasses_draw_debug(vedata);
|
||||
EEVEE_renderpasses_draw_debug(static_cast<EEVEE_Data *>(vedata));
|
||||
|
||||
stl->g_data->view_updated = false;
|
||||
|
||||
DRW_view_set_active(NULL);
|
||||
DRW_view_set_active(nullptr);
|
||||
}
|
||||
|
||||
static void eevee_view_update(void *vedata)
|
||||
@@ -397,20 +400,20 @@ static void eevee_view_update(void *vedata)
|
||||
}
|
||||
}
|
||||
|
||||
static void eevee_id_object_update(void *UNUSED(vedata), Object *object)
|
||||
static void eevee_id_object_update(void * /*vedata*/, Object *object)
|
||||
{
|
||||
EEVEE_LightProbeEngineData *ped = EEVEE_lightprobe_data_get(object);
|
||||
if (ped != NULL && ped->dd.recalc != 0) {
|
||||
if (ped != nullptr && ped->dd.recalc != 0) {
|
||||
ped->need_update = (ped->dd.recalc & ID_RECALC_TRANSFORM) != 0;
|
||||
ped->dd.recalc = 0;
|
||||
}
|
||||
EEVEE_LightEngineData *led = EEVEE_light_data_get(object);
|
||||
if (led != NULL && led->dd.recalc != 0) {
|
||||
if (led != nullptr && led->dd.recalc != 0) {
|
||||
led->need_update = true;
|
||||
led->dd.recalc = 0;
|
||||
}
|
||||
EEVEE_ObjectEngineData *oedata = EEVEE_object_data_get(object);
|
||||
if (oedata != NULL && oedata->dd.recalc != 0) {
|
||||
if (oedata != nullptr && oedata->dd.recalc != 0) {
|
||||
oedata->need_update = true;
|
||||
oedata->geom_update = (oedata->dd.recalc & (ID_RECALC_GEOMETRY)) != 0;
|
||||
oedata->dd.recalc = 0;
|
||||
@@ -422,14 +425,14 @@ static void eevee_id_world_update(void *vedata, World *wo)
|
||||
EEVEE_StorageList *stl = ((EEVEE_Data *)vedata)->stl;
|
||||
LightCache *lcache = stl->g_data->light_cache;
|
||||
|
||||
if (ELEM(lcache, NULL, stl->lookdev_lightcache)) {
|
||||
if (ELEM(lcache, nullptr, stl->lookdev_lightcache)) {
|
||||
/* Avoid Lookdev viewport clearing the update flag (see #67741). */
|
||||
return;
|
||||
}
|
||||
|
||||
EEVEE_WorldEngineData *wedata = EEVEE_world_data_ensure(wo);
|
||||
|
||||
if (wedata != NULL && wedata->dd.recalc != 0) {
|
||||
if (wedata != nullptr && wedata->dd.recalc != 0) {
|
||||
if ((lcache->flag & LIGHTCACHE_BAKING) == 0) {
|
||||
lcache->flag |= LIGHTCACHE_UPDATE_WORLD;
|
||||
}
|
||||
@@ -472,7 +475,7 @@ static void eevee_render_to_image(void *vedata,
|
||||
const bool do_motion_blur = (scene->eevee.flag & SCE_EEVEE_MOTION_BLUR_ENABLED) != 0;
|
||||
const bool do_motion_blur_fx = do_motion_blur && (scene->eevee.motion_blur_max > 0);
|
||||
|
||||
if (!EEVEE_render_init(vedata, engine, depsgraph)) {
|
||||
if (!EEVEE_render_init(static_cast<EEVEE_Data *>(vedata), engine, depsgraph)) {
|
||||
return;
|
||||
}
|
||||
EEVEE_PrivateData *g_data = ved->stl->g_data;
|
||||
@@ -483,13 +486,13 @@ static void eevee_render_to_image(void *vedata,
|
||||
int time_steps_tot = (do_motion_blur) ? max_ii(1, scene->eevee.motion_blur_steps) : 1;
|
||||
g_data->render_timesteps = time_steps_tot;
|
||||
|
||||
EEVEE_render_modules_init(vedata, engine, depsgraph);
|
||||
EEVEE_render_modules_init(static_cast<EEVEE_Data *>(vedata), engine, depsgraph);
|
||||
|
||||
g_data->render_sample_count_per_timestep = EEVEE_temporal_sampling_sample_count_get(scene,
|
||||
ved->stl);
|
||||
|
||||
/* Reset in case the same engine is used on multiple views. */
|
||||
EEVEE_temporal_sampling_reset(vedata);
|
||||
EEVEE_temporal_sampling_reset(static_cast<EEVEE_Data *>(vedata));
|
||||
|
||||
/* Compute start time. The motion blur will cover `[time ...time + shuttertime]`. */
|
||||
float time = initial_frame + initial_subframe;
|
||||
@@ -520,16 +523,16 @@ static void eevee_render_to_image(void *vedata,
|
||||
if (i == 0) {
|
||||
EEVEE_motion_blur_step_set(ved, MB_PREV);
|
||||
DRW_render_set_time(engine, depsgraph, floorf(time_prev), fractf(time_prev));
|
||||
EEVEE_render_modules_init(vedata, engine, depsgraph);
|
||||
EEVEE_render_modules_init(static_cast<EEVEE_Data *>(vedata), engine, depsgraph);
|
||||
sldata = EEVEE_view_layer_data_ensure();
|
||||
|
||||
EEVEE_render_cache_init(sldata, vedata);
|
||||
EEVEE_render_cache_init(sldata, static_cast<EEVEE_Data *>(vedata));
|
||||
|
||||
DRW_render_object_iter(vedata, engine, depsgraph, EEVEE_render_cache);
|
||||
|
||||
EEVEE_motion_blur_cache_finish(vedata);
|
||||
EEVEE_materials_cache_finish(sldata, vedata);
|
||||
eevee_render_reset_passes(vedata);
|
||||
EEVEE_motion_blur_cache_finish(static_cast<EEVEE_Data *>(vedata));
|
||||
EEVEE_materials_cache_finish(sldata, static_cast<EEVEE_Data *>(vedata));
|
||||
eevee_render_reset_passes(static_cast<EEVEE_Data *>(vedata));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -537,16 +540,16 @@ static void eevee_render_to_image(void *vedata,
|
||||
if (do_motion_blur_fx) {
|
||||
EEVEE_motion_blur_step_set(ved, MB_NEXT);
|
||||
DRW_render_set_time(engine, depsgraph, floorf(time_next), fractf(time_next));
|
||||
EEVEE_render_modules_init(vedata, engine, depsgraph);
|
||||
EEVEE_render_modules_init(static_cast<EEVEE_Data *>(vedata), engine, depsgraph);
|
||||
sldata = EEVEE_view_layer_data_ensure();
|
||||
|
||||
EEVEE_render_cache_init(sldata, vedata);
|
||||
EEVEE_render_cache_init(sldata, static_cast<EEVEE_Data *>(vedata));
|
||||
|
||||
DRW_render_object_iter(vedata, engine, depsgraph, EEVEE_render_cache);
|
||||
|
||||
EEVEE_motion_blur_cache_finish(vedata);
|
||||
EEVEE_materials_cache_finish(sldata, vedata);
|
||||
eevee_render_reset_passes(vedata);
|
||||
EEVEE_motion_blur_cache_finish(static_cast<EEVEE_Data *>(vedata));
|
||||
EEVEE_materials_cache_finish(sldata, static_cast<EEVEE_Data *>(vedata));
|
||||
eevee_render_reset_passes(static_cast<EEVEE_Data *>(vedata));
|
||||
}
|
||||
|
||||
/* Current motion step. */
|
||||
@@ -554,38 +557,39 @@ static void eevee_render_to_image(void *vedata,
|
||||
if (do_motion_blur) {
|
||||
EEVEE_motion_blur_step_set(ved, MB_CURR);
|
||||
DRW_render_set_time(engine, depsgraph, floorf(time_curr), fractf(time_curr));
|
||||
EEVEE_render_modules_init(vedata, engine, depsgraph);
|
||||
EEVEE_render_modules_init(static_cast<EEVEE_Data *>(vedata), engine, depsgraph);
|
||||
sldata = EEVEE_view_layer_data_ensure();
|
||||
}
|
||||
|
||||
EEVEE_render_cache_init(sldata, vedata);
|
||||
EEVEE_render_cache_init(sldata, static_cast<EEVEE_Data *>(vedata));
|
||||
|
||||
DRW_render_object_iter(vedata, engine, depsgraph, EEVEE_render_cache);
|
||||
|
||||
EEVEE_motion_blur_cache_finish(vedata);
|
||||
EEVEE_volumes_cache_finish(sldata, vedata);
|
||||
EEVEE_materials_cache_finish(sldata, vedata);
|
||||
EEVEE_lights_cache_finish(sldata, vedata);
|
||||
EEVEE_lightprobes_cache_finish(sldata, vedata);
|
||||
EEVEE_renderpasses_cache_finish(sldata, vedata);
|
||||
EEVEE_motion_blur_cache_finish(static_cast<EEVEE_Data *>(vedata));
|
||||
EEVEE_volumes_cache_finish(sldata, static_cast<EEVEE_Data *>(vedata));
|
||||
EEVEE_materials_cache_finish(sldata, static_cast<EEVEE_Data *>(vedata));
|
||||
EEVEE_lights_cache_finish(sldata, static_cast<EEVEE_Data *>(vedata));
|
||||
EEVEE_lightprobes_cache_finish(sldata, static_cast<EEVEE_Data *>(vedata));
|
||||
EEVEE_renderpasses_cache_finish(sldata, static_cast<EEVEE_Data *>(vedata));
|
||||
|
||||
EEVEE_subsurface_draw_init(sldata, vedata);
|
||||
EEVEE_effects_draw_init(sldata, vedata);
|
||||
EEVEE_volumes_draw_init(sldata, vedata);
|
||||
EEVEE_subsurface_draw_init(sldata, static_cast<EEVEE_Data *>(vedata));
|
||||
EEVEE_effects_draw_init(sldata, static_cast<EEVEE_Data *>(vedata));
|
||||
EEVEE_volumes_draw_init(sldata, static_cast<EEVEE_Data *>(vedata));
|
||||
}
|
||||
|
||||
/* Actual drawing. */
|
||||
{
|
||||
EEVEE_renderpasses_output_init(
|
||||
sldata, vedata, g_data->render_sample_count_per_timestep * time_steps_tot);
|
||||
EEVEE_renderpasses_output_init(sldata,
|
||||
static_cast<EEVEE_Data *>(vedata),
|
||||
g_data->render_sample_count_per_timestep * time_steps_tot);
|
||||
|
||||
if (scene->world) {
|
||||
/* Update world in case of animated world material. */
|
||||
eevee_id_world_update(vedata, scene->world);
|
||||
}
|
||||
|
||||
EEVEE_temporal_sampling_create_view(vedata);
|
||||
EEVEE_render_draw(vedata, engine, render_layer, rect);
|
||||
EEVEE_temporal_sampling_create_view(static_cast<EEVEE_Data *>(vedata));
|
||||
EEVEE_render_draw(static_cast<EEVEE_Data *>(vedata), engine, render_layer, rect);
|
||||
|
||||
if (i < time_steps_tot - 1) {
|
||||
/* Don't reset after the last loop. Since EEVEE_render_read_result
|
||||
@@ -599,7 +603,7 @@ static void eevee_render_to_image(void *vedata,
|
||||
* So we just swap the resources to avoid too much re-evaluation.
|
||||
* Note that this also clears the VBO references from the GPUBatches of deformed
|
||||
* geometries. */
|
||||
EEVEE_motion_blur_swap_data(vedata);
|
||||
EEVEE_motion_blur_swap_data(static_cast<EEVEE_Data *>(vedata));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -609,10 +613,11 @@ static void eevee_render_to_image(void *vedata,
return;
}

EEVEE_render_read_result(vedata, engine, render_layer, rect);
EEVEE_render_read_result(static_cast<EEVEE_Data *>(vedata), engine, render_layer, rect);

/* Restore original viewport size. */
DRW_render_viewport_size_set((int[2]){g_data->size_orig[0], g_data->size_orig[1]});
int viewport_size[2] = {int(g_data->size_orig[0]), int(g_data->size_orig[1])};
DRW_render_viewport_size_set(viewport_size);
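Note: `(int[2]){...}` is a C99 compound literal, which C++ does not have, so the temporary array gets a named local (plus explicit `int` casts, since `size_orig` is stored as floats). Sketch with a stand-in function, illustrative only:

void set_viewport_size_demo(const int /*size*/[2]) {}

void viewport_restore_demo(const float size_orig[2])
{
  /* set_viewport_size_demo((int[2]){size_orig[0], size_orig[1]});    C-only compound literal. */
  int viewport_size[2] = {int(size_orig[0]), int(size_orig[1])};
  set_viewport_size_demo(viewport_size);
}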

if (scene->r.cfra != initial_frame || scene->r.subframe != initial_subframe) {
/* Restore original frame number. This is because the render pipeline expects it. */
@@ -642,13 +647,13 @@ static void eevee_engine_free(void)
static const DrawEngineDataSize eevee_data_size = DRW_VIEWPORT_DATA_SIZE(EEVEE_Data);

DrawEngineType draw_engine_eevee_type = {
/*next*/ NULL,
/*prev*/ NULL,
/*next*/ nullptr,
/*prev*/ nullptr,
/*idname*/ N_("Eevee"),
/*vedata_size*/ &eevee_data_size,
/*engine_init*/ &eevee_engine_init,
/*engine_free*/ &eevee_engine_free,
/*instance_free*/ /*instance_free*/ NULL,
/*instance_free*/ /*instance_free*/ nullptr,
/*cache_init*/ &eevee_cache_init,
/*cache_populate*/ &EEVEE_cache_populate,
/*cache_finish*/ &eevee_cache_finish,
@@ -660,26 +665,26 @@ DrawEngineType draw_engine_eevee_type = {
};

RenderEngineType DRW_engine_viewport_eevee_type = {
/*next*/ NULL,
/*prev*/ NULL,
/*next*/ nullptr,
/*prev*/ nullptr,
/*idname*/ EEVEE_ENGINE,
/*name*/ N_("Eevee"),
/*flag*/ RE_INTERNAL | RE_USE_PREVIEW | RE_USE_STEREO_VIEWPORT | RE_USE_GPU_CONTEXT,
/*update*/ NULL,
/*update*/ nullptr,
/*render*/ &DRW_render_to_image,
/*render_frame_finish*/ NULL,
/*draw*/ NULL,
/*bake*/ NULL,
/*view_update*/ NULL,
/*view_draw*/ NULL,
/*update_script_node*/ NULL,
/*render_frame_finish*/ nullptr,
/*draw*/ nullptr,
/*bake*/ nullptr,
/*view_update*/ nullptr,
/*view_draw*/ nullptr,
/*update_script_node*/ nullptr,
/*update_render_passes*/ &EEVEE_render_update_passes,
/*draw_engine*/ &draw_engine_eevee_type,
/*rna_ext*/
{
/*data*/ NULL,
/*srna*/ NULL,
/*call*/ NULL,
/*data*/ nullptr,
/*srna*/ nullptr,
/*call*/ nullptr,
},
};

@@ -8,4 +8,12 @@

#pragma once

#ifdef __cplusplus
extern "C" {
#endif

extern RenderEngineType DRW_engine_viewport_eevee_type;

#ifdef __cplusplus
}
#endif

@@ -62,15 +62,15 @@
(IRRADIANCE_MAX_POOL_SIZE / IRRADIANCE_SAMPLE_SIZE_Y)

/* TODO: should be replace by a more elegant alternative. */
extern void DRW_gpu_context_enable(void);
extern void DRW_gpu_context_disable(void);
extern "C" void DRW_gpu_context_enable(void);
extern "C" void DRW_gpu_context_disable(void);

extern void DRW_system_gpu_render_context_enable(void *re_system_gpu_context);
extern void DRW_system_gpu_render_context_disable(void *re_system_gpu_context);
extern void DRW_blender_gpu_render_context_enable(void *re_blender_gpu_context);
extern void DRW_blender_gpu_render_context_disable(void *re_blender_gpu_context);
extern "C" void DRW_system_gpu_render_context_enable(void *re_system_gpu_context);
extern "C" void DRW_system_gpu_render_context_disable(void *re_system_gpu_context);
extern "C" void DRW_blender_gpu_render_context_enable(void *re_blender_gpu_context);
extern "C" void DRW_blender_gpu_render_context_disable(void *re_blender_gpu_context);

typedef struct EEVEE_LightBake {
struct EEVEE_LightBake {
Depsgraph *depsgraph;
ViewLayer *view_layer;
ViewLayer *view_layer_input;
@@ -160,7 +160,7 @@ typedef struct EEVEE_LightBake {
void *system_gpu_context, *blender_gpu_context;

ThreadMutex *mutex;
} EEVEE_LightBake;
};
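Note: the `typedef struct X { ... } X;` idiom is only needed in C; in C++ the struct tag is already a type name, so the wrapper typedef is dropped. Sketch:

struct LightBakeDemo {
  int frame;
};
/* C++: `LightBakeDemo bake;` works directly.
 * C required: typedef struct LightBakeDemo { int frame; } LightBakeDemo; */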
|
||||
|
||||
/* -------------------------------------------------------------------- */
|
||||
/** \name Light Cache
|
||||
@@ -222,7 +222,7 @@ void EEVEE_lightcache_info_update(SceneEEVEE *eevee)
|
||||
{
|
||||
LightCache *lcache = eevee->light_cache_data;
|
||||
|
||||
if (lcache != NULL) {
|
||||
if (lcache != nullptr) {
|
||||
if (!eevee_lightcache_version_check(lcache)) {
|
||||
BLI_strncpy(eevee->light_cache_info,
|
||||
TIP_("Incompatible Light cache version, please bake again"),
|
||||
@@ -292,7 +292,7 @@ static bool EEVEE_lightcache_validate(const LightCache *light_cache,
|
||||
const int grid_len,
|
||||
const int irr_size[3])
|
||||
{
|
||||
if (light_cache == NULL) {
|
||||
if (light_cache == nullptr) {
|
||||
return false;
|
||||
}
|
||||
if (!eevee_lightcache_version_check(light_cache)) {
|
||||
@@ -325,16 +325,19 @@ LightCache *EEVEE_lightcache_create(const int grid_len,
|
||||
{
|
||||
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_ATTACHMENT |
|
||||
GPU_TEXTURE_USAGE_HOST_READ;
|
||||
LightCache *light_cache = MEM_callocN(sizeof(LightCache), "LightCache");
|
||||
LightCache *light_cache = static_cast<LightCache *>(
|
||||
MEM_callocN(sizeof(LightCache), "LightCache"));
|
||||
|
||||
light_cache->version = LIGHTCACHE_STATIC_VERSION;
|
||||
light_cache->type = LIGHTCACHE_TYPE_STATIC;
|
||||
|
||||
light_cache->cube_data = MEM_callocN(sizeof(EEVEE_LightProbe) * cube_len, "EEVEE_LightProbe");
|
||||
light_cache->grid_data = MEM_callocN(sizeof(EEVEE_LightGrid) * grid_len, "EEVEE_LightGrid");
|
||||
light_cache->cube_data = static_cast<LightProbeCache *>(
|
||||
MEM_callocN(sizeof(EEVEE_LightProbe) * cube_len, "EEVEE_LightProbe"));
|
||||
light_cache->grid_data = static_cast<LightGridCache *>(
|
||||
MEM_callocN(sizeof(EEVEE_LightGrid) * grid_len, "EEVEE_LightGrid"));
|
||||
|
||||
light_cache->grid_tx.tex = DRW_texture_create_2d_array_ex(
|
||||
irr_size[0], irr_size[1], irr_size[2], IRRADIANCE_FORMAT, usage, DRW_TEX_FILTER, NULL);
|
||||
irr_size[0], irr_size[1], irr_size[2], IRRADIANCE_FORMAT, usage, DRW_TEX_FILTER, nullptr);
|
||||
light_cache->grid_tx.tex_size[0] = irr_size[0];
|
||||
light_cache->grid_tx.tex_size[1] = irr_size[1];
|
||||
light_cache->grid_tx.tex_size[2] = irr_size[2];
|
||||
@@ -342,13 +345,13 @@ LightCache *EEVEE_lightcache_create(const int grid_len,
|
||||
int mips_len = log2_floor_u(cube_size) - MIN_CUBE_LOD_LEVEL;
|
||||
|
||||
/* Try to create a cubemap array. */
|
||||
DRWTextureFlag cube_texflag = DRW_TEX_FILTER | DRW_TEX_MIPMAP;
|
||||
DRWTextureFlag cube_texflag = DRWTextureFlag(DRW_TEX_FILTER | DRW_TEX_MIPMAP);
|
||||
light_cache->cube_tx.tex = DRW_texture_create_cube_array_ex(
|
||||
cube_size, cube_len, GPU_R11F_G11F_B10F, usage, cube_texflag, NULL);
|
||||
if (light_cache->cube_tx.tex == NULL) {
|
||||
cube_size, cube_len, GPU_R11F_G11F_B10F, usage, cube_texflag, nullptr);
|
||||
if (light_cache->cube_tx.tex == nullptr) {
|
||||
/* Try fallback to 2D array. */
|
||||
light_cache->cube_tx.tex = DRW_texture_create_2d_array_ex(
|
||||
cube_size, cube_size, cube_len * 6, GPU_R11F_G11F_B10F, usage, cube_texflag, NULL);
|
||||
cube_size, cube_size, cube_len * 6, GPU_R11F_G11F_B10F, usage, cube_texflag, nullptr);
|
||||
}
|
||||
|
||||
light_cache->cube_tx.tex_size[0] = cube_size;
|
||||
@@ -359,10 +362,10 @@ LightCache *EEVEE_lightcache_create(const int grid_len,
|
||||
light_cache->vis_res = vis_size;
|
||||
light_cache->ref_res = cube_size;
|
||||
|
||||
light_cache->cube_mips = MEM_callocN(sizeof(LightCacheTexture) * light_cache->mips_len,
|
||||
"LightCacheTexture");
|
||||
light_cache->cube_mips = static_cast<LightCacheTexture *>(
|
||||
MEM_callocN(sizeof(LightCacheTexture) * light_cache->mips_len, "LightCacheTexture"));
|
||||
|
||||
if (light_cache->grid_tx.tex == NULL || light_cache->cube_tx.tex == NULL) {
|
||||
if (light_cache->grid_tx.tex == nullptr || light_cache->cube_tx.tex == nullptr) {
|
||||
/* We could not create the requested textures size. Stop baking and do not use the cache. */
|
||||
light_cache->flag = LIGHTCACHE_INVALID;
|
||||
}
|
||||
@@ -391,7 +394,7 @@ static bool eevee_lightcache_static_load(LightCache *lcache)
|
||||
return false;
|
||||
}
|
||||
|
||||
if (lcache->grid_tx.tex == NULL) {
|
||||
if (lcache->grid_tx.tex == nullptr) {
|
||||
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_ATTACHMENT |
|
||||
GPU_TEXTURE_USAGE_HOST_READ;
|
||||
lcache->grid_tx.tex = GPU_texture_create_2d_array("lightcache_irradiance",
|
||||
@@ -399,10 +402,10 @@ static bool eevee_lightcache_static_load(LightCache *lcache)
|
||||
1,
|
||||
IRRADIANCE_FORMAT,
|
||||
usage,
|
||||
NULL);
|
||||
nullptr);
|
||||
GPU_texture_update(lcache->grid_tx.tex, GPU_DATA_UBYTE, lcache->grid_tx.data);
|
||||
|
||||
if (lcache->grid_tx.tex == NULL) {
|
||||
if (lcache->grid_tx.tex == nullptr) {
|
||||
lcache->flag |= LIGHTCACHE_NOT_USABLE;
|
||||
return false;
|
||||
}
|
||||
@@ -410,7 +413,7 @@ static bool eevee_lightcache_static_load(LightCache *lcache)
|
||||
GPU_texture_filter_mode(lcache->grid_tx.tex, true);
|
||||
}
|
||||
|
||||
if (lcache->cube_tx.tex == NULL) {
|
||||
if (lcache->cube_tx.tex == nullptr) {
|
||||
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_ATTACHMENT |
|
||||
GPU_TEXTURE_USAGE_HOST_READ;
|
||||
|
||||
@@ -421,9 +424,9 @@ static bool eevee_lightcache_static_load(LightCache *lcache)
|
||||
lcache->mips_len + 1,
|
||||
GPU_R11F_G11F_B10F,
|
||||
usage,
|
||||
NULL);
|
||||
nullptr);
|
||||
|
||||
if (lcache->cube_tx.tex == NULL) {
|
||||
if (lcache->cube_tx.tex == nullptr) {
|
||||
/* Try fallback to 2D array. */
|
||||
|
||||
lcache->cube_tx.tex = GPU_texture_create_2d_array("lightcache_cubemaps_fallback",
|
||||
@@ -431,10 +434,10 @@ static bool eevee_lightcache_static_load(LightCache *lcache)
|
||||
lcache->mips_len + 1,
|
||||
GPU_R11F_G11F_B10F,
|
||||
usage,
|
||||
NULL);
|
||||
nullptr);
|
||||
}
|
||||
|
||||
if (lcache->cube_tx.tex == NULL) {
|
||||
if (lcache->cube_tx.tex == nullptr) {
|
||||
lcache->flag |= LIGHTCACHE_NOT_USABLE;
|
||||
return false;
|
||||
}
|
||||
@@ -450,7 +453,7 @@ static bool eevee_lightcache_static_load(LightCache *lcache)
|
||||
|
||||
bool EEVEE_lightcache_load(LightCache *lcache)
|
||||
{
|
||||
if (lcache == NULL) {
|
||||
if (lcache == nullptr) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -473,7 +476,8 @@ bool EEVEE_lightcache_load(LightCache *lcache)
|
||||
static void eevee_lightbake_readback_irradiance(LightCache *lcache)
|
||||
{
|
||||
MEM_SAFE_FREE(lcache->grid_tx.data);
|
||||
lcache->grid_tx.data = GPU_texture_read(lcache->grid_tx.tex, GPU_DATA_UBYTE, 0);
|
||||
lcache->grid_tx.data = static_cast<char *>(
|
||||
GPU_texture_read(lcache->grid_tx.tex, GPU_DATA_UBYTE, 0));
|
||||
lcache->grid_tx.data_type = LIGHTCACHETEX_BYTE;
|
||||
lcache->grid_tx.components = 4;
|
||||
}
|
||||
@@ -481,7 +485,8 @@ static void eevee_lightbake_readback_irradiance(LightCache *lcache)
|
||||
static void eevee_lightbake_readback_reflections(LightCache *lcache)
|
||||
{
|
||||
MEM_SAFE_FREE(lcache->cube_tx.data);
|
||||
lcache->cube_tx.data = GPU_texture_read(lcache->cube_tx.tex, GPU_DATA_10_11_11_REV, 0);
|
||||
lcache->cube_tx.data = static_cast<char *>(
|
||||
GPU_texture_read(lcache->cube_tx.tex, GPU_DATA_10_11_11_REV, 0));
|
||||
lcache->cube_tx.data_type = LIGHTCACHETEX_UINT;
|
||||
lcache->cube_tx.components = 1;
|
||||
|
||||
@@ -490,7 +495,8 @@ static void eevee_lightbake_readback_reflections(LightCache *lcache)
|
||||
MEM_SAFE_FREE(cube_mip->data);
|
||||
GPU_texture_get_mipmap_size(lcache->cube_tx.tex, mip + 1, cube_mip->tex_size);
|
||||
|
||||
cube_mip->data = GPU_texture_read(lcache->cube_tx.tex, GPU_DATA_10_11_11_REV, mip + 1);
|
||||
cube_mip->data = static_cast<char *>(
|
||||
GPU_texture_read(lcache->cube_tx.tex, GPU_DATA_10_11_11_REV, mip + 1));
|
||||
cube_mip->data_type = LIGHTCACHETEX_UINT;
|
||||
cube_mip->components = 1;
|
||||
}
|
||||
@@ -552,7 +558,7 @@ void EEVEE_lightcache_blend_write(BlendWriter *writer, LightCache *cache)
|
||||
|
||||
static void direct_link_lightcache_texture(BlendDataReader *reader, LightCacheTexture *lctex)
|
||||
{
|
||||
lctex->tex = NULL;
|
||||
lctex->tex = nullptr;
|
||||
|
||||
if (lctex->data) {
|
||||
BLO_read_data_address(reader, &lctex->data);
|
||||
@@ -569,7 +575,7 @@ static void direct_link_lightcache_texture(BlendDataReader *reader, LightCacheTe
|
||||
}
|
||||
}
|
||||
|
||||
if (lctex->data == NULL) {
|
||||
if (lctex->data == nullptr) {
|
||||
zero_v3_int(lctex->tex_size);
|
||||
}
|
||||
}
|
||||
@@ -608,8 +614,8 @@ static void eevee_lightbake_context_enable(EEVEE_LightBake *lbake)
|
||||
|
||||
if (lbake->system_gpu_context) {
|
||||
DRW_system_gpu_render_context_enable(lbake->system_gpu_context);
|
||||
if (lbake->blender_gpu_context == NULL) {
|
||||
lbake->blender_gpu_context = GPU_context_create(NULL, lbake->system_gpu_context);
|
||||
if (lbake->blender_gpu_context == nullptr) {
|
||||
lbake->blender_gpu_context = GPU_context_create(nullptr, lbake->system_gpu_context);
|
||||
}
|
||||
DRW_blender_gpu_render_context_enable(lbake->blender_gpu_context);
|
||||
}
|
||||
@@ -682,9 +688,10 @@ static void eevee_lightbake_create_render_target(EEVEE_LightBake *lbake, int rt_
|
||||
{
|
||||
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_ATTACHMENT |
|
||||
GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW;
|
||||
lbake->rt_depth = DRW_texture_create_cube_ex(rt_res, GPU_DEPTH_COMPONENT24, usage, 0, NULL);
|
||||
lbake->rt_depth = DRW_texture_create_cube_ex(
|
||||
rt_res, GPU_DEPTH_COMPONENT24, usage, DRWTextureFlag(0), nullptr);
|
||||
lbake->rt_color = DRW_texture_create_cube_ex(
|
||||
rt_res, GPU_RGBA16F, usage, DRW_TEX_FILTER | DRW_TEX_MIPMAP, NULL);
|
||||
rt_res, GPU_RGBA16F, usage, DRWTextureFlag(DRW_TEX_FILTER | DRW_TEX_MIPMAP), nullptr);
|
||||
|
||||
for (int i = 0; i < 6; i++) {
|
||||
GPU_framebuffer_ensure_config(&lbake->rt_fb[i],
|
||||
@@ -707,8 +714,10 @@ static void eevee_lightbake_create_resources(EEVEE_LightBake *lbake)
|
||||
irradiance_pool_size_get(lbake->vis_res, lbake->total_irr_samples, lbake->irr_size);
|
||||
|
||||
lbake->ref_cube_res = lbake->rt_res;
|
||||
lbake->cube_prb = MEM_callocN(sizeof(LightProbe *) * lbake->cube_len, "EEVEE Cube visgroup ptr");
|
||||
lbake->grid_prb = MEM_callocN(sizeof(LightProbe *) * lbake->grid_len, "EEVEE Grid visgroup ptr");
|
||||
lbake->cube_prb = static_cast<LightProbe **>(
|
||||
MEM_callocN(sizeof(LightProbe *) * lbake->cube_len, "EEVEE Cube visgroup ptr"));
|
||||
lbake->grid_prb = static_cast<LightProbe **>(
|
||||
MEM_callocN(sizeof(LightProbe *) * lbake->grid_len, "EEVEE Grid visgroup ptr"));
|
||||
|
||||
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_ATTACHMENT |
|
||||
GPU_TEXTURE_USAGE_HOST_READ;
|
||||
@@ -719,7 +728,7 @@ static void eevee_lightbake_create_resources(EEVEE_LightBake *lbake)
|
||||
IRRADIANCE_FORMAT,
|
||||
usage,
|
||||
DRW_TEX_FILTER,
|
||||
NULL);
|
||||
nullptr);
|
||||
|
||||
/* Ensure Light Cache is ready to accept new data. If not recreate one.
|
||||
* WARNING: All the following must be threadsafe. It's currently protected
|
||||
@@ -730,10 +739,10 @@ static void eevee_lightbake_create_resources(EEVEE_LightBake *lbake)
|
||||
if (!EEVEE_lightcache_validate(
|
||||
lbake->lcache, lbake->cube_len, lbake->ref_cube_res, lbake->grid_len, lbake->irr_size))
|
||||
{
|
||||
eevee->light_cache_data = lbake->lcache = NULL;
|
||||
eevee->light_cache_data = lbake->lcache = nullptr;
|
||||
}
|
||||
|
||||
if (lbake->lcache == NULL) {
|
||||
if (lbake->lcache == nullptr) {
|
||||
lbake->lcache = EEVEE_lightcache_create(
|
||||
lbake->grid_len, lbake->cube_len, lbake->ref_cube_res, lbake->vis_res, lbake->irr_size);
|
||||
|
||||
@@ -756,11 +765,11 @@ wmJob *EEVEE_lightbake_job_create(wmWindowManager *wm,
|
||||
int delay,
|
||||
int frame)
|
||||
{
|
||||
EEVEE_LightBake *lbake = NULL;
|
||||
EEVEE_LightBake *lbake = nullptr;
|
||||
|
||||
/* only one render job at a time */
|
||||
if (WM_jobs_test(wm, scene, WM_JOB_TYPE_RENDER)) {
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
wmJob *wm_job = WM_jobs_get(wm,
|
||||
@@ -774,7 +783,8 @@ wmJob *EEVEE_lightbake_job_create(wmWindowManager *wm,
|
||||
EEVEE_LightBake *old_lbake = (EEVEE_LightBake *)WM_jobs_customdata_get(wm_job);
|
||||
|
||||
if (old_lbake && (old_lbake->view_layer_input == view_layer) && (old_lbake->bmain == bmain)) {
|
||||
lbake = MEM_callocN(sizeof(EEVEE_LightBake), "EEVEE_LightBake");
|
||||
lbake = static_cast<EEVEE_LightBake *>(
|
||||
MEM_callocN(sizeof(EEVEE_LightBake), "EEVEE_LightBake"));
|
||||
/* Cannot reuse depsgraph for now because we cannot get the update from the
|
||||
* main database directly. TODO: reuse depsgraph and only update positions. */
|
||||
/* lbake->depsgraph = old_lbake->depsgraph; */
|
||||
@@ -793,25 +803,26 @@ wmJob *EEVEE_lightbake_job_create(wmWindowManager *wm,
|
||||
lbake->delay = delay;
|
||||
lbake->frame = frame;
|
||||
|
||||
if (lbake->system_gpu_context == NULL && !GPU_use_main_context_workaround()) {
|
||||
if (lbake->system_gpu_context == nullptr && !GPU_use_main_context_workaround()) {
|
||||
lbake->system_gpu_context = WM_system_gpu_context_create();
|
||||
wm_window_reset_drawable();
|
||||
}
|
||||
|
||||
if (old_lbake->stop != NULL) {
|
||||
if (old_lbake->stop != nullptr) {
|
||||
*old_lbake->stop = true;
|
||||
}
|
||||
BLI_mutex_unlock(old_lbake->mutex);
|
||||
}
|
||||
else {
|
||||
lbake = EEVEE_lightbake_job_data_alloc(bmain, view_layer, scene, true, frame);
|
||||
lbake = static_cast<EEVEE_LightBake *>(
|
||||
EEVEE_lightbake_job_data_alloc(bmain, view_layer, scene, true, frame));
|
||||
lbake->delay = delay;
|
||||
}
|
||||
|
||||
WM_jobs_customdata_set(wm_job, lbake, EEVEE_lightbake_job_data_free);
|
||||
WM_jobs_timer(wm_job, 0.4, NC_SCENE | NA_EDITED, 0);
|
||||
WM_jobs_callbacks(
|
||||
wm_job, EEVEE_lightbake_job, NULL, EEVEE_lightbake_update, EEVEE_lightbake_update);
|
||||
wm_job, EEVEE_lightbake_job, nullptr, EEVEE_lightbake_update, EEVEE_lightbake_update);
|
||||
|
||||
G.is_break = false;
|
||||
|
||||
@@ -823,7 +834,8 @@ void *EEVEE_lightbake_job_data_alloc(
|
||||
{
|
||||
BLI_assert(BLI_thread_is_main());
|
||||
|
||||
EEVEE_LightBake *lbake = MEM_callocN(sizeof(EEVEE_LightBake), "EEVEE_LightBake");
|
||||
EEVEE_LightBake *lbake = static_cast<EEVEE_LightBake *>(
|
||||
MEM_callocN(sizeof(EEVEE_LightBake), "EEVEE_LightBake"));
|
||||
|
||||
lbake->depsgraph = DEG_graph_new(bmain, scene, view_layer, DAG_EVAL_RENDER);
|
||||
lbake->scene = scene;
|
||||
@@ -890,15 +902,15 @@ static void eevee_lightbake_delete_resources(EEVEE_LightBake *lbake)
if (lbake->blender_gpu_context) {
DRW_blender_gpu_render_context_disable(lbake->blender_gpu_context);
DRW_blender_gpu_render_context_enable(lbake->blender_gpu_context);
GPU_context_discard(lbake->blender_gpu_context);
GPU_context_discard(static_cast<GPUContext *>(lbake->blender_gpu_context));
}

if (lbake->system_gpu_context && lbake->own_resources) {
/* Delete the baking context. */
DRW_system_gpu_render_context_disable(lbake->system_gpu_context);
WM_system_gpu_context_dispose(lbake->system_gpu_context);
lbake->blender_gpu_context = NULL;
lbake->system_gpu_context = NULL;
lbake->blender_gpu_context = nullptr;
lbake->system_gpu_context = nullptr;
}
else if (lbake->system_gpu_context) {
DRW_system_gpu_render_context_disable(lbake->system_gpu_context);
@@ -927,17 +939,17 @@ static void eevee_lightbake_cache_create(EEVEE_Data *vedata, EEVEE_LightBake *lb
scene_eval->eevee.taa_samples = 1;
scene_eval->eevee.gi_irradiance_smoothing = 0.0f;

stl->g_data = MEM_callocN(sizeof(*stl->g_data), __func__);
stl->g_data = static_cast<EEVEE_PrivateData *>(MEM_callocN(sizeof(*stl->g_data), __func__));
stl->g_data->background_alpha = 1.0f;
stl->g_data->render_timesteps = 1;

/* XXX TODO: remove this. This is in order to make the init functions work. */
if (DRW_view_default_get() == NULL) {
if (DRW_view_default_get() == nullptr) {
float winmat[4][4], viewmat[4][4];
unit_m4(viewmat);
unit_m4(winmat);
negate_v3(winmat[2]);
DRWView *view = DRW_view_create(viewmat, winmat, NULL, NULL, NULL);
DRWView *view = DRW_view_create(viewmat, winmat, nullptr, nullptr, nullptr);
DRW_view_default_set(view);
DRW_view_set_active(view);
}
@@ -950,7 +962,7 @@ static void eevee_lightbake_cache_create(EEVEE_Data *vedata, EEVEE_LightBake *lb
};
DRW_render_viewport_size_set(viewport_size);

EEVEE_effects_init(sldata, vedata, NULL, true);
EEVEE_effects_init(sldata, vedata, nullptr, true);
EEVEE_materials_init(sldata, vedata, stl, fbl);
EEVEE_shadows_init(sldata);
EEVEE_lightprobes_init(sldata, vedata);
@@ -971,7 +983,7 @@ static void eevee_lightbake_cache_create(EEVEE_Data *vedata, EEVEE_LightBake *lb
pinfo->vis_data.invert = prb->flag & LIGHTPROBE_FLAG_INVERT_GROUP;
pinfo->vis_data.cached = false;
}
DRW_render_object_iter(vedata, NULL, lbake->depsgraph, EEVEE_render_cache);
DRW_render_object_iter(vedata, nullptr, lbake->depsgraph, EEVEE_render_cache);

EEVEE_volumes_cache_finish(sldata, vedata);
EEVEE_materials_cache_finish(sldata, vedata);
@@ -986,7 +998,7 @@ static void eevee_lightbake_cache_create(EEVEE_Data *vedata, EEVEE_LightBake *lb
EEVEE_effects_draw_init(sldata, vedata);
EEVEE_volumes_draw_init(sldata, vedata);

txl->color = NULL;
txl->color = nullptr;

DRW_render_instance_buffer_finish();
DRW_curves_update();
@@ -997,7 +1009,7 @@ static void eevee_lightbake_copy_irradiance(EEVEE_LightBake *lbake, LightCache *
DRW_TEXTURE_FREE_SAFE(lbake->grid_prev);

/* Copy texture by reading back and re-uploading it. */
float *tex = GPU_texture_read(lcache->grid_tx.tex, GPU_DATA_FLOAT, 0);
float *tex = static_cast<float *>(GPU_texture_read(lcache->grid_tx.tex, GPU_DATA_FLOAT, 0));

eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_ATTACHMENT |
GPU_TEXTURE_USAGE_HOST_READ;
@@ -1050,7 +1062,7 @@ static void eevee_lightbake_render_world_sample(void *ved, void *user_data)
GPU_framebuffer_texture_attach(lbake->store_fb, lbake->grid_prev, 0, 0);
GPU_framebuffer_bind(lbake->store_fb);
/* Clear to 1.0f for visibility. */
GPU_framebuffer_clear_color(lbake->store_fb, ((float[4]){1.0f, 1.0f, 1.0f, 1.0f}));
GPU_framebuffer_clear_color(lbake->store_fb, blender::float4{1.0f, 1.0f, 1.0f, 1.0f});
DRW_draw_pass(vedata->psl->probe_grid_fill);

SWAP(GPUTexture *, lbake->grid_prev, lcache->grid_tx.tex);
@@ -1358,7 +1370,7 @@ void EEVEE_lightbake_update(void *custom_data)

/* If a new light-cache was created, free the old one and reference the new. */
if (lbake->lcache && scene_orig->eevee.light_cache_data != lbake->lcache) {
if (scene_orig->eevee.light_cache_data != NULL) {
if (scene_orig->eevee.light_cache_data != nullptr) {
EEVEE_lightcache_free(scene_orig->eevee.light_cache_data);
}
scene_orig->eevee.light_cache_data = lbake->lcache;
@@ -1445,7 +1457,7 @@ void EEVEE_lightbake_job(void *custom_data, bool *stop, bool *do_update, float *

/* Render world irradiance and reflection first */
if (lcache->flag & LIGHTCACHE_UPDATE_WORLD) {
lbake->probe = NULL;
lbake->probe = nullptr;
lightbake_do_sample(lbake, eevee_lightbake_render_world_sample);
}

@@ -1490,10 +1502,10 @@ void EEVEE_lightbake_job(void *custom_data, bool *stop, bool *do_update, float *
lcache->flag |= LIGHTCACHE_BAKED;
lcache->flag &= ~LIGHTCACHE_BAKING;

/* Assume that if lbake->system_gpu_context is NULL
/* Assume that if lbake->system_gpu_context is nullptr
* we are not running in this in a job, so update
* the scene light-cache pointer before deleting it. */
if (lbake->system_gpu_context == NULL) {
if (lbake->system_gpu_context == nullptr) {
BLI_assert(BLI_thread_is_main());
EEVEE_lightbake_update(lbake);
}
@@ -1509,9 +1521,8 @@ void EEVEE_lightbake_update_world_quick(EEVEE_ViewLayerData *sldata,
float clamp = scene->eevee.gi_glossy_clamp;
float filter_quality = scene->eevee.gi_filter_quality;

EEVEE_LightBake lbake = {
.resource_only = true,
};
EEVEE_LightBake lbake{};
lbake.resource_only = true;

/* Create resources. */
eevee_lightbake_create_render_target(&lbake, scene->eevee.gi_cubemap_resolution);
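The `EEVEE_LightBake lbake = { .resource_only = true };` pattern above relies on C99 designated initializers, which the converted C++ code replaces with value-initialization plus member assignment. A small sketch (hypothetical `Params` struct, not the Blender type) of the equivalence:

struct Params {
  bool resource_only;
  int delay;
};

int main()
{
  /* C99: Params p = {.resource_only = true};  (remaining members zeroed). */

  /* C++ equivalent used throughout this commit: zero-initialize, then assign. */
  Params p{};
  p.resource_only = true;
  return p.delay; /* Still 0 thanks to value-initialization. */
}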
@@ -41,7 +41,7 @@ static struct {
GPUTexture *depth_array_placeholder;

GPUVertFormat *format_probe_display_planar;
} e_data = {NULL}; /* Engine data */
} e_data = {nullptr}; /* Engine data */

/* *********** FUNCTIONS *********** */

@@ -51,8 +51,8 @@ bool EEVEE_lightprobes_obj_visibility_cb(bool vis_in, void *user_data)
{
EEVEE_ObjectEngineData *oed = (EEVEE_ObjectEngineData *)user_data;

/* test disabled if group is NULL */
if (oed == NULL || oed->test_data->collection == NULL) {
/* test disabled if group is nullptr */
if (oed == nullptr || oed->test_data->collection == nullptr) {
return vis_in;
}

@@ -108,23 +108,35 @@ static void planar_pool_ensure_alloc(EEVEE_Data *vedata, int num_planar_ref)
eGPUTextureUsage planar_usage_depth = GPU_TEXTURE_USAGE_ATTACHMENT |
GPU_TEXTURE_USAGE_SHADER_READ;
if (num_planar_ref > 0) {
txl->planar_pool = DRW_texture_create_2d_array_ex(width,
height,
num_planar_ref,
GPU_R11F_G11F_B10F,
planar_usage,
DRW_TEX_FILTER | DRW_TEX_MIPMAP,
NULL);
txl->planar_depth = DRW_texture_create_2d_array_ex(
width, height, num_planar_ref, GPU_DEPTH_COMPONENT24, planar_usage_depth, 0, NULL);
txl->planar_pool = DRW_texture_create_2d_array_ex(
width,
height,
num_planar_ref,
GPU_R11F_G11F_B10F,
planar_usage,
DRWTextureFlag(DRW_TEX_FILTER | DRW_TEX_MIPMAP),
nullptr);
txl->planar_depth = DRW_texture_create_2d_array_ex(width,
height,
num_planar_ref,
GPU_DEPTH_COMPONENT24,
planar_usage_depth,
DRWTextureFlag(0),
nullptr);
}
else if (num_planar_ref == 0) {
/* Makes Opengl Happy : Create a placeholder texture that will never be sampled but still
* bound to shader. */
txl->planar_pool = DRW_texture_create_2d_array_ex(
1, 1, 1, GPU_RGBA8, planar_usage, DRW_TEX_FILTER | DRW_TEX_MIPMAP, NULL);
1,
1,
1,
GPU_RGBA8,
planar_usage,
DRWTextureFlag(DRW_TEX_FILTER | DRW_TEX_MIPMAP),
nullptr);
txl->planar_depth = DRW_texture_create_2d_array_ex(
1, 1, 1, GPU_DEPTH_COMPONENT24, planar_usage_depth, 0, NULL);
1, 1, 1, GPU_DEPTH_COMPONENT24, planar_usage_depth, DRWTextureFlag(0), nullptr);
}
}
}
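The `DRWTextureFlag(...)` wrappers above exist because the bitwise OR of two enumerators yields an `int`, and C++ (unlike C) will not implicitly convert that `int` back to the enum parameter type; passing a literal `0` for "no flags" has the same problem. A minimal standalone sketch with a hypothetical `TexFlag` enum:

enum TexFlag {
  TEX_NONE = 0,
  TEX_FILTER = 1 << 0,
  TEX_MIPMAP = 1 << 1,
};

static void create_texture(TexFlag /*flags*/) {}

int main()
{
  /* OK in C; in C++ `TEX_FILTER | TEX_MIPMAP` is an int and needs an explicit cast. */
  create_texture(TexFlag(TEX_FILTER | TEX_MIPMAP));
  create_texture(TexFlag(0)); /* "No flags" also has to be cast. */
  return 0;
}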
@@ -167,13 +179,14 @@ void EEVEE_lightprobes_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
1,
scene_eval->eevee.gi_cubemap_resolution,
scene_eval->eevee.gi_visibility_resolution,
(int[3]){grid_res, grid_res, 1});
blender::int3{grid_res, grid_res, 1});
}
stl->g_data->light_cache = sldata->fallback_lightcache;
}

if (!sldata->probes) {
sldata->probes = MEM_callocN(sizeof(EEVEE_LightProbesInfo), "EEVEE_LightProbesInfo");
sldata->probes = static_cast<EEVEE_LightProbesInfo *>(
MEM_callocN(sizeof(EEVEE_LightProbesInfo), "EEVEE_LightProbesInfo"));
sldata->probe_ubo = GPU_uniformbuf_create(sizeof(EEVEE_LightProbe) * MAX_PROBE);
sldata->grid_ubo = GPU_uniformbuf_create(sizeof(EEVEE_LightGrid) * MAX_GRID);
sldata->planar_ubo = GPU_uniformbuf_create(sizeof(EEVEE_PlanarReflection) * MAX_PLANAR);
@@ -193,7 +206,7 @@ void EEVEE_lightprobes_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
eGPUTextureUsage planar_usage = GPU_TEXTURE_USAGE_ATTACHMENT | GPU_TEXTURE_USAGE_SHADER_READ |
GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW;
e_data.planar_pool_placeholder = DRW_texture_create_2d_array_ex(
1, 1, 1, GPU_RGBA8, planar_usage, DRW_TEX_FILTER, NULL);
1, 1, 1, GPU_RGBA8, planar_usage, DRW_TEX_FILTER, nullptr);
}
}

@@ -227,7 +240,7 @@ void EEVEE_lightbake_cache_init(EEVEE_ViewLayerData *sldata,
DRW_shgroup_uniform_block(grp, "renderpass_block", sldata->renderpass_ubo.combined);

struct GPUBatch *geom = DRW_cache_fullscreen_quad_get();
DRW_shgroup_call_instances(grp, NULL, geom, 6);
DRW_shgroup_call_instances(grp, nullptr, geom, 6);
}

{
@@ -247,7 +260,7 @@ void EEVEE_lightbake_cache_init(EEVEE_ViewLayerData *sldata,
DRW_shgroup_uniform_block(grp, "renderpass_block", sldata->renderpass_ubo.combined);

struct GPUBatch *geom = DRW_cache_fullscreen_quad_get();
DRW_shgroup_call(grp, geom, NULL);
DRW_shgroup_call(grp, geom, nullptr);
}

{
@@ -266,7 +279,7 @@ void EEVEE_lightbake_cache_init(EEVEE_ViewLayerData *sldata,
DRW_shgroup_uniform_block(grp, "renderpass_block", sldata->renderpass_ubo.combined);

struct GPUBatch *geom = DRW_cache_fullscreen_quad_get();
DRW_shgroup_call(grp, geom, NULL);
DRW_shgroup_call(grp, geom, nullptr);
}

{
@@ -278,7 +291,7 @@ void EEVEE_lightbake_cache_init(EEVEE_ViewLayerData *sldata,
DRW_shgroup_uniform_texture_ref(grp, "irradianceGrid", &light_cache->grid_tx.tex);

struct GPUBatch *geom = DRW_cache_fullscreen_quad_get();
DRW_shgroup_call(grp, geom, NULL);
DRW_shgroup_call(grp, geom, nullptr);
}
}

@@ -293,22 +306,22 @@ void EEVEE_lightprobes_cache_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedat
const Scene *scene_eval = DEG_get_evaluated_scene(draw_ctx->depsgraph);

pinfo->num_planar = 0;
pinfo->vis_data.collection = NULL;
pinfo->vis_data.collection = nullptr;
pinfo->do_grid_update = false;
pinfo->do_cube_update = false;

{
DRW_PASS_CREATE(psl->probe_background, DRW_STATE_WRITE_COLOR | DRW_STATE_DEPTH_EQUAL);

DRWShadingGroup *grp = NULL;
DRWShadingGroup *grp = nullptr;
EEVEE_lookdev_cache_init(vedata, sldata, psl->probe_background, pinfo, &grp);

if (grp == NULL) {
if (grp == nullptr) {
Scene *scene = draw_ctx->scene;
World *world = (scene->world) ? scene->world : EEVEE_world_default_get();

const int options = VAR_WORLD_BACKGROUND | VAR_WORLD_PROBE;
GPUMaterial *gpumat = EEVEE_material_get(vedata, scene, NULL, world, options);
GPUMaterial *gpumat = EEVEE_material_get(vedata, scene, nullptr, world, options);

grp = DRW_shgroup_material_create(gpumat, psl->probe_background);
DRW_shgroup_uniform_float_copy(grp, "backgroundAlpha", 1.0f);
@@ -321,7 +334,7 @@ void EEVEE_lightprobes_cache_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedat
DRW_shgroup_uniform_block(grp, "light_block", sldata->light_ubo);
DRW_shgroup_uniform_block(grp, "shadow_block", sldata->shadow_ubo);
DRW_shgroup_uniform_block_ref(grp, "renderpass_block", &stl->g_data->renderpass_ubo);
DRW_shgroup_call(grp, DRW_cache_fullscreen_quad_get(), NULL);
DRW_shgroup_call(grp, DRW_cache_fullscreen_quad_get(), nullptr);
}

if (DRW_state_draw_support()) {
@@ -346,7 +359,7 @@ void EEVEE_lightprobes_cache_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedat
DRW_shgroup_uniform_block(grp, "grid_block", sldata->grid_ubo);
DRW_shgroup_uniform_block(grp, "renderpass_block", sldata->renderpass_ubo.combined);

DRW_shgroup_call_procedural_triangles(grp, NULL, cube_len * 2);
DRW_shgroup_call_procedural_triangles(grp, nullptr, cube_len * 2);
}

/* Grid Display */
@@ -372,7 +385,7 @@ void EEVEE_lightprobes_cache_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedat
DRW_shgroup_uniform_block(shgrp, "common_block", sldata->common_ubo);
DRW_shgroup_uniform_block(shgrp, "renderpass_block", sldata->renderpass_ubo.combined);
int tri_count = egrid->resolution[0] * egrid->resolution[1] * egrid->resolution[2] * 2;
DRW_shgroup_call_procedural_triangles(shgrp, NULL, tri_count);
DRW_shgroup_call_procedural_triangles(shgrp, nullptr, tri_count);
}
}
}
@@ -395,7 +408,7 @@ void EEVEE_lightprobes_cache_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedat
}
}
else {
stl->g_data->planar_display_shgrp = NULL;
stl->g_data->planar_display_shgrp = nullptr;
}
}

@@ -652,9 +665,9 @@ static void lightbake_planar_ensure_view(EEVEE_PlanarReflection *eplanar,
/* Reflect Camera Matrix. */
mul_m4_m4m4(viewmat, viewmat, eplanar->mtx);

if (*r_planar_view == NULL) {
if (*r_planar_view == nullptr) {
*r_planar_view = DRW_view_create(
viewmat, winmat, NULL, NULL, EEVEE_lightprobes_obj_visibility_cb);
viewmat, winmat, nullptr, nullptr, EEVEE_lightprobes_obj_visibility_cb);
/* Compute offset plane equation (fix missing texels near reflection plane). */
float clip_plane[4];
copy_v4_v4(clip_plane, eplanar->plane_equation);
@@ -663,7 +676,7 @@ static void lightbake_planar_ensure_view(EEVEE_PlanarReflection *eplanar,
DRW_view_clip_planes_set(*r_planar_view, &clip_plane, 1);
}
else {
DRW_view_update(*r_planar_view, viewmat, winmat, NULL, NULL);
DRW_view_update(*r_planar_view, viewmat, winmat, nullptr, nullptr);
}
}

@@ -717,7 +730,7 @@ void EEVEE_lightprobes_cache_finish(EEVEE_ViewLayerData *sldata, EEVEE_Data *ved

if (draw_ctx->scene->eevee.flag & SCE_EEVEE_GI_AUTOBAKE) {
Scene *scene_orig = DEG_get_input_scene(draw_ctx->depsgraph);
if (scene_orig->eevee.light_cache_data != NULL) {
if (scene_orig->eevee.light_cache_data != nullptr) {
if (pinfo->do_grid_update) {
scene_orig->eevee.light_cache_data->flag |= LIGHTCACHE_UPDATE_GRID;
}
@@ -742,7 +755,7 @@ void EEVEE_lightprobes_cache_finish(EEVEE_ViewLayerData *sldata, EEVEE_Data *ved

DRW_shgroup_uniform_texture_ref(grp, "source", &txl->planar_pool);
DRW_shgroup_uniform_float(grp, "fireflyFactor", &sldata->common_data.ssr_firefly_fac, 1);
DRW_shgroup_call_procedural_triangles(grp, NULL, pinfo->num_planar);
DRW_shgroup_call_procedural_triangles(grp, nullptr, pinfo->num_planar);
}
}

@@ -750,11 +763,11 @@ void EEVEE_lightprobes_cache_finish(EEVEE_ViewLayerData *sldata, EEVEE_Data *ved
/** \name Rendering
* \{ */

typedef struct EEVEE_BakeRenderData {
struct EEVEE_BakeRenderData {
EEVEE_Data *vedata;
EEVEE_ViewLayerData *sldata;
GPUFrameBuffer **face_fb; /* should contain 6 framebuffer */
} EEVEE_BakeRenderData;
};

static void render_cubemap(void (*callback)(int face, EEVEE_BakeRenderData *user_data),
EEVEE_BakeRenderData *user_data,
@@ -776,15 +789,15 @@ static void render_cubemap(void (*callback)(int face, EEVEE_BakeRenderData *user
mul_m4_m4m4(viewmat, cubefacemat[i], viewmat);

if (do_culling) {
if (views[i] == NULL) {
views[i] = DRW_view_create(viewmat, winmat, NULL, NULL, NULL);
if (views[i] == nullptr) {
views[i] = DRW_view_create(viewmat, winmat, nullptr, nullptr, nullptr);
}
else {
DRW_view_update(views[i], viewmat, winmat, NULL, NULL);
DRW_view_update(views[i], viewmat, winmat, nullptr, nullptr);
}
}
else {
if (views[i] == NULL) {
if (views[i] == nullptr) {
const DRWView *default_view = DRW_view_default_get();
views[i] = DRW_view_create_sub(default_view, viewmat, winmat);
}
@@ -834,16 +847,15 @@ static void lightbake_render_world_face(int face, EEVEE_BakeRenderData *user_dat
DRW_draw_pass(psl->probe_background);
}

void EEVEE_lightbake_render_world(EEVEE_ViewLayerData *UNUSED(sldata),
void EEVEE_lightbake_render_world(EEVEE_ViewLayerData * /*sldata*/,
EEVEE_Data *vedata,
GPUFrameBuffer *face_fb[6])
{
EEVEE_BakeRenderData brdata = {
.vedata = vedata,
.face_fb = face_fb,
};
EEVEE_BakeRenderData brdata{};
brdata.vedata = vedata;
brdata.face_fb = face_fb;

render_cubemap(lightbake_render_world_face, &brdata, (float[3]){0.0f}, 1.0f, 10.0f, false);
render_cubemap(lightbake_render_world_face, &brdata, blender::float3(0.0f), 1.0f, 10.0f, false);
}

static void lightbake_render_scene_face(int face, EEVEE_BakeRenderData *user_data)
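Two more C-only constructs disappear in the hunk above: `typedef struct Foo {...} Foo;` (redundant in C++, where the struct tag is already a type name) and compound literals such as `(float[3]){0.0f}` (a C99 feature with no C++ equivalent, replaced here by the `blender::float3` vector type). A standalone sketch with a stand-in `float3`, not the Blender type:

#include <array>

/* In C++ the struct tag is a type name, so no typedef is needed. */
struct BakeRenderData {
  int face;
};

/* Stand-in for blender::float3, just to show the call-site change. */
using float3 = std::array<float, 3>;

static float sum(const float3 &v) { return v[0] + v[1] + v[2]; }

int main()
{
  /* C99 call site: sum((float[3]){0.0f});  -- compound literals do not exist in C++. */
  BakeRenderData data{};
  data.face = 0;
  return int(sum(float3{0.0f, 0.0f, 0.0f})) + data.face;
}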
@@ -875,11 +887,10 @@ void EEVEE_lightbake_render_scene(EEVEE_ViewLayerData *sldata,
float near_clip,
float far_clip)
{
EEVEE_BakeRenderData brdata = {
.vedata = vedata,
.sldata = sldata,
.face_fb = face_fb,
};
EEVEE_BakeRenderData brdata{};
brdata.vedata = vedata;
brdata.sldata = sldata;
brdata.face_fb = face_fb;

render_cubemap(lightbake_render_scene_face, &brdata, pos, near_clip, far_clip, true);
}
@@ -955,10 +966,9 @@ static void lightbake_render_scene_reflected(int layer, EEVEE_BakeRenderData *us
static void eevee_lightbake_render_scene_to_planars(EEVEE_ViewLayerData *sldata,
EEVEE_Data *vedata)
{
EEVEE_BakeRenderData brdata = {
.vedata = vedata,
.sldata = sldata,
};
EEVEE_BakeRenderData brdata{};
brdata.vedata = vedata;
brdata.sldata = sldata;

render_reflections(lightbake_render_scene_reflected,
&brdata,
@@ -1075,8 +1085,8 @@ void EEVEE_lightbake_filter_diffuse(EEVEE_ViewLayerData *sldata,

pinfo->intensity_fac = intensity;

/* Find cell position on the virtual 3D texture. */
/* NOTE: Keep in sync with `load_irradiance_cell()`. */
/* Find cell position on the virtual 3D texture. */
/* NOTE: Keep in sync with `load_irradiance_cell()`. */
#if defined(IRRADIANCE_SH_L2)
int size[2] = {3, 3};
#elif defined(IRRADIANCE_HL2)
@@ -1114,7 +1124,7 @@ void EEVEE_lightbake_filter_diffuse(EEVEE_ViewLayerData *sldata,

void EEVEE_lightbake_filter_visibility(EEVEE_ViewLayerData *sldata,
EEVEE_Data *vedata,
GPUTexture *UNUSED(rt_depth),
GPUTexture * /*rt_depth*/,
GPUFrameBuffer *fb,
int grid_offset,
float clipsta,
@@ -1222,7 +1232,7 @@ void EEVEE_lightprobes_refresh_planar(EEVEE_ViewLayerData *sldata, EEVEE_Data *v
eevee_lightbake_render_scene_to_planars(sldata, vedata);

/* Make sure no additional visibility check runs after this. */
pinfo->vis_data.collection = NULL;
pinfo->vis_data.collection = nullptr;

GPU_uniformbuf_update(sldata->planar_ubo, &sldata->probes->planar_data);

@@ -60,25 +60,27 @@ static void eevee_lookdev_hdri_preview_init(EEVEE_Data *vedata, EEVEE_ViewLayerD

{
Material *ma = EEVEE_material_default_diffuse_get();
GPUMaterial *gpumat = EEVEE_material_get(vedata, scene, ma, NULL, mat_options);
GPUMaterial *gpumat = EEVEE_material_get(vedata, scene, ma, nullptr, mat_options);
GPUShader *sh = GPU_material_get_shader(gpumat);

DRW_PASS_CREATE(psl->lookdev_diffuse_pass, state);
grp = DRW_shgroup_create(sh, psl->lookdev_diffuse_pass);
EEVEE_material_bind_resources(grp, gpumat, sldata, vedata, NULL, NULL, -1.0f, false, false);
EEVEE_material_bind_resources(
grp, gpumat, sldata, vedata, nullptr, nullptr, -1.0f, false, false);
DRW_shgroup_add_material_resources(grp, gpumat);
DRW_shgroup_call(grp, sphere, NULL);
DRW_shgroup_call(grp, sphere, nullptr);
}
{
Material *ma = EEVEE_material_default_glossy_get();
GPUMaterial *gpumat = EEVEE_material_get(vedata, scene, ma, NULL, mat_options);
GPUMaterial *gpumat = EEVEE_material_get(vedata, scene, ma, nullptr, mat_options);
GPUShader *sh = GPU_material_get_shader(gpumat);

DRW_PASS_CREATE(psl->lookdev_glossy_pass, state);
grp = DRW_shgroup_create(sh, psl->lookdev_glossy_pass);
EEVEE_material_bind_resources(grp, gpumat, sldata, vedata, NULL, NULL, -1.0f, false, false);
EEVEE_material_bind_resources(
grp, gpumat, sldata, vedata, nullptr, nullptr, -1.0f, false, false);
DRW_shgroup_add_material_resources(grp, gpumat);
DRW_shgroup_call(grp, sphere, NULL);
DRW_shgroup_call(grp, sphere, nullptr);
}
}

@@ -87,7 +89,7 @@ void EEVEE_lookdev_init(EEVEE_Data *vedata)
EEVEE_StorageList *stl = vedata->stl;
EEVEE_EffectsInfo *effects = stl->effects;
const DRWContextState *draw_ctx = DRW_context_state_get();
/* The view will be NULL when rendering previews. */
/* The view will be nullptr when rendering previews. */
const View3D *v3d = draw_ctx->v3d;

if (eevee_hdri_preview_overlay_enabled(v3d)) {
@@ -149,13 +151,13 @@ void EEVEE_lookdev_cache_init(EEVEE_Data *vedata,
EEVEE_EffectsInfo *effects = stl->effects;
EEVEE_PrivateData *g_data = stl->g_data;
const DRWContextState *draw_ctx = DRW_context_state_get();
/* The view will be NULL when rendering previews. */
/* The view will be nullptr when rendering previews. */
const View3D *v3d = draw_ctx->v3d;
const Scene *scene = draw_ctx->scene;

const bool probe_render = pinfo != NULL;
const bool probe_render = pinfo != nullptr;

effects->lookdev_view = NULL;
effects->lookdev_view = nullptr;

if (eevee_hdri_preview_overlay_enabled(v3d)) {
eevee_lookdev_hdri_preview_init(vedata, sldata);
@@ -165,7 +167,7 @@ void EEVEE_lookdev_cache_init(EEVEE_Data *vedata,
const View3DShading *shading = &v3d->shading;
StudioLight *sl = BKE_studiolight_find(shading->lookdev_light,
STUDIOLIGHT_ORIENTATIONS_MATERIAL_MODE);
if (sl == NULL || (sl->flag & STUDIOLIGHT_TYPE_WORLD) == 0) {
if (sl == nullptr || (sl->flag & STUDIOLIGHT_TYPE_WORLD) == 0) {
return;
}

@@ -176,14 +178,14 @@ void EEVEE_lookdev_cache_init(EEVEE_Data *vedata,
int cube_res = scene_eval->eevee.gi_cubemap_resolution;

/* If one of the component is missing we start from scratch. */
if ((stl->lookdev_grid_data == NULL) || (stl->lookdev_cube_data == NULL) ||
(txl->lookdev_grid_tx == NULL) || (txl->lookdev_cube_tx == NULL) ||
if ((stl->lookdev_grid_data == nullptr) || (stl->lookdev_cube_data == nullptr) ||
(txl->lookdev_grid_tx == nullptr) || (txl->lookdev_cube_tx == nullptr) ||
(g_data->light_cache && g_data->light_cache->ref_res != cube_res))
{
eevee_lookdev_lightcache_delete(vedata);
}

if (stl->lookdev_lightcache == NULL) {
if (stl->lookdev_lightcache == nullptr) {
#if defined(IRRADIANCE_SH_L2)
int grid_res = 4;
#elif defined(IRRADIANCE_HL2)
@@ -191,7 +193,7 @@ void EEVEE_lookdev_cache_init(EEVEE_Data *vedata,
#endif

stl->lookdev_lightcache = EEVEE_lightcache_create(
1, 1, cube_res, 8, (int[3]){grid_res, grid_res, 1});
1, 1, cube_res, 8, blender::int3{grid_res, grid_res, 1});

/* XXX: Fix memleak. TODO: find out why. */
MEM_SAFE_FREE(stl->lookdev_cube_mips);
@@ -218,7 +220,7 @@ void EEVEE_lookdev_cache_init(EEVEE_Data *vedata,
float view_matrix[4][4];
float view_rot_matrix[3][3];
float x_rot_matrix[3][3];
DRW_view_viewmat_get(NULL, view_matrix, false);
DRW_view_viewmat_get(nullptr, view_matrix, false);
copy_m3_m4(view_rot_matrix, view_matrix);
axis_angle_to_mat3_single(x_rot_matrix, 'X', M_PI_2);
mul_m3_m3m3(view_rot_matrix, x_rot_matrix, view_rot_matrix);
@@ -322,7 +324,7 @@ void EEVEE_lookdev_draw(EEVEE_Data *vedata)
eevee_lookdev_apply_taa(effects, effects->sphere_size, winmat);

/* "Remove" view matrix location. Leaving only rotation. */
DRW_view_viewmat_get(NULL, viewmat, false);
DRW_view_viewmat_get(nullptr, viewmat, false);
zero_v3(viewmat[3]);

if (effects->lookdev_view) {
@@ -346,7 +348,7 @@ void EEVEE_lookdev_draw(EEVEE_Data *vedata)
GPU_framebuffer_bind(fb);

const int sphere_margin = effects->sphere_size / 6.0f;
float offset[2] = {0.0f, sphere_margin};
float offset[2] = {0.0f, float(sphere_margin)};

offset[0] = effects->sphere_size + sphere_margin;
GPU_framebuffer_viewport_set(fb,
@@ -371,6 +373,6 @@ void EEVEE_lookdev_draw(EEVEE_Data *vedata)

DRW_stats_group_end();

DRW_view_set_active(NULL);
DRW_view_set_active(nullptr);
}
}
@@ -26,10 +26,11 @@ float *EEVEE_lut_update_ggx_brdf(int lut_size)
DRWPass *pass = DRW_pass_create(__func__, DRW_STATE_WRITE_COLOR);
DRWShadingGroup *grp = DRW_shgroup_create(EEVEE_shaders_ggx_lut_sh_get(), pass);
DRW_shgroup_uniform_float_copy(grp, "sampleCount", 64.0f); /* Actual sample count is squared. */
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);

GPUTexture *tex = DRW_texture_create_2d(lut_size, lut_size, GPU_RG16F, 0, NULL);
GPUFrameBuffer *fb = NULL;
GPUTexture *tex = DRW_texture_create_2d(
lut_size, lut_size, GPU_RG16F, DRWTextureFlag(0), nullptr);
GPUFrameBuffer *fb = nullptr;
GPU_framebuffer_ensure_config(&fb,
{
GPU_ATTACHMENT_NONE,
@@ -39,7 +40,7 @@ float *EEVEE_lut_update_ggx_brdf(int lut_size)
DRW_draw_pass(pass);
GPU_FRAMEBUFFER_FREE_SAFE(fb);

float *data = GPU_texture_read(tex, GPU_DATA_FLOAT, 0);
float *data = static_cast<float *>(GPU_texture_read(tex, GPU_DATA_FLOAT, 0));
GPU_texture_free(tex);
#if DO_FILE_OUTPUT
/* Content is to be put inside eevee_lut.c */
@@ -65,10 +66,11 @@ float *EEVEE_lut_update_ggx_btdf(int lut_size, int lut_depth)
DRWShadingGroup *grp = DRW_shgroup_create(EEVEE_shaders_ggx_refraction_lut_sh_get(), pass);
DRW_shgroup_uniform_float_copy(grp, "sampleCount", 64.0f); /* Actual sample count is squared. */
DRW_shgroup_uniform_float(grp, "z_factor", &roughness, 1);
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);

GPUTexture *tex = DRW_texture_create_2d_array(lut_size, lut_size, lut_depth, GPU_RG16F, 0, NULL);
GPUFrameBuffer *fb = NULL;
GPUTexture *tex = DRW_texture_create_2d_array(
lut_size, lut_size, lut_depth, GPU_RG16F, DRWTextureFlag(0), nullptr);
GPUFrameBuffer *fb = nullptr;
for (int i = 0; i < lut_depth; i++) {
GPU_framebuffer_ensure_config(&fb,
{
@@ -82,7 +84,7 @@ float *EEVEE_lut_update_ggx_btdf(int lut_size, int lut_depth)

GPU_FRAMEBUFFER_FREE_SAFE(fb);

float *data = GPU_texture_read(tex, GPU_DATA_FLOAT, 0);
float *data = static_cast<float *>(GPU_texture_read(tex, GPU_DATA_FLOAT, 0));
GPU_texture_free(tex);

#if DO_FILE_OUTPUT
@@ -41,7 +41,7 @@ static struct {
GPUTexture *noise_tex;

float noise_offsets[3];
} e_data = {NULL}; /* Engine data */
} e_data = {nullptr}; /* Engine data */

typedef struct EeveeMaterialCache {
DRWShadingGroup *depth_grp;
@@ -137,7 +137,8 @@ void EEVEE_material_bind_resources(DRWShadingGroup *shgrp,

static void eevee_init_noise_texture(void)
{
e_data.noise_tex = DRW_texture_create_2d(64, 64, GPU_RGBA16F, 0, (float *)blue_noise);
e_data.noise_tex = DRW_texture_create_2d(
64, 64, GPU_RGBA16F, DRWTextureFlag(0), (float *)blue_noise);
}

#define RUNTIME_LUT_CREATION 0
@@ -145,7 +146,8 @@ static void eevee_init_noise_texture(void)
static void eevee_init_util_texture(void)
{
const int layers = 4 + 16;
float(*texels)[4] = MEM_mallocN(sizeof(float[4]) * 64 * 64 * layers, "utils texels");
float(*texels)[4] = static_cast<float(*)[4]>(
MEM_mallocN(sizeof(float[4]) * 64 * 64 * layers, "utils texels"));
float(*texels_layer)[4] = texels;
#if RUNTIME_LUT_CREATION
float *bsdf_ggx_lut = EEVEE_lut_update_ggx_brdf(64);
@@ -199,8 +201,13 @@ static void eevee_init_util_texture(void)
}

eGPUTextureUsage util_usage = GPU_TEXTURE_USAGE_ATTACHMENT | GPU_TEXTURE_USAGE_SHADER_READ;
e_data.util_tex = DRW_texture_create_2d_array_ex(
64, 64, layers, GPU_RGBA16F, util_usage, DRW_TEX_FILTER | DRW_TEX_WRAP, (float *)texels);
e_data.util_tex = DRW_texture_create_2d_array_ex(64,
64,
layers,
GPU_RGBA16F,
util_usage,
DRWTextureFlag(DRW_TEX_FILTER | DRW_TEX_WRAP),
(float *)texels);

MEM_freeN(texels);
#if RUNTIME_LUT_CREATION
@@ -261,33 +268,33 @@ void EEVEE_materials_init(EEVEE_ViewLayerData *sldata,

{
/* Create RenderPass UBO */
if (sldata->renderpass_ubo.combined == NULL) {
if (sldata->renderpass_ubo.combined == nullptr) {
EEVEE_RenderPassData data;
data = (EEVEE_RenderPassData){true, true, true, true, true, false, false, false, 0};
data = EEVEE_RenderPassData{true, true, true, true, true, false, false, false, 0};
sldata->renderpass_ubo.combined = GPU_uniformbuf_create_ex(
sizeof(data), &data, "renderpass_ubo.combined");

data = (EEVEE_RenderPassData){true, false, false, false, false, true, false, false, 0};
data = EEVEE_RenderPassData{true, false, false, false, false, true, false, false, 0};
sldata->renderpass_ubo.diff_color = GPU_uniformbuf_create_ex(
sizeof(data), &data, "renderpass_ubo.diff_color");

data = (EEVEE_RenderPassData){true, true, false, false, false, false, false, false, 0};
data = EEVEE_RenderPassData{true, true, false, false, false, false, false, false, 0};
sldata->renderpass_ubo.diff_light = GPU_uniformbuf_create_ex(
sizeof(data), &data, "renderpass_ubo.diff_light");

data = (EEVEE_RenderPassData){false, false, true, false, false, false, false, false, 0};
data = EEVEE_RenderPassData{false, false, true, false, false, false, false, false, 0};
sldata->renderpass_ubo.spec_color = GPU_uniformbuf_create_ex(
sizeof(data), &data, "renderpass_ubo.spec_color");

data = (EEVEE_RenderPassData){false, false, true, true, false, false, false, false, 0};
data = EEVEE_RenderPassData{false, false, true, true, false, false, false, false, 0};
sldata->renderpass_ubo.spec_light = GPU_uniformbuf_create_ex(
sizeof(data), &data, "renderpass_ubo.spec_light");

data = (EEVEE_RenderPassData){false, false, false, false, true, false, false, false, 0};
data = EEVEE_RenderPassData{false, false, false, false, true, false, false, false, 0};
sldata->renderpass_ubo.emit = GPU_uniformbuf_create_ex(
sizeof(data), &data, "renderpass_ubo.emit");

data = (EEVEE_RenderPassData){true, true, true, true, true, false, true, false, 0};
data = EEVEE_RenderPassData{true, true, true, true, true, false, true, false, 0};
sldata->renderpass_ubo.environment = GPU_uniformbuf_create_ex(
sizeof(data), &data, "renderpass_ubo.environment");
}
@@ -349,7 +356,7 @@ void EEVEE_materials_init(EEVEE_ViewLayerData *sldata,
Scene *scene = draw_ctx->scene;
World *wo = scene->world;
if (wo && wo->use_nodes) {
EEVEE_material_get(vedata, scene, NULL, wo, VAR_WORLD_BACKGROUND);
EEVEE_material_get(vedata, scene, nullptr, wo, VAR_WORLD_BACKGROUND);
}
}
}
@@ -365,26 +372,26 @@ void EEVEE_materials_cache_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
{
stl->g_data->material_hash = BLI_ghash_ptr_new("Eevee_material ghash");

if (sldata->material_cache == NULL) {
if (sldata->material_cache == nullptr) {
sldata->material_cache = BLI_memblock_create(sizeof(EeveeMaterialCache));
}
else {
BLI_memblock_clear(sldata->material_cache, NULL);
BLI_memblock_clear(sldata->material_cache, nullptr);
}
}

{
DRW_PASS_CREATE(psl->background_ps, DRW_STATE_WRITE_COLOR | DRW_STATE_DEPTH_EQUAL);

DRWShadingGroup *grp = NULL;
EEVEE_lookdev_cache_init(vedata, sldata, psl->background_ps, NULL, &grp);
DRWShadingGroup *grp = nullptr;
EEVEE_lookdev_cache_init(vedata, sldata, psl->background_ps, nullptr, &grp);

if (grp == NULL) {
if (grp == nullptr) {
Scene *scene = draw_ctx->scene;
World *world = (scene->world) ? scene->world : EEVEE_world_default_get();

const int options = VAR_WORLD_BACKGROUND;
GPUMaterial *gpumat = EEVEE_material_get(vedata, scene, NULL, world, options);
GPUMaterial *gpumat = EEVEE_material_get(vedata, scene, nullptr, world, options);

grp = DRW_shgroup_material_create(gpumat, psl->background_ps);
DRW_shgroup_uniform_float(grp, "backgroundAlpha", &stl->g_data->background_alpha, 1);
@@ -404,7 +411,7 @@ void EEVEE_materials_cache_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
DRW_shgroup_uniform_texture_ref(grp, "probeCubes", &stl->g_data->light_cache->cube_tx.tex);
DRW_shgroup_uniform_texture_ref(grp, "irradianceGrid", &stl->g_data->light_cache->grid_tx.tex);
DRW_shgroup_uniform_texture_ref(grp, "maxzBuffer", &vedata->txl->maxzbuffer);
DRW_shgroup_call(grp, DRW_cache_fullscreen_quad_get(), NULL);
DRW_shgroup_call(grp, DRW_cache_fullscreen_quad_get(), nullptr);
}

#define EEVEE_PASS_CREATE(pass, state) \
@@ -448,10 +455,10 @@ void EEVEE_materials_cache_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
psl->material_sss_ps,
psl->material_sss_cull_ps,
};
DRWPass *first = NULL, *last = NULL;
DRWPass *first = nullptr, *last = nullptr;
for (int i = 0; i < ARRAY_SIZE(passes); i++) {
DRWPass *pass = DRW_pass_create_instance("Renderpass Accumulation", passes[i], state);
if (first == NULL) {
if (first == nullptr) {
first = last = pass;
}
else {
@@ -474,7 +481,7 @@ void EEVEE_materials_cache_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
DRWShadingGroup *grp = DRW_shgroup_create(sh, psl->update_noise_pass);
DRW_shgroup_uniform_texture(grp, "blueNoise", e_data.noise_tex);
DRW_shgroup_uniform_vec3(grp, "offsets", e_data.noise_offsets, 1);
DRW_shgroup_call(grp, DRW_cache_fullscreen_quad_get(), NULL);
DRW_shgroup_call(grp, DRW_cache_fullscreen_quad_get(), nullptr);
}
}

@@ -499,7 +506,7 @@ BLI_INLINE void material_shadow(EEVEE_Data *vedata,
SET_FLAG_FROM_TEST(mat_options, use_shadow_shader, VAR_MAT_HASH);
SET_FLAG_FROM_TEST(mat_options, is_hair, VAR_MAT_HAIR);
GPUMaterial *gpumat = (use_shadow_shader) ?
EEVEE_material_get(vedata, scene, ma, NULL, mat_options) :
EEVEE_material_get(vedata, scene, ma, nullptr, mat_options) :
EEVEE_material_default_get(scene, ma, mat_options);

/* Avoid possible confusion with depth pre-pass options. */
@@ -519,7 +526,7 @@ BLI_INLINE void material_shadow(EEVEE_Data *vedata,
else {
*grp_p = grp = DRW_shgroup_create(sh, psl->shadow_pass);
EEVEE_material_bind_resources(
grp, gpumat, sldata, vedata, NULL, NULL, alpha_clip_threshold, false, false);
grp, gpumat, sldata, vedata, nullptr, nullptr, alpha_clip_threshold, false, false);
}

DRW_shgroup_add_material_resources(grp, gpumat);
@@ -528,8 +535,8 @@ BLI_INLINE void material_shadow(EEVEE_Data *vedata,
emc->shadow_grp_p = grp_p;
}
else {
emc->shadow_grp = NULL;
emc->shadow_grp_p = NULL;
emc->shadow_grp = nullptr;
emc->shadow_grp_p = nullptr;
}
}

@@ -560,7 +567,7 @@ static EeveeMaterialCache material_opaque(EEVEE_Data *vedata,
return **emc_p;
}

*emc_p = emc = BLI_memblock_alloc(sldata->material_cache);
*emc_p = emc = static_cast<EeveeMaterialCache *>(BLI_memblock_alloc(sldata->material_cache));

material_shadow(vedata, sldata, ma, is_hair, emc);

@@ -571,13 +578,13 @@ static EeveeMaterialCache material_opaque(EEVEE_Data *vedata,
SET_FLAG_FROM_TEST(mat_options, use_depth_shader, VAR_MAT_HASH);
SET_FLAG_FROM_TEST(mat_options, is_hair, VAR_MAT_HAIR);
GPUMaterial *gpumat = (use_depth_shader) ?
EEVEE_material_get(vedata, scene, ma, NULL, mat_options) :
EEVEE_material_get(vedata, scene, ma, nullptr, mat_options) :
EEVEE_material_default_get(scene, ma, mat_options);

int option = 0;
SET_FLAG_FROM_TEST(option, do_cull, KEY_CULL);
SET_FLAG_FROM_TEST(option, use_ssrefract, KEY_REFRACT);
DRWPass *depth_ps = (DRWPass *[]){
DRWPass *depth_ps = std::array{
psl->depth_ps,
psl->depth_cull_ps,
psl->depth_refract_ps,
@@ -599,7 +606,7 @@ static EeveeMaterialCache material_opaque(EEVEE_Data *vedata,
else {
*grp_p = grp = DRW_shgroup_create(sh, depth_ps);
EEVEE_material_bind_resources(
grp, gpumat, sldata, vedata, NULL, NULL, alpha_clip_threshold, false, false);
grp, gpumat, sldata, vedata, nullptr, nullptr, alpha_clip_threshold, false, false);
}

DRW_shgroup_add_material_resources(grp, gpumat);
@@ -612,12 +619,12 @@ static EeveeMaterialCache material_opaque(EEVEE_Data *vedata,
int mat_options = VAR_MAT_MESH;
SET_FLAG_FROM_TEST(mat_options, use_ssrefract, VAR_MAT_REFRACT);
SET_FLAG_FROM_TEST(mat_options, is_hair, VAR_MAT_HAIR);
GPUMaterial *gpumat = EEVEE_material_get(vedata, scene, ma, NULL, mat_options);
GPUMaterial *gpumat = EEVEE_material_get(vedata, scene, ma, nullptr, mat_options);
const bool use_sss = GPU_material_flag_get(gpumat, GPU_MATFLAG_SUBSURFACE);

int ssr_id = (((effects->enabled_effects & EFFECT_SSR) != 0) && !use_ssrefract) ? 1 : 0;
int option = (use_ssrefract ? 0 : (use_sss ? 1 : 2)) * 2 + do_cull;
DRWPass *shading_pass = (DRWPass *[]){
DRWPass *shading_pass = std::array{
psl->material_refract_ps,
psl->material_refract_cull_ps,
psl->material_sss_ps,
@@ -697,16 +704,17 @@ static EeveeMaterialCache material_transparent(EEVEE_Data *vedata,
if (use_prepass) {
/* Depth prepass */
int mat_options = VAR_MAT_MESH | VAR_MAT_DEPTH;
GPUMaterial *gpumat = EEVEE_material_get(vedata, scene, ma, NULL, mat_options);
GPUMaterial *gpumat = EEVEE_material_get(vedata, scene, ma, nullptr, mat_options);
GPUShader *sh = GPU_material_get_shader(gpumat);

DRWShadingGroup *grp = DRW_shgroup_create(sh, psl->transparent_pass);

EEVEE_material_bind_resources(grp, gpumat, sldata, vedata, NULL, NULL, -1.0f, false, true);
EEVEE_material_bind_resources(
grp, gpumat, sldata, vedata, nullptr, nullptr, -1.0f, false, true);
DRW_shgroup_add_material_resources(grp, gpumat);

cur_state = DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_LESS_EQUAL;
cur_state |= (do_cull) ? DRW_STATE_CULL_BACK : 0;
cur_state |= (do_cull) ? DRW_STATE_CULL_BACK : DRWState(0);

DRW_shgroup_state_disable(grp, all_state);
DRW_shgroup_state_enable(grp, cur_state);
@@ -718,7 +726,7 @@ static EeveeMaterialCache material_transparent(EEVEE_Data *vedata,
int ssr_id = -1; /* TODO: transparent SSR. */
int mat_options = VAR_MAT_MESH | VAR_MAT_BLEND;
SET_FLAG_FROM_TEST(mat_options, use_ssrefract, VAR_MAT_REFRACT);
GPUMaterial *gpumat = EEVEE_material_get(vedata, scene, ma, NULL, mat_options);
GPUMaterial *gpumat = EEVEE_material_get(vedata, scene, ma, nullptr, mat_options);

DRWShadingGroup *grp = DRW_shgroup_create(GPU_material_get_shader(gpumat),
psl->transparent_pass);
@@ -729,7 +737,7 @@ static EeveeMaterialCache material_transparent(EEVEE_Data *vedata,

cur_state = DRW_STATE_WRITE_COLOR | DRW_STATE_BLEND_CUSTOM;
cur_state |= (use_prepass) ? DRW_STATE_DEPTH_EQUAL : DRW_STATE_DEPTH_LESS_EQUAL;
cur_state |= (do_cull) ? DRW_STATE_CULL_BACK : 0;
cur_state |= (do_cull) ? DRW_STATE_CULL_BACK : DRWState(0);

/* Disable other blend modes and use the one we want. */
DRW_shgroup_state_disable(grp, all_state);
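The hunks above show two more conversion patterns. `do_cull ? DRW_STATE_CULL_BACK : 0` mixes an enumerator and an `int`, which C++ rejects in a conditional expression, so the zero branch is cast to `DRWState(0)`; and the C99 pointer-array compound literal used as a lookup table is replaced by `std::array` indexed the same way. A rough standalone sketch of both (hypothetical `State` enum, plain `int` table):

#include <array>

enum State {
  STATE_NONE = 0,
  STATE_CULL_BACK = 1 << 0,
  STATE_DEPTH_EQUAL = 1 << 1,
};

int main()
{
  const bool do_cull = true;

  /* C: State state = do_cull ? STATE_CULL_BACK : 0;  -- the int 0 fails to convert in C++. */
  State state = do_cull ? STATE_CULL_BACK : State(0);

  /* C: int pass = (int[]){10, 20, 30}[idx];  -- compound literal, C only. */
  const int idx = 1;
  int pass = std::array{10, 20, 30}[idx];

  return int(state) + pass;
}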
@@ -748,7 +756,7 @@ BLI_INLINE Material *eevee_object_material_get(Object *ob, int slot, bool holdou
return BKE_material_default_holdout();
}
Material *ma = BKE_object_material_get_eval(ob, slot + 1);
if (ma == NULL) {
if (ma == nullptr) {
if (ob->type == OB_VOLUME) {
ma = BKE_material_default_volume();
}
@@ -862,7 +870,7 @@ void EEVEE_materials_cache_populate(EEVEE_Data *vedata,

if (mat_geom) {
for (int i = 0; i < materials_len; i++) {
if (mat_geom[i] == NULL) {
if (mat_geom[i] == nullptr) {
continue;
}

@@ -875,7 +883,7 @@ void EEVEE_materials_cache_populate(EEVEE_Data *vedata,

/* XXX TODO: rewrite this to include the dupli objects.
* This means we cannot exclude dupli objects from reflections!!! */
EEVEE_ObjectEngineData *oedata = NULL;
EEVEE_ObjectEngineData *oedata = nullptr;
if ((ob->base_flag & BASE_FROM_DUPLI) == 0) {
oedata = EEVEE_object_data_ensure(ob);
oedata->ob = ob;
@@ -885,7 +893,7 @@ void EEVEE_materials_cache_populate(EEVEE_Data *vedata,
ADD_SHGROUP_CALL(matcache[i].shading_grp, ob, mat_geom[i], oedata);
ADD_SHGROUP_CALL_SAFE(matcache[i].depth_grp, ob, mat_geom[i], oedata);
ADD_SHGROUP_CALL_SAFE(matcache[i].shadow_grp, ob, mat_geom[i], oedata);
*cast_shadow = *cast_shadow || (matcache[i].shadow_grp != NULL);
*cast_shadow = *cast_shadow || (matcache[i].shadow_grp != nullptr);
}
}

@@ -935,7 +943,7 @@ void EEVEE_particle_hair_cache_populate(EEVEE_Data *vedata,

if (matcache.depth_grp) {
*matcache.depth_grp_p = DRW_shgroup_hair_create_sub(
ob, psys, md, matcache.depth_grp, NULL);
ob, psys, md, matcache.depth_grp, nullptr);
}
if (matcache.shading_grp) {
*matcache.shading_grp_p = DRW_shgroup_hair_create_sub(
@@ -943,7 +951,7 @@ void EEVEE_particle_hair_cache_populate(EEVEE_Data *vedata,
}
if (matcache.shadow_grp) {
*matcache.shadow_grp_p = DRW_shgroup_hair_create_sub(
ob, psys, md, matcache.shadow_grp, NULL);
ob, psys, md, matcache.shadow_grp, nullptr);
*cast_shadow = true;
}

@@ -962,27 +970,27 @@ void EEVEE_object_curves_cache_populate(EEVEE_Data *vedata,
vedata, sldata, ob, CURVES_MATERIAL_NR - 1, true);

if (matcache.depth_grp) {
*matcache.depth_grp_p = DRW_shgroup_curves_create_sub(ob, matcache.depth_grp, NULL);
*matcache.depth_grp_p = DRW_shgroup_curves_create_sub(ob, matcache.depth_grp, nullptr);
}
if (matcache.shading_grp) {
*matcache.shading_grp_p = DRW_shgroup_curves_create_sub(
ob, matcache.shading_grp, matcache.shading_gpumat);
}
if (matcache.shadow_grp) {
*matcache.shadow_grp_p = DRW_shgroup_curves_create_sub(ob, matcache.shadow_grp, NULL);
*matcache.shadow_grp_p = DRW_shgroup_curves_create_sub(ob, matcache.shadow_grp, nullptr);
*cast_shadow = true;
}

EEVEE_motion_blur_curves_cache_populate(sldata, vedata, ob);
}

void EEVEE_materials_cache_finish(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Data *vedata)
void EEVEE_materials_cache_finish(EEVEE_ViewLayerData * /*sldata*/, EEVEE_Data *vedata)
{
EEVEE_PrivateData *pd = vedata->stl->g_data;
EEVEE_EffectsInfo *effects = vedata->stl->effects;

BLI_ghash_free(pd->material_hash, NULL, NULL);
pd->material_hash = NULL;
BLI_ghash_free(pd->material_hash, nullptr, nullptr);
pd->material_hash = nullptr;

SET_FLAG_FROM_TEST(effects->enabled_effects, effects->sss_surface_count > 0, EFFECT_SSS);
}
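The `UNUSED(sldata)` wrapper seen above (a macro that typically expands to a renamed parameter plus an unused attribute in C) is replaced by the idiomatic C++ form: drop the parameter name and keep it as a comment. A minimal sketch with placeholder types:

struct ViewLayerData;
struct Data;

/* C:   void finish(ViewLayerData *UNUSED(sldata), Data *vedata);
 * C++: simply leave the parameter unnamed; the comment keeps the name readable. */
static void finish(ViewLayerData * /*sldata*/, Data *vedata)
{
  (void)vedata;
}

int main()
{
  finish(nullptr, nullptr);
  return 0;
}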
@@ -1014,7 +1022,7 @@ void EEVEE_material_renderpasses_init(EEVEE_Data *vedata)

static void material_renderpass_init(GPUTexture **output_tx, const eGPUTextureFormat format)
{
DRW_texture_ensure_fullscreen_2d(output_tx, format, 0);
DRW_texture_ensure_fullscreen_2d(output_tx, format, DRWTextureFlag(0));
}

void EEVEE_material_output_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata, uint tot_samples)
@@ -1095,27 +1103,32 @@ void EEVEE_material_output_accumulate(EEVEE_ViewLayerData *sldata, EEVEE_Data *v
EEVEE_EffectsInfo *effects = vedata->stl->effects;
EEVEE_TextureList *txl = vedata->txl;

if (fbl->material_accum_fb != NULL) {
if (fbl->material_accum_fb != nullptr) {
DRWPass *material_accum_ps = psl->material_accum_ps;
DRWPass *background_accum_ps = psl->background_accum_ps;
if (pd->render_passes & EEVEE_RENDER_PASS_ENVIRONMENT) {
material_renderpass_accumulate(effects,
fbl,
background_accum_ps,
NULL,
nullptr,
pd,
txl->env_accum,
sldata->renderpass_ubo.environment);
}
if (pd->render_passes & EEVEE_RENDER_PASS_EMIT) {
material_renderpass_accumulate(
effects, fbl, material_accum_ps, NULL, pd, txl->emit_accum, sldata->renderpass_ubo.emit);
material_renderpass_accumulate(effects,
fbl,
material_accum_ps,
nullptr,
pd,
txl->emit_accum,
sldata->renderpass_ubo.emit);
}
if (pd->render_passes & EEVEE_RENDER_PASS_DIFFUSE_COLOR) {
material_renderpass_accumulate(effects,
fbl,
material_accum_ps,
NULL,
nullptr,
pd,
txl->diff_color_accum,
sldata->renderpass_ubo.diff_color);
@@ -1124,7 +1137,7 @@ void EEVEE_material_output_accumulate(EEVEE_ViewLayerData *sldata, EEVEE_Data *v
material_renderpass_accumulate(effects,
fbl,
material_accum_ps,
NULL,
nullptr,
pd,
txl->diff_light_accum,
sldata->renderpass_ubo.diff_light);
@@ -1143,7 +1156,7 @@ void EEVEE_material_output_accumulate(EEVEE_ViewLayerData *sldata, EEVEE_Data *v
material_renderpass_accumulate(effects,
fbl,
material_accum_ps,
NULL,
nullptr,
pd,
txl->spec_color_accum,
sldata->renderpass_ubo.spec_color);
@@ -1156,7 +1169,7 @@ void EEVEE_material_output_accumulate(EEVEE_ViewLayerData *sldata, EEVEE_Data *v
material_renderpass_accumulate(effects,
fbl,
material_accum_ps,
NULL,
nullptr,
pd,
txl->spec_light_accum,
sldata->renderpass_ubo.spec_light);
@@ -1198,8 +1211,9 @@ void EEVEE_material_transparent_output_init(EEVEE_Data *vedata)
/* Intermediate result to blend objects on. */
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_ATTACHMENT;
DRW_texture_ensure_fullscreen_2d_ex(
&txl->transparent_depth_tmp, GPU_DEPTH24_STENCIL8, usage, 0);
DRW_texture_ensure_fullscreen_2d_ex(&txl->transparent_color_tmp, GPU_RGBA16F, usage, 0);
&txl->transparent_depth_tmp, GPU_DEPTH24_STENCIL8, usage, DRWTextureFlag(0));
DRW_texture_ensure_fullscreen_2d_ex(
&txl->transparent_color_tmp, GPU_RGBA16F, usage, DRWTextureFlag(0));
GPU_framebuffer_ensure_config(&fbl->transparent_rpass_fb,
{GPU_ATTACHMENT_TEXTURE(txl->transparent_depth_tmp),
GPU_ATTACHMENT_TEXTURE(txl->transparent_color_tmp)});
@@ -1208,7 +1222,8 @@ void EEVEE_material_transparent_output_init(EEVEE_Data *vedata)
const eGPUTextureFormat texture_format = (true) ? GPU_RGBA32F : GPU_RGBA16F;
eGPUTextureUsage usage_accum = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_HOST_READ |
GPU_TEXTURE_USAGE_ATTACHMENT;
DRW_texture_ensure_fullscreen_2d_ex(&txl->transparent_accum, texture_format, usage_accum, 0);
DRW_texture_ensure_fullscreen_2d_ex(
&txl->transparent_accum, texture_format, usage_accum, DRWTextureFlag(0));
GPU_framebuffer_ensure_config(
&fbl->transparent_rpass_accum_fb,
{GPU_ATTACHMENT_NONE, GPU_ATTACHMENT_TEXTURE(txl->transparent_accum)});
@@ -1220,7 +1235,7 @@ void EEVEE_material_transparent_output_init(EEVEE_Data *vedata)
DRWShadingGroup *grp = DRW_shgroup_create(EEVEE_shaders_renderpasses_accumulate_sh_get(),
psl->transparent_accum_ps);
DRW_shgroup_uniform_texture(grp, "inputBuffer", txl->transparent_color_tmp);
DRW_shgroup_call(grp, DRW_cache_fullscreen_quad_get(), NULL);
DRW_shgroup_call(grp, DRW_cache_fullscreen_quad_get(), nullptr);
}
}
}
@@ -31,7 +31,7 @@ void EEVEE_mist_output_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)

/* Create FrameBuffer. */
/* Should be enough precision for many samples. */
DRW_texture_ensure_fullscreen_2d(&txl->mist_accum, GPU_R32F, 0);
DRW_texture_ensure_fullscreen_2d(&txl->mist_accum, GPU_R32F, DRWTextureFlag(0));

GPU_framebuffer_ensure_config(&fbl->mist_accum_fb,
{GPU_ATTACHMENT_NONE, GPU_ATTACHMENT_TEXTURE(txl->mist_accum)});
@@ -54,8 +54,8 @@ void EEVEE_mist_output_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
}
}
else {
float near = DRW_view_near_distance_get(NULL);
float far = DRW_view_far_distance_get(NULL);
float near = DRW_view_near_distance_get(nullptr);
float far = DRW_view_far_distance_get(nullptr);
/* Fallback */
g_data->mist_start = near;
g_data->mist_inv_dist = 1.0f / fabsf(far - near);
@@ -73,16 +73,16 @@ void EEVEE_mist_output_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
DRW_shgroup_uniform_block(grp, "common_block", sldata->common_ubo);
DRW_shgroup_uniform_block(grp, "renderpass_block", sldata->renderpass_ubo.combined);
DRW_shgroup_uniform_vec3(grp, "mistSettings", &g_data->mist_start, 1);
DRW_shgroup_call(grp, DRW_cache_fullscreen_quad_get(), NULL);
DRW_shgroup_call(grp, DRW_cache_fullscreen_quad_get(), nullptr);
}

void EEVEE_mist_output_accumulate(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Data *vedata)
void EEVEE_mist_output_accumulate(EEVEE_ViewLayerData * /*sldata*/, EEVEE_Data *vedata)
{
EEVEE_FramebufferList *fbl = vedata->fbl;
EEVEE_PassList *psl = vedata->psl;
EEVEE_EffectsInfo *effects = vedata->stl->effects;

if (fbl->mist_accum_fb != NULL) {
if (fbl->mist_accum_fb != nullptr) {
GPU_framebuffer_bind(fbl->mist_accum_fb);

/* Clear texture. */
@@ -36,7 +36,7 @@
#include "GPU_texture.h"
#include "eevee_private.h"

int EEVEE_motion_blur_init(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Data *vedata)
int EEVEE_motion_blur_init(EEVEE_ViewLayerData * /*sldata*/, EEVEE_Data *vedata)
{
EEVEE_StorageList *stl = vedata->stl;
EEVEE_FramebufferList *fbl = vedata->fbl;
@@ -55,9 +55,9 @@ int EEVEE_motion_blur_init(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Data *veda
if ((effects->motion_blur_max > 0) && (scene->eevee.flag & SCE_EEVEE_MOTION_BLUR_ENABLED)) {
if (DRW_state_is_scene_render()) {
int mb_step = effects->motion_blur_step;
DRW_view_viewmat_get(NULL, effects->motion_blur.camera[mb_step].viewmat, false);
DRW_view_persmat_get(NULL, effects->motion_blur.camera[mb_step].persmat, false);
DRW_view_persmat_get(NULL, effects->motion_blur.camera[mb_step].persinv, true);
DRW_view_viewmat_get(nullptr, effects->motion_blur.camera[mb_step].viewmat, false);
DRW_view_persmat_get(nullptr, effects->motion_blur.camera[mb_step].persmat, false);
DRW_view_persmat_get(nullptr, effects->motion_blur.camera[mb_step].persinv, true);
}

const float *fs_size = DRW_viewport_size_get();
@@ -98,16 +98,16 @@ static void eevee_motion_blur_sync_camera(EEVEE_Data *vedata)
EEVEE_EffectsInfo *effects = vedata->stl->effects;
if (DRW_state_is_scene_render()) {
int mb_step = effects->motion_blur_step;
DRW_view_viewmat_get(NULL, effects->motion_blur.camera[mb_step].viewmat, false);
DRW_view_persmat_get(NULL, effects->motion_blur.camera[mb_step].persmat, false);
DRW_view_persmat_get(NULL, effects->motion_blur.camera[mb_step].persinv, true);
DRW_view_viewmat_get(nullptr, effects->motion_blur.camera[mb_step].viewmat, false);
DRW_view_persmat_get(nullptr, effects->motion_blur.camera[mb_step].persmat, false);
DRW_view_persmat_get(nullptr, effects->motion_blur.camera[mb_step].persinv, true);
}

effects->motion_blur_near_far[0] = fabsf(DRW_view_near_distance_get(NULL));
effects->motion_blur_near_far[1] = fabsf(DRW_view_far_distance_get(NULL));
effects->motion_blur_near_far[0] = fabsf(DRW_view_near_distance_get(nullptr));
effects->motion_blur_near_far[1] = fabsf(DRW_view_far_distance_get(nullptr));
}

void EEVEE_motion_blur_cache_init(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Data *vedata)
void EEVEE_motion_blur_cache_init(EEVEE_ViewLayerData * /*sldata*/, EEVEE_Data *vedata)
{
EEVEE_PassList *psl = vedata->psl;
EEVEE_StorageList *stl = vedata->stl;
@@ -135,17 +135,19 @@ void EEVEE_motion_blur_cache_init(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Dat
GPUShader *sh = EEVEE_shaders_effect_motion_blur_velocity_tiles_sh_get();
grp = DRW_shgroup_create(sh, psl->velocity_tiles_x);
DRW_shgroup_uniform_texture(grp, "velocityBuffer", effects->velocity_tx);
DRW_shgroup_uniform_ivec2_copy(grp, "velocityBufferSize", (int[2]){fs_size[0], fs_size[1]});
DRW_shgroup_uniform_ivec2_copy(
grp, "velocityBufferSize", blender::int2{int(fs_size[0]), int(fs_size[1])});
DRW_shgroup_uniform_vec2(grp, "viewportSize", DRW_viewport_size_get(), 1);
DRW_shgroup_uniform_vec2(grp, "viewportSizeInv", DRW_viewport_invert_size_get(), 1);
DRW_shgroup_uniform_ivec2_copy(grp, "gatherStep", (int[2]){1, 0});
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
DRW_shgroup_uniform_ivec2_copy(grp, "gatherStep", blender::int2{1, 0});
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);

grp = DRW_shgroup_create(sh, psl->velocity_tiles);
DRW_shgroup_uniform_texture(grp, "velocityBuffer", effects->velocity_tiles_x_tx);
DRW_shgroup_uniform_ivec2_copy(grp, "velocityBufferSize", (int[2]){tx_size[0], fs_size[1]});
DRW_shgroup_uniform_ivec2_copy(grp, "gatherStep", (int[2]){0, 1});
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
DRW_shgroup_uniform_ivec2_copy(
grp, "velocityBufferSize", blender::int2{tx_size[0], int(fs_size[1])});
DRW_shgroup_uniform_ivec2_copy(grp, "gatherStep", blender::int2{0, 1});
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);

/* Expand max tiles by keeping the max tile in each tile neighborhood. */
DRW_PASS_CREATE(psl->velocity_tiles_expand[0], DRW_STATE_WRITE_COLOR);
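In the velocity-tiles hunk above, the `(int[2]){fs_size[0], fs_size[1]}` compound literals become `blender::int2{...}`, and because `fs_size` holds floats, C++ list-initialization also rejects the implicit narrowing, hence the added `int(...)` casts. A standalone sketch with a stand-in `int2`, not the Blender type:

struct int2 {
  int x, y;
};

static void set_uniform(const int2 & /*value*/) {}

int main()
{
  const float fs_size[2] = {1920.0f, 1080.0f};

  /* C: set_uniform((int[2]){fs_size[0], fs_size[1]});
   * C++: no compound literals, and {fs_size[0], ...} would be a narrowing error,
   * so the values are cast explicitly. */
  set_uniform(int2{int(fs_size[0]), int(fs_size[1])});
  return 0;
}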
@@ -158,12 +160,12 @@ void EEVEE_motion_blur_cache_init(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Dat
|
||||
DRW_shgroup_uniform_texture(grp, "velocityBuffer", tile_tx);
|
||||
DRW_shgroup_uniform_vec2(grp, "viewportSize", DRW_viewport_size_get(), 1);
|
||||
DRW_shgroup_uniform_vec2(grp, "viewportSizeInv", DRW_viewport_invert_size_get(), 1);
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
|
||||
}
|
||||
}
|
||||
{
|
||||
DRW_PASS_CREATE(psl->motion_blur, DRW_STATE_WRITE_COLOR);
|
||||
const GPUSamplerState state = GPU_SAMPLER_DEFAULT;
|
||||
const GPUSamplerState state = GPUSamplerState::default_sampler();
|
||||
int expand_steps = 1 + (max_ii(0, effects->motion_blur_max - 1) / EEVEE_VELOCITY_TILE_SIZE);
|
||||
GPUTexture *tile_tx = (expand_steps & 1) ? effects->velocity_tiles_x_tx :
|
||||
effects->velocity_tiles_tx;
|
||||
@@ -176,11 +178,11 @@ void EEVEE_motion_blur_cache_init(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Dat
|
||||
DRW_shgroup_uniform_texture(grp, "tileMaxBuffer", tile_tx);
|
||||
DRW_shgroup_uniform_float_copy(grp, "depthScale", scene->eevee.motion_blur_depth_scale);
|
||||
DRW_shgroup_uniform_vec2(grp, "nearFar", effects->motion_blur_near_far, 1);
|
||||
DRW_shgroup_uniform_bool_copy(grp, "isPerspective", DRW_view_is_persp_get(NULL));
|
||||
DRW_shgroup_uniform_bool_copy(grp, "isPerspective", DRW_view_is_persp_get(nullptr));
|
||||
DRW_shgroup_uniform_vec2(grp, "viewportSize", DRW_viewport_size_get(), 1);
|
||||
DRW_shgroup_uniform_vec2(grp, "viewportSizeInv", DRW_viewport_invert_size_get(), 1);
|
||||
DRW_shgroup_uniform_ivec2_copy(grp, "tileBufferSize", tx_size);
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
|
||||
}
|
||||
{
|
||||
DRW_PASS_CREATE(psl->velocity_object, DRW_STATE_WRITE_COLOR | DRW_STATE_DEPTH_EQUAL);
|
||||
@@ -205,13 +207,13 @@ void EEVEE_motion_blur_cache_init(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Dat
|
||||
EEVEE_motion_blur_data_init(mb_data);
|
||||
}
|
||||
else {
|
||||
psl->motion_blur = NULL;
|
||||
psl->velocity_object = NULL;
|
||||
psl->velocity_hair = NULL;
|
||||
psl->motion_blur = nullptr;
|
||||
psl->velocity_object = nullptr;
|
||||
psl->velocity_hair = nullptr;
|
||||
}
|
||||
}
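The function signatures in this file change in the same mechanical way: the UNUSED() macro, which on GCC/Clang roughly expands to an attribute-annotated dummy name, is replaced by an unnamed C++ parameter whose original name survives as a comment. A minimal sketch, assuming simplified forward declarations and a hypothetical *_sketch function in place of the real EEVEE types and callbacks:

struct EEVEE_ViewLayerData;
struct EEVEE_Data;

/* C spelling: the macro keeps the compiler from warning, e.g.
 * void fn(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Data *vedata); */

/* C++ spelling: the parameter is simply left unnamed; keeping the old name in a
 * comment preserves the self-documenting signature without -Wunused-parameter. */
static void motion_blur_cache_init_sketch(EEVEE_ViewLayerData * /*sldata*/, EEVEE_Data *vedata)
{
  (void)vedata; /* Body elided in this sketch. */
}

int main()
{
  motion_blur_cache_init_sketch(nullptr, nullptr);
  return 0;
}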

void EEVEE_motion_blur_hair_cache_populate(EEVEE_ViewLayerData *UNUSED(sldata),
void EEVEE_motion_blur_hair_cache_populate(EEVEE_ViewLayerData * /*sldata*/,
EEVEE_Data *vedata,
Object *ob,
ParticleSystem *psys,
@@ -220,9 +222,9 @@ void EEVEE_motion_blur_hair_cache_populate(EEVEE_ViewLayerData *UNUSED(sldata),
|
||||
EEVEE_PassList *psl = vedata->psl;
|
||||
EEVEE_StorageList *stl = vedata->stl;
|
||||
EEVEE_EffectsInfo *effects = stl->effects;
|
||||
DRWShadingGroup *grp = NULL;
|
||||
DRWShadingGroup *grp = nullptr;
|
||||
|
||||
if (!DRW_state_is_scene_render() || psl->velocity_hair == NULL) {
|
||||
if (!DRW_state_is_scene_render() || psl->velocity_hair == nullptr) {
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -236,7 +238,7 @@ void EEVEE_motion_blur_hair_cache_populate(EEVEE_ViewLayerData *UNUSED(sldata),
|
||||
DRW_hair_duplimat_get(ob, psys, md, mb_data->obmat[mb_step]);
|
||||
|
||||
EEVEE_HairMotionData *mb_hair = EEVEE_motion_blur_hair_data_get(mb_data, ob);
|
||||
int psys_id = (md != NULL) ? BLI_findindex(&ob->modifiers, md) : 0;
|
||||
int psys_id = (md != nullptr) ? BLI_findindex(&ob->modifiers, md) : 0;
|
||||
|
||||
if (psys_id >= mb_hair->psys_len) {
|
||||
/* This should never happen. It means the modifier list was changed by frame evaluation. */
|
||||
@@ -256,7 +258,7 @@ void EEVEE_motion_blur_hair_cache_populate(EEVEE_ViewLayerData *UNUSED(sldata),
|
||||
GPUTexture *tex_prev = mb_hair->psys[psys_id].step_data[MB_PREV].hair_pos_tx;
|
||||
GPUTexture *tex_next = mb_hair->psys[psys_id].step_data[MB_NEXT].hair_pos_tx;
|
||||
|
||||
grp = DRW_shgroup_hair_create_sub(ob, psys, md, effects->motion_blur.hair_grp, NULL);
|
||||
grp = DRW_shgroup_hair_create_sub(ob, psys, md, effects->motion_blur.hair_grp, nullptr);
|
||||
DRW_shgroup_uniform_mat4(grp, "prevModelMatrix", mb_data->obmat[MB_PREV]);
|
||||
DRW_shgroup_uniform_mat4(grp, "currModelMatrix", mb_data->obmat[MB_CURR]);
|
||||
DRW_shgroup_uniform_mat4(grp, "nextModelMatrix", mb_data->obmat[MB_NEXT]);
|
||||
@@ -272,7 +274,7 @@ void EEVEE_motion_blur_hair_cache_populate(EEVEE_ViewLayerData *UNUSED(sldata),
|
||||
}
|
||||
}
|
||||
|
||||
void EEVEE_motion_blur_curves_cache_populate(EEVEE_ViewLayerData *UNUSED(sldata),
|
||||
void EEVEE_motion_blur_curves_cache_populate(EEVEE_ViewLayerData * /*sldata*/,
|
||||
EEVEE_Data *vedata,
|
||||
Object *ob)
|
||||
{
|
||||
@@ -280,14 +282,14 @@ void EEVEE_motion_blur_curves_cache_populate(EEVEE_ViewLayerData *UNUSED(sldata)
|
||||
EEVEE_StorageList *stl = vedata->stl;
|
||||
EEVEE_EffectsInfo *effects = stl->effects;
|
||||
|
||||
if (!DRW_state_is_scene_render() || psl->velocity_hair == NULL) {
|
||||
if (!DRW_state_is_scene_render() || psl->velocity_hair == nullptr) {
|
||||
return;
|
||||
}
|
||||
|
||||
/* For now we assume curves objects are always moving. */
|
||||
EEVEE_ObjectMotionData *mb_data = EEVEE_motion_blur_object_data_get(
|
||||
&effects->motion_blur, ob, false);
|
||||
if (mb_data == NULL) {
|
||||
if (mb_data == nullptr) {
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -309,7 +311,8 @@ void EEVEE_motion_blur_curves_cache_populate(EEVEE_ViewLayerData *UNUSED(sldata)
|
||||
GPUTexture *tex_prev = mb_curves->psys[0].step_data[MB_PREV].hair_pos_tx;
|
||||
GPUTexture *tex_next = mb_curves->psys[0].step_data[MB_NEXT].hair_pos_tx;
|
||||
|
||||
DRWShadingGroup *grp = DRW_shgroup_curves_create_sub(ob, effects->motion_blur.hair_grp, NULL);
|
||||
DRWShadingGroup *grp = DRW_shgroup_curves_create_sub(
|
||||
ob, effects->motion_blur.hair_grp, nullptr);
|
||||
DRW_shgroup_uniform_mat4(grp, "prevModelMatrix", mb_data->obmat[MB_PREV]);
|
||||
DRW_shgroup_uniform_mat4(grp, "currModelMatrix", mb_data->obmat[MB_CURR]);
|
||||
DRW_shgroup_uniform_mat4(grp, "nextModelMatrix", mb_data->obmat[MB_NEXT]);
|
||||
@@ -324,16 +327,16 @@ void EEVEE_motion_blur_curves_cache_populate(EEVEE_ViewLayerData *UNUSED(sldata)
|
||||
}
|
||||
}
|
||||
|
||||
void EEVEE_motion_blur_cache_populate(EEVEE_ViewLayerData *UNUSED(sldata),
|
||||
void EEVEE_motion_blur_cache_populate(EEVEE_ViewLayerData * /*sldata*/,
|
||||
EEVEE_Data *vedata,
|
||||
Object *ob)
|
||||
{
|
||||
EEVEE_PassList *psl = vedata->psl;
|
||||
EEVEE_StorageList *stl = vedata->stl;
|
||||
EEVEE_EffectsInfo *effects = stl->effects;
|
||||
DRWShadingGroup *grp = NULL;
|
||||
DRWShadingGroup *grp = nullptr;
|
||||
|
||||
if (!DRW_state_is_scene_render() || psl->velocity_object == NULL) {
|
||||
if (!DRW_state_is_scene_render() || psl->velocity_object == nullptr) {
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -369,7 +372,7 @@ void EEVEE_motion_blur_cache_populate(EEVEE_ViewLayerData *UNUSED(sldata),
|
||||
|
||||
if (mb_step == MB_CURR) {
|
||||
GPUBatch *batch = DRW_cache_object_surface_get(ob);
|
||||
if (batch == NULL) {
|
||||
if (batch == nullptr) {
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -406,10 +409,10 @@ void EEVEE_motion_blur_cache_populate(EEVEE_ViewLayerData *UNUSED(sldata),
|
||||
else if (is_deform) {
|
||||
/* Store vertex position buffer. */
|
||||
mb_geom->vbo[mb_step] = DRW_cache_object_pos_vertbuf_get(ob);
|
||||
mb_geom->use_deform = (mb_geom->vbo[mb_step] != NULL);
|
||||
mb_geom->use_deform = (mb_geom->vbo[mb_step] != nullptr);
|
||||
}
|
||||
else {
|
||||
mb_geom->vbo[mb_step] = NULL;
|
||||
mb_geom->vbo[mb_step] = nullptr;
|
||||
mb_geom->use_deform = false;
|
||||
}
|
||||
}
|
||||
@@ -423,7 +426,7 @@ static void motion_blur_remove_vbo_reference_from_batch(GPUBatch *batch,
|
||||
for (int i = 0; i < GPU_BATCH_VBO_MAX_LEN; i++) {
|
||||
if (ELEM(batch->verts[i], vbo1, vbo2)) {
|
||||
/* Avoid double reference of the VBOs. */
|
||||
batch->verts[i] = NULL;
|
||||
batch->verts[i] = nullptr;
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -456,10 +459,11 @@ void EEVEE_motion_blur_cache_finish(EEVEE_Data *vedata)
|
||||
BLI_ghashIterator_done(&ghi) == false;
|
||||
BLI_ghashIterator_step(&ghi))
|
||||
{
|
||||
EEVEE_ObjectMotionData *mb_data = BLI_ghashIterator_getValue(&ghi);
|
||||
EEVEE_ObjectMotionData *mb_data = static_cast<EEVEE_ObjectMotionData *>(
|
||||
BLI_ghashIterator_getValue(&ghi));
|
||||
EEVEE_HairMotionData *mb_hair = mb_data->hair_data;
|
||||
EEVEE_GeometryMotionData *mb_geom = mb_data->geometry_data;
|
||||
if (mb_hair != NULL && mb_hair->use_deform) {
|
||||
if (mb_hair != nullptr && mb_hair->use_deform) {
|
||||
if (mb_step == MB_CURR) {
|
||||
/* TODO(fclem): Check if vertex count mismatch. */
|
||||
mb_hair->use_deform = true;
|
||||
@@ -467,7 +471,7 @@ void EEVEE_motion_blur_cache_finish(EEVEE_Data *vedata)
|
||||
else {
|
||||
for (int i = 0; i < mb_hair->psys_len; i++) {
|
||||
GPUVertBuf *vbo = mb_hair->psys[i].step_data[mb_step].hair_pos;
|
||||
if (vbo == NULL) {
|
||||
if (vbo == nullptr) {
|
||||
continue;
|
||||
}
|
||||
EEVEE_HairMotionStepData **step_data_cache_ptr;
|
||||
@@ -475,8 +479,8 @@ void EEVEE_motion_blur_cache_finish(EEVEE_Data *vedata)
|
||||
vbo,
|
||||
(void ***)&step_data_cache_ptr))
|
||||
{
|
||||
EEVEE_HairMotionStepData *new_step_data = MEM_callocN(sizeof(EEVEE_HairMotionStepData),
|
||||
__func__);
|
||||
EEVEE_HairMotionStepData *new_step_data = static_cast<EEVEE_HairMotionStepData *>(
|
||||
MEM_callocN(sizeof(EEVEE_HairMotionStepData), __func__));
|
||||
/* Duplicate the vbo, otherwise it would be lost when evaluating another frame. */
|
||||
new_step_data->hair_pos = GPU_vertbuf_duplicate(vbo);
|
||||
/* Create vbo immediately to bind to texture buffer. */
|
||||
@@ -489,7 +493,7 @@ void EEVEE_motion_blur_cache_finish(EEVEE_Data *vedata)
|
||||
}
|
||||
}
|
||||
}
|
||||
if (mb_geom != NULL && mb_geom->use_deform) {
|
||||
if (mb_geom != nullptr && mb_geom->use_deform) {
|
||||
if (mb_step == MB_CURR) {
|
||||
/* Modify batch to have data from adjacent frames. */
|
||||
GPUBatch *batch = mb_geom->batch;
|
||||
@@ -560,29 +564,29 @@ void EEVEE_motion_blur_swap_data(EEVEE_Data *vedata)
|
||||
/* Swap #position_vbo_cache pointers. */
|
||||
if (effects->motion_blur.position_vbo_cache[MB_PREV]) {
|
||||
BLI_ghash_free(effects->motion_blur.position_vbo_cache[MB_PREV],
|
||||
NULL,
|
||||
nullptr,
|
||||
(GHashValFreeFP)GPU_vertbuf_discard);
|
||||
}
|
||||
effects->motion_blur.position_vbo_cache[MB_PREV] =
|
||||
effects->motion_blur.position_vbo_cache[MB_NEXT];
|
||||
effects->motion_blur.position_vbo_cache[MB_NEXT] = NULL;
|
||||
effects->motion_blur.position_vbo_cache[MB_NEXT] = nullptr;
|
||||
|
||||
/* Swap #hair_motion_step_cache pointers. */
|
||||
if (effects->motion_blur.hair_motion_step_cache[MB_PREV]) {
|
||||
BLI_ghash_free(effects->motion_blur.hair_motion_step_cache[MB_PREV],
|
||||
NULL,
|
||||
nullptr,
|
||||
(GHashValFreeFP)EEVEE_motion_hair_step_free);
|
||||
}
|
||||
effects->motion_blur.hair_motion_step_cache[MB_PREV] =
|
||||
effects->motion_blur.hair_motion_step_cache[MB_NEXT];
|
||||
effects->motion_blur.hair_motion_step_cache[MB_NEXT] = NULL;
|
||||
effects->motion_blur.hair_motion_step_cache[MB_NEXT] = nullptr;
|
||||
|
||||
/* Rename attributes in #position_vbo_cache. */
|
||||
for (BLI_ghashIterator_init(&ghi, effects->motion_blur.position_vbo_cache[MB_PREV]);
|
||||
!BLI_ghashIterator_done(&ghi);
|
||||
BLI_ghashIterator_step(&ghi))
|
||||
{
|
||||
GPUVertBuf *vbo = BLI_ghashIterator_getValue(&ghi);
|
||||
GPUVertBuf *vbo = static_cast<GPUVertBuf *>(BLI_ghashIterator_getValue(&ghi));
|
||||
GPUVertFormat *format = (GPUVertFormat *)GPU_vertbuf_get_format(vbo);
|
||||
int attrib_id = GPU_vertformat_attr_id_get(format, "nxt");
|
||||
GPU_vertformat_attr_rename(format, attrib_id, "prv");
|
||||
@@ -592,29 +596,30 @@ void EEVEE_motion_blur_swap_data(EEVEE_Data *vedata)
|
||||
for (BLI_ghashIterator_init(&ghi, effects->motion_blur.object); !BLI_ghashIterator_done(&ghi);
|
||||
BLI_ghashIterator_step(&ghi))
|
||||
{
|
||||
EEVEE_ObjectMotionData *mb_data = BLI_ghashIterator_getValue(&ghi);
|
||||
EEVEE_ObjectMotionData *mb_data = static_cast<EEVEE_ObjectMotionData *>(
|
||||
BLI_ghashIterator_getValue(&ghi));
|
||||
EEVEE_GeometryMotionData *mb_geom = mb_data->geometry_data;
|
||||
EEVEE_HairMotionData *mb_hair = mb_data->hair_data;
|
||||
|
||||
copy_m4_m4(mb_data->obmat[MB_PREV], mb_data->obmat[MB_NEXT]);
|
||||
|
||||
if (mb_hair != NULL) {
|
||||
if (mb_hair != nullptr) {
|
||||
for (int i = 0; i < mb_hair->psys_len; i++) {
|
||||
mb_hair->psys[i].step_data[MB_PREV].hair_pos =
|
||||
mb_hair->psys[i].step_data[MB_NEXT].hair_pos;
|
||||
mb_hair->psys[i].step_data[MB_PREV].hair_pos_tx =
|
||||
mb_hair->psys[i].step_data[MB_NEXT].hair_pos_tx;
|
||||
mb_hair->psys[i].step_data[MB_NEXT].hair_pos = NULL;
|
||||
mb_hair->psys[i].step_data[MB_NEXT].hair_pos_tx = NULL;
|
||||
mb_hair->psys[i].step_data[MB_NEXT].hair_pos = nullptr;
|
||||
mb_hair->psys[i].step_data[MB_NEXT].hair_pos_tx = nullptr;
|
||||
}
|
||||
}
|
||||
if (mb_geom != NULL) {
|
||||
if (mb_geom->batch != NULL) {
|
||||
if (mb_geom != nullptr) {
|
||||
if (mb_geom->batch != nullptr) {
|
||||
motion_blur_remove_vbo_reference_from_batch(
|
||||
mb_geom->batch, mb_geom->vbo[MB_PREV], mb_geom->vbo[MB_NEXT]);
|
||||
}
|
||||
mb_geom->vbo[MB_PREV] = mb_geom->vbo[MB_NEXT];
|
||||
mb_geom->vbo[MB_NEXT] = NULL;
|
||||
mb_geom->vbo[MB_NEXT] = nullptr;
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -24,7 +24,7 @@
|
||||
|
||||
static struct {
|
||||
GPUTexture *dummy_horizon_tx;
|
||||
} e_data = {NULL}; /* Engine data */
|
||||
} e_data = {nullptr}; /* Engine data */
|
||||
|
||||
int EEVEE_occlusion_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
|
||||
{
|
||||
@@ -80,7 +80,7 @@ int EEVEE_occlusion_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
|
||||
{GPU_ATTACHMENT_NONE, GPU_ATTACHMENT_TEXTURE(effects->gtao_horizons_debug)});
|
||||
}
|
||||
else {
|
||||
effects->gtao_horizons_debug = NULL;
|
||||
effects->gtao_horizons_debug = nullptr;
|
||||
}
|
||||
|
||||
effects->gtao_horizons = (scene_eval->eevee.flag & SCE_EEVEE_GTAO_ENABLED) ?
|
||||
@@ -112,7 +112,7 @@ void EEVEE_occlusion_output_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata
|
||||
DefaultTextureList *dtxl = DRW_viewport_texture_list_get();
|
||||
|
||||
/* Should be enough precision for many samples. */
|
||||
DRW_texture_ensure_fullscreen_2d(&txl->ao_accum, texture_format, 0);
|
||||
DRW_texture_ensure_fullscreen_2d(&txl->ao_accum, texture_format, DRWTextureFlag(0));
|
||||
|
||||
GPU_framebuffer_ensure_config(&fbl->ao_accum_fb,
|
||||
{GPU_ATTACHMENT_NONE, GPU_ATTACHMENT_TEXTURE(txl->ao_accum)});
|
||||
@@ -129,7 +129,7 @@ void EEVEE_occlusion_output_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata
|
||||
DRW_shgroup_uniform_texture_ref(grp, "horizonBuffer", &effects->gtao_horizons_renderpass);
|
||||
DRW_shgroup_uniform_block(grp, "common_block", sldata->common_ubo);
|
||||
DRW_shgroup_uniform_block(grp, "renderpass_block", sldata->renderpass_ubo.combined);
|
||||
DRW_shgroup_call(grp, DRW_cache_fullscreen_quad_get(), NULL);
|
||||
DRW_shgroup_call(grp, DRW_cache_fullscreen_quad_get(), nullptr);
|
||||
}
|
||||
|
||||
void EEVEE_occlusion_cache_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
|
||||
@@ -160,7 +160,7 @@ void EEVEE_occlusion_cache_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
|
||||
DRW_shgroup_uniform_texture_ref(grp, "maxzBuffer", &txl->maxzbuffer);
|
||||
DRW_shgroup_uniform_block(grp, "common_block", sldata->common_ubo);
|
||||
DRW_shgroup_uniform_block(grp, "renderpass_block", sldata->renderpass_ubo.combined);
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
|
||||
|
||||
if (G.debug_value == 6) {
|
||||
DRW_PASS_CREATE(psl->ao_horizon_debug, DRW_STATE_WRITE_COLOR);
|
||||
@@ -173,7 +173,7 @@ void EEVEE_occlusion_cache_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
|
||||
DRW_shgroup_uniform_texture_ref(grp, "horizonBuffer", &effects->gtao_horizons_renderpass);
|
||||
DRW_shgroup_uniform_block(grp, "common_block", sldata->common_ubo);
|
||||
DRW_shgroup_uniform_block(grp, "renderpass_block", sldata->renderpass_ubo.combined);
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -218,7 +218,7 @@ void EEVEE_occlusion_compute(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
|
||||
}
|
||||
}
|
||||
|
||||
void EEVEE_occlusion_draw_debug(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Data *vedata)
|
||||
void EEVEE_occlusion_draw_debug(EEVEE_ViewLayerData * /*sldata*/, EEVEE_Data *vedata)
|
||||
{
|
||||
EEVEE_PassList *psl = vedata->psl;
|
||||
EEVEE_FramebufferList *fbl = vedata->fbl;
|
||||
@@ -244,7 +244,7 @@ void EEVEE_occlusion_output_accumulate(EEVEE_ViewLayerData *sldata, EEVEE_Data *
|
||||
EEVEE_PassList *psl = vedata->psl;
|
||||
EEVEE_EffectsInfo *effects = vedata->stl->effects;
|
||||
|
||||
if (fbl->ao_accum_fb != NULL) {
|
||||
if (fbl->ao_accum_fb != nullptr) {
|
||||
DefaultTextureList *dtxl = DRW_viewport_texture_list_get();
|
||||
|
||||
/* Update the min_max/horizon buffers so the refraction materials appear in it. */
|
||||
@@ -684,6 +684,7 @@ typedef enum EEVEE_EffectsFlag {
EFFECT_TAA_REPROJECT = (1 << 13), /* should be mutually exclusive with EFFECT_TAA */
EFFECT_DEPTH_DOUBLE_BUFFER = (1 << 14), /* Not really an effect but a feature */
} EEVEE_EffectsFlag;
ENUM_OPERATORS(EEVEE_EffectsFlag, EFFECT_DEPTH_DOUBLE_BUFFER)

typedef struct EEVEE_EffectsInfo {
EEVEE_EffectsFlag enabled_effects;
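ENUM_OPERATORS is needed because C++ no longer lets the int result of a bitwise OR convert back into the enum, so flag enums that C treated as plain ints need their operators defined explicitly; the same strictness is behind casts such as DRWTextureFlag(0) elsewhere in this diff. A reduced sketch of what such a macro provides, using a hypothetical EffectsFlag rather than the real BLI macro (which also covers &, ~, ^ and the compound assignments):

#include <cstdint>

enum EffectsFlag : uint32_t {
  EFFECT_NONE = 0,
  EFFECT_MOTION_BLUR = (1 << 0),
  EFFECT_TAA = (1 << 1),
};

inline constexpr EffectsFlag operator|(EffectsFlag a, EffectsFlag b)
{
  return EffectsFlag(uint32_t(a) | uint32_t(b));
}
inline EffectsFlag &operator|=(EffectsFlag &a, EffectsFlag b)
{
  return a = a | b;
}

int main()
{
  EffectsFlag enabled = EFFECT_NONE;
  /* In C this compiles without the operators above; in C++ it needs them,
   * because `int` does not implicitly convert back to the enum type. */
  enabled |= EFFECT_MOTION_BLUR | EFFECT_TAA;
  return enabled == (EFFECT_MOTION_BLUR | EFFECT_TAA) ? 0 : 1;
}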
|
||||
|
||||
@@ -54,7 +54,7 @@ bool EEVEE_render_init(EEVEE_Data *ved, RenderEngine *engine, Depsgraph *depsgra
|
||||
|
||||
/* Alloc transient data. */
|
||||
if (!stl->g_data) {
|
||||
stl->g_data = MEM_callocN(sizeof(*stl->g_data), __func__);
|
||||
stl->g_data = static_cast<EEVEE_PrivateData *>(MEM_callocN(sizeof(*stl->g_data), __func__));
|
||||
}
|
||||
EEVEE_PrivateData *g_data = stl->g_data;
|
||||
g_data->background_alpha = DRW_state_draw_background() ? 1.0f : 0.0f;
|
||||
@@ -66,7 +66,7 @@ bool EEVEE_render_init(EEVEE_Data *ved, RenderEngine *engine, Depsgraph *depsgra
|
||||
g_data->overscan = scene->eevee.overscan / 100.0f;
|
||||
g_data->overscan_pixels = roundf(max_ff(size_orig[0], size_orig[1]) * g_data->overscan);
|
||||
|
||||
madd_v2_v2v2fl(size_final, size_orig, (float[2]){2.0f, 2.0f}, g_data->overscan_pixels);
|
||||
madd_v2_v2v2fl(size_final, size_orig, blender::float2{2.0f, 2.0f}, g_data->overscan_pixels);
|
||||
|
||||
camtexcofac[0] = size_final[0] / size_orig[0];
|
||||
camtexcofac[1] = size_final[1] / size_orig[1];
|
||||
@@ -82,8 +82,8 @@ bool EEVEE_render_init(EEVEE_Data *ved, RenderEngine *engine, Depsgraph *depsgra
|
||||
}
|
||||
|
||||
const int final_res[2] = {
|
||||
size_orig[0] + g_data->overscan_pixels * 2.0f,
|
||||
size_orig[1] + g_data->overscan_pixels * 2.0f,
|
||||
int(size_orig[0] + g_data->overscan_pixels * 2.0f),
|
||||
int(size_orig[1] + g_data->overscan_pixels * 2.0f),
|
||||
};
|
||||
|
||||
int max_dim = max_ii(final_res[0], final_res[1]);
|
||||
@@ -102,7 +102,7 @@ bool EEVEE_render_init(EEVEE_Data *ved, RenderEngine *engine, Depsgraph *depsgra
|
||||
DRW_render_viewport_size_set(final_res);
|
||||
|
||||
/* TODO: 32 bit depth. */
|
||||
DRW_texture_ensure_fullscreen_2d(&dtxl->depth, GPU_DEPTH24_STENCIL8, 0);
|
||||
DRW_texture_ensure_fullscreen_2d(&dtxl->depth, GPU_DEPTH24_STENCIL8, DRWTextureFlag(0));
|
||||
DRW_texture_ensure_fullscreen_2d(&txl->color, GPU_RGBA32F, DRW_TEX_FILTER);
|
||||
|
||||
GPU_framebuffer_ensure_config(
|
||||
@@ -153,7 +153,7 @@ void EEVEE_render_view_sync(EEVEE_Data *vedata, RenderEngine *engine, Depsgraph
|
||||
|
||||
invert_m4_m4(viewmat, viewinv);
|
||||
|
||||
DRWView *view = DRW_view_create(viewmat, winmat, NULL, NULL, NULL);
|
||||
DRWView *view = DRW_view_create(viewmat, winmat, nullptr, nullptr, nullptr);
|
||||
DRW_view_reset();
|
||||
DRW_view_default_set(view);
|
||||
DRW_view_set_active(view);
|
||||
@@ -180,13 +180,13 @@ void EEVEE_render_cache_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
|
||||
void EEVEE_render_cache(void *vedata, Object *ob, RenderEngine *engine, Depsgraph *depsgraph)
|
||||
{
|
||||
EEVEE_ViewLayerData *sldata = EEVEE_view_layer_data_ensure();
|
||||
EEVEE_Data *data = vedata;
|
||||
EEVEE_Data *data = static_cast<EEVEE_Data *>(vedata);
|
||||
EEVEE_StorageList *stl = data->stl;
|
||||
EEVEE_PrivateData *g_data = stl->g_data;
|
||||
EEVEE_LightProbesInfo *pinfo = sldata->probes;
|
||||
bool cast_shadow = false;
|
||||
|
||||
const bool do_cryptomatte = (engine != NULL) &&
|
||||
const bool do_cryptomatte = (engine != nullptr) &&
|
||||
((g_data->render_passes & EEVEE_RENDER_PASS_CRYPTOMATTE) != 0);
|
||||
|
||||
eevee_id_update(vedata, &ob->id);
|
||||
@@ -207,12 +207,13 @@ void EEVEE_render_cache(void *vedata, Object *ob, RenderEngine *engine, Depsgrap
|
||||
if (engine && (ob->base_flag & BASE_FROM_DUPLI) == 0) {
|
||||
char info[42];
|
||||
SNPRINTF(info, "Syncing %s", ob->id.name + 2);
|
||||
RE_engine_update_stats(engine, NULL, info);
|
||||
RE_engine_update_stats(engine, nullptr, info);
|
||||
}
|
||||
|
||||
const int ob_visibility = DRW_object_visibility_in_active_context(ob);
|
||||
if (ob_visibility & OB_VISIBLE_PARTICLES) {
|
||||
EEVEE_particle_hair_cache_populate(vedata, sldata, ob, &cast_shadow);
|
||||
EEVEE_particle_hair_cache_populate(
|
||||
static_cast<EEVEE_Data *>(vedata), sldata, ob, &cast_shadow);
|
||||
if (do_cryptomatte) {
|
||||
EEVEE_cryptomatte_particle_hair_cache_populate(data, sldata, ob);
|
||||
}
|
||||
@@ -220,23 +221,24 @@ void EEVEE_render_cache(void *vedata, Object *ob, RenderEngine *engine, Depsgrap
|
||||
|
||||
if (ob_visibility & OB_VISIBLE_SELF) {
|
||||
if (ob->type == OB_MESH) {
|
||||
EEVEE_materials_cache_populate(vedata, sldata, ob, &cast_shadow);
|
||||
EEVEE_materials_cache_populate(static_cast<EEVEE_Data *>(vedata), sldata, ob, &cast_shadow);
|
||||
if (do_cryptomatte) {
|
||||
EEVEE_cryptomatte_cache_populate(data, sldata, ob);
|
||||
}
|
||||
}
|
||||
else if (ob->type == OB_CURVES) {
|
||||
EEVEE_object_curves_cache_populate(vedata, sldata, ob, &cast_shadow);
|
||||
EEVEE_object_curves_cache_populate(
|
||||
static_cast<EEVEE_Data *>(vedata), sldata, ob, &cast_shadow);
|
||||
if (do_cryptomatte) {
|
||||
EEVEE_cryptomatte_object_curves_cache_populate(data, sldata, ob);
|
||||
}
|
||||
}
|
||||
else if (ob->type == OB_VOLUME) {
|
||||
Scene *scene = DEG_get_evaluated_scene(depsgraph);
|
||||
EEVEE_volumes_cache_object_add(sldata, vedata, scene, ob);
|
||||
EEVEE_volumes_cache_object_add(sldata, static_cast<EEVEE_Data *>(vedata), scene, ob);
|
||||
}
|
||||
else if (ob->type == OB_LIGHTPROBE) {
|
||||
EEVEE_lightprobes_cache_add(sldata, vedata, ob);
|
||||
EEVEE_lightprobes_cache_add(sldata, static_cast<EEVEE_Data *>(vedata), ob);
|
||||
}
|
||||
else if (ob->type == OB_LAMP) {
|
||||
EEVEE_lights_cache_add(sldata, ob);
|
||||
@@ -257,7 +259,7 @@ static void eevee_render_color_result(RenderLayer *rl,
|
||||
EEVEE_Data *vedata)
|
||||
{
|
||||
RenderPass *rp = RE_pass_find_by_name(rl, render_pass_name, viewname);
|
||||
if (rp == NULL) {
|
||||
if (rp == nullptr) {
|
||||
return;
|
||||
}
|
||||
GPU_framebuffer_bind(framebuffer);
|
||||
@@ -276,7 +278,7 @@ static void eevee_render_result_combined(RenderLayer *rl,
|
||||
const char *viewname,
|
||||
const rcti *rect,
|
||||
EEVEE_Data *vedata,
|
||||
EEVEE_ViewLayerData *UNUSED(sldata))
|
||||
EEVEE_ViewLayerData * /*sldata*/)
|
||||
{
|
||||
eevee_render_color_result(
|
||||
rl, viewname, rect, RE_PASSNAME_COMBINED, 4, vedata->stl->effects->final_fb, vedata);
|
||||
@@ -586,7 +588,7 @@ void EEVEE_render_draw(EEVEE_Data *vedata, RenderEngine *engine, RenderLayer *rl
|
||||
else if ((render_samples % 25) == 0 || (render_samples + 1) == tot_sample) {
|
||||
char info[42];
|
||||
SNPRINTF(info, "Rendering %u / %u samples", render_samples + 1, tot_sample);
|
||||
RE_engine_update_stats(engine, NULL, info);
|
||||
RE_engine_update_stats(engine, nullptr, info);
|
||||
}
|
||||
|
||||
/* Copy previous persmat to UBO data */
|
||||
@@ -20,7 +20,7 @@

#include "eevee_private.h"

typedef enum eRenderPassPostProcessType {
enum eRenderPassPostProcessType {
PASS_POST_UNDEFINED = 0,
PASS_POST_ACCUMULATED_COLOR = 1,
PASS_POST_ACCUMULATED_COLOR_ALPHA = 2,
@@ -31,7 +31,7 @@ typedef enum eRenderPassPostProcessType {
PASS_POST_NORMAL = 7,
PASS_POST_TWO_LIGHT_BUFFERS = 8,
PASS_POST_ACCUMULATED_TRANSMITTANCE_COLOR = 9,
} eRenderPassPostProcessType;
};
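Dropping the typedef is enough here because in C++ the enum tag is already a type name; the same strictness also removes the implicit int-to-enum conversion C allowed, which is why the assignment from v3d->shading.render_pass in the next hunk gains an explicit eViewLayerEEVEEPassType(...) cast. A small sketch with placeholder pass values:

/* C spelling: without the typedef, every use site would have to write
 * `enum eRenderPassPostProcessType`.
 * typedef enum eRenderPassPostProcessType { ... } eRenderPassPostProcessType; */

/* C++ spelling: the enum name is a type by itself. */
enum eRenderPassPostProcessType {
  PASS_POST_UNDEFINED = 0,
  PASS_POST_ACCUMULATED_COLOR = 1,
};

int main()
{
  int stored = 1; /* e.g. an int coming out of DNA / RNA. */
  /* C accepts the assignment implicitly; C++ requires the explicit cast. */
  eRenderPassPostProcessType type = eRenderPassPostProcessType(stored);
  return type == PASS_POST_ACCUMULATED_COLOR ? 0 : 1;
}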

/* bitmask containing all renderpasses that need post-processing */
#define EEVEE_RENDERPASSES_WITH_POST_PROCESSING \
@@ -78,7 +78,7 @@ void EEVEE_renderpasses_init(EEVEE_Data *vedata)

if (v3d) {
const Scene *scene = draw_ctx->scene;
eViewLayerEEVEEPassType render_pass = v3d->shading.render_pass;
eViewLayerEEVEEPassType render_pass = eViewLayerEEVEEPassType(v3d->shading.render_pass);
g_data->aov_hash = 0;
|
||||
|
||||
if (render_pass == EEVEE_RENDER_PASS_BLOOM &&
|
||||
@@ -86,9 +86,9 @@ void EEVEE_renderpasses_init(EEVEE_Data *vedata)
|
||||
render_pass = EEVEE_RENDER_PASS_COMBINED;
|
||||
}
|
||||
if (render_pass == EEVEE_RENDER_PASS_AOV) {
|
||||
ViewLayerAOV *aov = BLI_findstring(
|
||||
&view_layer->aovs, v3d->shading.aov_name, offsetof(ViewLayerAOV, name));
|
||||
if (aov != NULL) {
|
||||
ViewLayerAOV *aov = static_cast<ViewLayerAOV *>(
|
||||
BLI_findstring(&view_layer->aovs, v3d->shading.aov_name, offsetof(ViewLayerAOV, name)));
|
||||
if (aov != nullptr) {
|
||||
g_data->aov_hash = EEVEE_renderpasses_aov_hash(aov);
|
||||
}
|
||||
else {
|
||||
@@ -100,7 +100,8 @@ void EEVEE_renderpasses_init(EEVEE_Data *vedata)
|
||||
g_data->render_passes = render_pass;
|
||||
}
|
||||
else {
|
||||
eViewLayerEEVEEPassType enabled_render_passes = view_layer->eevee.render_passes;
|
||||
eViewLayerEEVEEPassType enabled_render_passes = eViewLayerEEVEEPassType(
|
||||
view_layer->eevee.render_passes);
|
||||
|
||||
#define ENABLE_FROM_LEGACY(name_legacy, name_eevee) \
|
||||
SET_FLAG_FROM_TEST(enabled_render_passes, \
|
||||
@@ -166,7 +167,7 @@ void EEVEE_renderpasses_output_init(EEVEE_ViewLayerData *sldata,
|
||||
/* Should be enough to store the data needs for a single pass.
|
||||
* Some passes will use less, but it is only relevant for final renderings and
|
||||
* when renderpasses other than `EEVEE_RENDER_PASS_COMBINED` are requested */
|
||||
DRW_texture_ensure_fullscreen_2d(&txl->renderpass, GPU_RGBA16F, 0);
|
||||
DRW_texture_ensure_fullscreen_2d(&txl->renderpass, GPU_RGBA16F, DRWTextureFlag(0));
|
||||
GPU_framebuffer_ensure_config(&fbl->renderpass_fb,
|
||||
{GPU_ATTACHMENT_NONE, GPU_ATTACHMENT_TEXTURE(txl->renderpass)});
|
||||
|
||||
@@ -237,14 +238,14 @@ void EEVEE_renderpasses_cache_finish(EEVEE_ViewLayerData *sldata, EEVEE_Data *ve
|
||||
DRW_shgroup_uniform_int(grp, "currentSample", &g_data->renderpass_current_sample, 1);
|
||||
DRW_shgroup_uniform_int(grp, "renderpassType", &g_data->renderpass_type, 1);
|
||||
DRW_shgroup_uniform_int(grp, "postProcessType", &g_data->renderpass_postprocess, 1);
|
||||
DRW_shgroup_call(grp, DRW_cache_fullscreen_quad_get(), NULL);
|
||||
DRW_shgroup_call(grp, DRW_cache_fullscreen_quad_get(), nullptr);
|
||||
}
|
||||
else {
|
||||
psl->renderpass_pass = NULL;
|
||||
psl->renderpass_pass = nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
void EEVEE_renderpasses_postprocess(EEVEE_ViewLayerData *UNUSED(sldata),
|
||||
void EEVEE_renderpasses_postprocess(EEVEE_ViewLayerData * /*sldata*/,
|
||||
EEVEE_Data *vedata,
|
||||
eViewLayerEEVEEPassType renderpass_type,
|
||||
int aov_index)
|
||||
@@ -467,7 +468,7 @@ void EEVEE_renderpasses_draw_debug(EEVEE_Data *vedata)
|
||||
EEVEE_StorageList *stl = vedata->stl;
|
||||
EEVEE_EffectsInfo *effects = stl->effects;
|
||||
|
||||
GPUTexture *tx = NULL;
|
||||
GPUTexture *tx = nullptr;
|
||||
/* Debug : Output buffer to view. */
|
||||
switch (G.debug_value) {
|
||||
case 1:
|
||||
@@ -33,7 +33,7 @@ int EEVEE_screen_raytrace_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
|
||||
if (scene_eval->eevee.flag & SCE_EEVEE_SSR_ENABLED) {
|
||||
const bool use_refraction = (scene_eval->eevee.flag & SCE_EEVEE_SSR_REFRACTION) != 0;
|
||||
|
||||
const bool is_persp = DRW_view_is_persp_get(NULL);
|
||||
const bool is_persp = DRW_view_is_persp_get(nullptr);
|
||||
if (effects->ssr_was_persp != is_persp) {
|
||||
effects->ssr_was_persp = is_persp;
|
||||
DRW_viewport_request_redraw();
|
||||
@@ -75,15 +75,15 @@ int EEVEE_screen_raytrace_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
|
||||
/* MRT for the shading pass in order to output needed data for the SSR pass. */
|
||||
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_ATTACHMENT;
|
||||
effects->ssr_specrough_input = DRW_texture_pool_query_2d_ex(
|
||||
UNPACK2(size_fs), format, usage, owner);
|
||||
UNPACK2(size_fs), format, usage, static_cast<DrawEngineType *>(owner));
|
||||
|
||||
GPU_framebuffer_texture_attach(fbl->main_fb, effects->ssr_specrough_input, 2, 0);
|
||||
|
||||
/* Ray-tracing output. */
|
||||
effects->ssr_hit_output = DRW_texture_pool_query_2d_ex(
|
||||
UNPACK2(tracing_res), GPU_RGBA16F, usage, owner);
|
||||
UNPACK2(tracing_res), GPU_RGBA16F, usage, static_cast<DrawEngineType *>(owner));
|
||||
effects->ssr_hit_depth = DRW_texture_pool_query_2d_ex(
|
||||
UNPACK2(tracing_res), GPU_R16F, usage, owner);
|
||||
UNPACK2(tracing_res), GPU_R16F, usage, static_cast<DrawEngineType *>(owner));
|
||||
|
||||
GPU_framebuffer_ensure_config(&fbl->screen_tracing_fb,
|
||||
{
|
||||
@@ -104,8 +104,8 @@ int EEVEE_screen_raytrace_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
|
||||
|
||||
/* Cleanup to release memory */
|
||||
GPU_FRAMEBUFFER_FREE_SAFE(fbl->screen_tracing_fb);
|
||||
effects->ssr_specrough_input = NULL;
|
||||
effects->ssr_hit_output = NULL;
|
||||
effects->ssr_specrough_input = nullptr;
|
||||
effects->ssr_hit_output = nullptr;
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -151,12 +151,13 @@ void EEVEE_screen_raytrace_cache_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *v
DRW_shgroup_uniform_block(grp, "planar_block", sldata->planar_ubo);
DRW_shgroup_uniform_block(grp, "common_block", sldata->common_ubo);
DRW_shgroup_uniform_block(grp, "renderpass_block", sldata->renderpass_ubo.combined);
DRW_shgroup_uniform_vec2_copy(grp, "targetSize", (float[2]){hitbuf_size[0], hitbuf_size[1]});
DRW_shgroup_uniform_vec2_copy(
grp, "targetSize", blender::float2{float(hitbuf_size[0]), float(hitbuf_size[1])});
DRW_shgroup_uniform_float_copy(
grp, "randomScale", effects->reflection_trace_full ? 0.0f : 0.5f);
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);

GPUSamplerState no_filter = GPU_SAMPLER_DEFAULT;
GPUSamplerState no_filter = GPUSamplerState::default_sampler();

if (effects->use_split_ssr_pass) {
/* Prepare passes for split reflections resolve variant. */
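The sampler change above swaps a macro-style default constant for a factory on the C++ GPUSamplerState struct. A reduced stand-in showing the shape of that design, with illustrative field names rather than the real GPU module definition:

/* Hypothetical SamplerState stand-in; only the factory pattern is the point. */
struct SamplerState {
  int filtering = 0;
  int extend_x = 0;
  int extend_yz = 0;

  /* A constexpr factory reads better in C++ than a macro constant and keeps
   * the default value next to the type it belongs to. */
  static constexpr SamplerState default_sampler()
  {
    return SamplerState{};
  }
};

int main()
{
  /* Old C-style spelling (via a macro constant): state = GPU_SAMPLER_DEFAULT; */
  const SamplerState state = SamplerState::default_sampler();
  return state.filtering;
}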
|
||||
@@ -195,7 +196,7 @@ void EEVEE_screen_raytrace_cache_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *v
|
||||
DRW_shgroup_uniform_block(grp, "renderpass_block", sldata->renderpass_ubo.combined);
|
||||
DRW_shgroup_uniform_int(grp, "samplePoolOffset", &effects->taa_current_sample, 1);
|
||||
DRW_shgroup_uniform_texture_ref(grp, "horizonBuffer", &effects->gtao_horizons);
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
|
||||
}
|
||||
}
|
||||
else {
|
||||
@@ -224,12 +225,12 @@ void EEVEE_screen_raytrace_cache_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *v
|
||||
DRW_shgroup_uniform_block(grp, "renderpass_block", sldata->renderpass_ubo.combined);
|
||||
DRW_shgroup_uniform_int(grp, "samplePoolOffset", &effects->taa_current_sample, 1);
|
||||
DRW_shgroup_uniform_texture_ref(grp, "horizonBuffer", &effects->gtao_horizons);
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void EEVEE_refraction_compute(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Data *vedata)
|
||||
void EEVEE_refraction_compute(EEVEE_ViewLayerData * /*sldata*/, EEVEE_Data *vedata)
|
||||
{
|
||||
EEVEE_FramebufferList *fbl = vedata->fbl;
|
||||
EEVEE_TextureList *txl = vedata->txl;
|
||||
@@ -244,7 +245,7 @@ void EEVEE_refraction_compute(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Data *v
|
||||
}
|
||||
}
|
||||
|
||||
void EEVEE_reflection_compute(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Data *vedata)
|
||||
void EEVEE_reflection_compute(EEVEE_ViewLayerData * /*sldata*/, EEVEE_Data *vedata)
|
||||
{
|
||||
EEVEE_PassList *psl = vedata->psl;
|
||||
EEVEE_FramebufferList *fbl = vedata->fbl;
|
||||
@@ -278,7 +279,7 @@ void EEVEE_reflection_compute(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Data *v
|
||||
}
|
||||
}
|
||||
|
||||
void EEVEE_reflection_output_init(EEVEE_ViewLayerData *UNUSED(sldata),
|
||||
void EEVEE_reflection_output_init(EEVEE_ViewLayerData * /*sldata*/,
|
||||
EEVEE_Data *vedata,
|
||||
uint tot_samples)
|
||||
{
|
||||
@@ -287,13 +288,13 @@ void EEVEE_reflection_output_init(EEVEE_ViewLayerData *UNUSED(sldata),
|
||||
|
||||
/* Create FrameBuffer. */
|
||||
const eGPUTextureFormat texture_format = (tot_samples > 256) ? GPU_RGBA32F : GPU_RGBA16F;
|
||||
DRW_texture_ensure_fullscreen_2d(&txl->ssr_accum, texture_format, 0);
|
||||
DRW_texture_ensure_fullscreen_2d(&txl->ssr_accum, texture_format, DRWTextureFlag(0));
|
||||
|
||||
GPU_framebuffer_ensure_config(&fbl->ssr_accum_fb,
|
||||
{GPU_ATTACHMENT_NONE, GPU_ATTACHMENT_TEXTURE(txl->ssr_accum)});
|
||||
}
|
||||
|
||||
void EEVEE_reflection_output_accumulate(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Data *vedata)
|
||||
void EEVEE_reflection_output_accumulate(EEVEE_ViewLayerData * /*sldata*/, EEVEE_Data *vedata)
|
||||
{
|
||||
EEVEE_FramebufferList *fbl = vedata->fbl;
|
||||
EEVEE_PassList *psl = vedata->psl;
|
||||
@@ -34,13 +34,15 @@ void EEVEE_shadows_init(EEVEE_ViewLayerData *sldata)
const Scene *scene_eval = DEG_get_evaluated_scene(draw_ctx->depsgraph);

if (!sldata->lights) {
sldata->lights = MEM_callocN(sizeof(EEVEE_LightsInfo), "EEVEE_LightsInfo");
sldata->light_ubo = GPU_uniformbuf_create_ex(sizeof(EEVEE_Light) * MAX_LIGHT, NULL, "evLight");
sldata->shadow_ubo = GPU_uniformbuf_create_ex(shadow_ubo_size, NULL, "evShadow");
sldata->lights = static_cast<EEVEE_LightsInfo *>(
MEM_callocN(sizeof(EEVEE_LightsInfo), "EEVEE_LightsInfo"));
sldata->light_ubo = GPU_uniformbuf_create_ex(
sizeof(EEVEE_Light) * MAX_LIGHT, nullptr, "evLight");
sldata->shadow_ubo = GPU_uniformbuf_create_ex(shadow_ubo_size, nullptr, "evShadow");

for (int i = 0; i < 2; i++) {
sldata->shcasters_buffers[i].bbox = MEM_mallocN(
sizeof(EEVEE_BoundBox) * SH_CASTER_ALLOC_CHUNK, __func__);
sldata->shcasters_buffers[i].bbox = static_cast<EEVEE_BoundBox *>(
MEM_mallocN(sizeof(EEVEE_BoundBox) * SH_CASTER_ALLOC_CHUNK, __func__));
sldata->shcasters_buffers[i].update = BLI_BITMAP_NEW(SH_CASTER_ALLOC_CHUNK, __func__);
sldata->shcasters_buffers[i].alloc_count = SH_CASTER_ALLOC_CHUNK;
sldata->shcasters_buffers[i].count = 0;
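Most of the mechanical churn in this commit comes from the rule illustrated above: C implicitly converts void * to any object pointer, C++ does not, so every MEM_callocN / MEM_mallocN / MEM_reallocN result assigned to a typed pointer now needs a static_cast. A self-contained sketch, with a hypothetical mem_callocN() standing in for the guardedalloc call:

#include <cstddef>
#include <cstdlib>

/* Stand-in for MEM_callocN(); the real call has the same void * return type,
 * which is what forces the casts throughout this commit. */
static void *mem_callocN(std::size_t size, const char * /*alloc_name*/)
{
  return std::calloc(1, size);
}

struct BoundBox {
  float center[3], halfdim[3];
};

int main()
{
  /* C: implicit void * -> BoundBox * conversion, no cast needed:
   * BoundBox *bbox = mem_callocN(sizeof(BoundBox), __func__); */

  /* C++: the conversion must be spelled out. */
  BoundBox *bbox = static_cast<BoundBox *>(mem_callocN(sizeof(BoundBox), __func__));
  bbox->halfdim[0] = 1.0f;
  std::free(bbox);
  return 0;
}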
|
||||
@@ -122,8 +124,8 @@ void EEVEE_shadows_caster_register(EEVEE_ViewLayerData *sldata, Object *ob)
|
||||
if (id >= frontbuffer->alloc_count) {
|
||||
/* Double capacity to prevent exponential slowdown. */
|
||||
frontbuffer->alloc_count *= 2;
|
||||
frontbuffer->bbox = MEM_reallocN(frontbuffer->bbox,
|
||||
sizeof(EEVEE_BoundBox) * frontbuffer->alloc_count);
|
||||
frontbuffer->bbox = static_cast<EEVEE_BoundBox *>(
|
||||
MEM_reallocN(frontbuffer->bbox, sizeof(EEVEE_BoundBox) * frontbuffer->alloc_count));
|
||||
BLI_BITMAP_RESIZE(frontbuffer->update, frontbuffer->alloc_count);
|
||||
}
|
||||
|
||||
@@ -143,7 +145,7 @@ void EEVEE_shadows_caster_register(EEVEE_ViewLayerData *sldata, Object *ob)
|
||||
update = oedata->need_update;
|
||||
|
||||
/* Always update shadow buffers in sculpt modes. */
|
||||
update |= ob->sculpt != NULL;
|
||||
update |= ob->sculpt != nullptr;
|
||||
|
||||
oedata->need_update = false;
|
||||
}
|
||||
@@ -217,13 +219,14 @@ void EEVEE_shadows_update(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
|
||||
|
||||
eGPUTextureUsage shadow_usage = GPU_TEXTURE_USAGE_ATTACHMENT | GPU_TEXTURE_USAGE_SHADER_READ;
|
||||
if (!sldata->shadow_cube_pool) {
|
||||
sldata->shadow_cube_pool = DRW_texture_create_2d_array_ex(linfo->shadow_cube_size,
|
||||
linfo->shadow_cube_size,
|
||||
max_ii(1, linfo->num_cube_layer * 6),
|
||||
shadow_pool_format,
|
||||
shadow_usage,
|
||||
DRW_TEX_FILTER | DRW_TEX_COMPARE,
|
||||
NULL);
|
||||
sldata->shadow_cube_pool = DRW_texture_create_2d_array_ex(
|
||||
linfo->shadow_cube_size,
|
||||
linfo->shadow_cube_size,
|
||||
max_ii(1, linfo->num_cube_layer * 6),
|
||||
shadow_pool_format,
|
||||
shadow_usage,
|
||||
DRWTextureFlag(DRW_TEX_FILTER | DRW_TEX_COMPARE),
|
||||
nullptr);
|
||||
}
|
||||
|
||||
if (!sldata->shadow_cascade_pool) {
|
||||
@@ -233,11 +236,11 @@ void EEVEE_shadows_update(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
|
||||
max_ii(1, linfo->num_cascade_layer),
|
||||
shadow_pool_format,
|
||||
shadow_usage,
|
||||
DRW_TEX_FILTER | DRW_TEX_COMPARE,
|
||||
NULL);
|
||||
DRWTextureFlag(DRW_TEX_FILTER | DRW_TEX_COMPARE),
|
||||
nullptr);
|
||||
}
|
||||
|
||||
if (sldata->shadow_fb == NULL) {
|
||||
if (sldata->shadow_fb == nullptr) {
|
||||
sldata->shadow_fb = GPU_framebuffer_create("shadow_fb");
|
||||
}
|
||||
|
||||
@@ -286,8 +289,8 @@ void EEVEE_shadows_update(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
|
||||
frontbuffer->alloc_count = divide_ceil_u(max_ii(1, frontbuffer->count),
|
||||
SH_CASTER_ALLOC_CHUNK) *
|
||||
SH_CASTER_ALLOC_CHUNK;
|
||||
frontbuffer->bbox = MEM_reallocN(frontbuffer->bbox,
|
||||
sizeof(EEVEE_BoundBox) * frontbuffer->alloc_count);
|
||||
frontbuffer->bbox = static_cast<EEVEE_BoundBox *>(
|
||||
MEM_reallocN(frontbuffer->bbox, sizeof(EEVEE_BoundBox) * frontbuffer->alloc_count));
|
||||
BLI_BITMAP_RESIZE(frontbuffer->update, frontbuffer->alloc_count);
|
||||
}
|
||||
}
|
||||
@@ -347,7 +350,7 @@ void EEVEE_shadows_draw(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata, DRWView
|
||||
|
||||
void EEVEE_shadow_output_init(EEVEE_ViewLayerData *sldata,
|
||||
EEVEE_Data *vedata,
|
||||
uint UNUSED(tot_samples))
|
||||
uint /*tot_samples*/)
|
||||
{
|
||||
EEVEE_FramebufferList *fbl = vedata->fbl;
|
||||
EEVEE_TextureList *txl = vedata->txl;
|
||||
@@ -356,7 +359,7 @@ void EEVEE_shadow_output_init(EEVEE_ViewLayerData *sldata,
|
||||
|
||||
/* Create FrameBuffer. */
|
||||
const eGPUTextureFormat texture_format = GPU_R32F;
|
||||
DRW_texture_ensure_fullscreen_2d(&txl->shadow_accum, texture_format, 0);
|
||||
DRW_texture_ensure_fullscreen_2d(&txl->shadow_accum, texture_format, DRWTextureFlag(0));
|
||||
|
||||
GPU_framebuffer_ensure_config(&fbl->shadow_accum_fb,
|
||||
{GPU_ATTACHMENT_NONE, GPU_ATTACHMENT_TEXTURE(txl->shadow_accum)});
|
||||
@@ -378,16 +381,16 @@ void EEVEE_shadow_output_init(EEVEE_ViewLayerData *sldata,
|
||||
DRW_shgroup_uniform_texture_ref(grp, "shadowCubeTexture", &sldata->shadow_cube_pool);
|
||||
DRW_shgroup_uniform_texture_ref(grp, "shadowCascadeTexture", &sldata->shadow_cascade_pool);
|
||||
|
||||
DRW_shgroup_call(grp, DRW_cache_fullscreen_quad_get(), NULL);
|
||||
DRW_shgroup_call(grp, DRW_cache_fullscreen_quad_get(), nullptr);
|
||||
}
|
||||
|
||||
void EEVEE_shadow_output_accumulate(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Data *vedata)
|
||||
void EEVEE_shadow_output_accumulate(EEVEE_ViewLayerData * /*sldata*/, EEVEE_Data *vedata)
|
||||
{
|
||||
EEVEE_FramebufferList *fbl = vedata->fbl;
|
||||
EEVEE_PassList *psl = vedata->psl;
|
||||
EEVEE_EffectsInfo *effects = vedata->stl->effects;
|
||||
|
||||
if (fbl->shadow_accum_fb != NULL) {
|
||||
if (fbl->shadow_accum_fb != nullptr) {
|
||||
GPU_framebuffer_bind(fbl->shadow_accum_fb);
|
||||
|
||||
/* Clear texture. */
|
||||
@@ -51,7 +51,7 @@ static void shadow_cascade_random_matrix_set(float mat[4][4], float radius, int
|
||||
EEVEE_sample_ellipse(i, mat[0], mat[1], radius, radius, jitter);
|
||||
float p[3];
|
||||
add_v3_v3v3(p, jitter, mat[2]);
|
||||
DRW_debug_sphere(p, 0.01f, (float[4]){1.0f, (sample_ofs == i) ? 1.0f : 0.0f, 0.0f, 1.0f});
|
||||
DRW_debug_sphere(p, 0.01f, blender::float4{1.0f, (sample_ofs == i) ? 1.0f : 0.0f, 0.0f, 1.0f});
|
||||
}
|
||||
#endif
|
||||
add_v3_v3(mat[2], jitter);
|
||||
@@ -372,11 +372,12 @@ static void eevee_ensure_cascade_views(EEVEE_ShadowCascadeRender *csm_render,
|
||||
DRWView *view[MAX_CASCADE_NUM])
|
||||
{
|
||||
for (int i = 0; i < csm_render->cascade_count; i++) {
|
||||
if (view[i] == NULL) {
|
||||
view[i] = DRW_view_create(csm_render->viewmat, csm_render->projmat[i], NULL, NULL, NULL);
|
||||
if (view[i] == nullptr) {
|
||||
view[i] = DRW_view_create(
|
||||
csm_render->viewmat, csm_render->projmat[i], nullptr, nullptr, nullptr);
|
||||
}
|
||||
else {
|
||||
DRW_view_update(view[i], csm_render->viewmat, csm_render->projmat[i], NULL, NULL);
|
||||
DRW_view_update(view[i], csm_render->viewmat, csm_render->projmat[i], nullptr, nullptr);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -73,7 +73,7 @@ start:
|
||||
#ifdef DEBUG_SHADOW_DISTRIBUTION
|
||||
float p[3];
|
||||
add_v3_v3v3(p, jitter, ws_sample_pos);
|
||||
DRW_debug_sphere(p, 0.01f, (float[4]){1.0f, (sample_ofs == i) ? 1.0f : 0.0f, 0.0f, 1.0f});
|
||||
DRW_debug_sphere(p, 0.01f, blender::float4{1.0f, (sample_ofs == i) ? 1.0f : 0.0f, 0.0f, 1.0f});
|
||||
if (i++ < sample_ofs) {
|
||||
goto start;
|
||||
}
|
||||
@@ -141,11 +141,11 @@ static void eevee_ensure_cube_views(
|
||||
float tmp[4][4];
|
||||
mul_m4_m4m4(tmp, cubefacemat[i], viewmat);
|
||||
|
||||
if (view[i] == NULL) {
|
||||
view[i] = DRW_view_create(tmp, winmat, NULL, NULL, NULL);
|
||||
if (view[i] == nullptr) {
|
||||
view[i] = DRW_view_create(tmp, winmat, nullptr, nullptr, nullptr);
|
||||
}
|
||||
else {
|
||||
DRW_view_update(view[i], tmp, winmat, NULL, NULL);
|
||||
DRW_view_update(view[i], tmp, winmat, nullptr, nullptr);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -20,7 +20,7 @@
|
||||
|
||||
#include "eevee_private.h"
|
||||
|
||||
void EEVEE_subsurface_init(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Data *UNUSED(vedata)) {}
|
||||
void EEVEE_subsurface_init(EEVEE_ViewLayerData * /*sldata*/, EEVEE_Data * /*vedata*/) {}
|
||||
|
||||
void EEVEE_subsurface_draw_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
|
||||
{
|
||||
@@ -93,16 +93,16 @@ void EEVEE_subsurface_draw_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
|
||||
GPU_FRAMEBUFFER_FREE_SAFE(fbl->sss_clear_fb);
|
||||
GPU_FRAMEBUFFER_FREE_SAFE(fbl->sss_accum_fb);
|
||||
DRW_TEXTURE_FREE_SAFE(txl->sss_accum);
|
||||
effects->sss_stencil = NULL;
|
||||
effects->sss_blur = NULL;
|
||||
effects->sss_irradiance = NULL;
|
||||
effects->sss_radius = NULL;
|
||||
effects->sss_stencil = nullptr;
|
||||
effects->sss_blur = nullptr;
|
||||
effects->sss_irradiance = nullptr;
|
||||
effects->sss_radius = nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
void EEVEE_subsurface_output_init(EEVEE_ViewLayerData *UNUSED(sldata),
|
||||
void EEVEE_subsurface_output_init(EEVEE_ViewLayerData * /*sldata*/,
|
||||
EEVEE_Data *vedata,
|
||||
uint UNUSED(tot_samples))
|
||||
uint /*tot_samples*/)
|
||||
{
|
||||
EEVEE_FramebufferList *fbl = vedata->fbl;
|
||||
EEVEE_TextureList *txl = vedata->txl;
|
||||
@@ -110,8 +110,8 @@ void EEVEE_subsurface_output_init(EEVEE_ViewLayerData *UNUSED(sldata),
|
||||
EEVEE_EffectsInfo *effects = stl->effects;
|
||||
|
||||
const eGPUTextureFormat texture_format_light = GPU_RGBA32F;
|
||||
const bool texture_created = txl->sss_accum == NULL;
|
||||
DRW_texture_ensure_fullscreen_2d(&txl->sss_accum, texture_format_light, 0);
|
||||
const bool texture_created = txl->sss_accum == nullptr;
|
||||
DRW_texture_ensure_fullscreen_2d(&txl->sss_accum, texture_format_light, DRWTextureFlag(0));
|
||||
|
||||
GPUTexture *stencil_tex = effects->sss_stencil;
|
||||
|
||||
@@ -172,7 +172,7 @@ void EEVEE_subsurface_add_pass(EEVEE_ViewLayerData *sldata,
|
||||
DefaultTextureList *dtxl = DRW_viewport_texture_list_get();
|
||||
GPUTexture **depth_src = GPU_depth_blitting_workaround() ? &effects->sss_stencil : &dtxl->depth;
|
||||
|
||||
GPUTexture *sss_tex_profile = NULL;
|
||||
GPUTexture *sss_tex_profile = nullptr;
|
||||
GPUUniformBuf *sss_profile = GPU_material_sss_profile_get(
|
||||
gpumat, stl->effects->sss_sample_count, &sss_tex_profile);
|
||||
|
||||
@@ -193,7 +193,7 @@ void EEVEE_subsurface_add_pass(EEVEE_ViewLayerData *sldata,
|
||||
DRW_shgroup_stencil_mask(shgrp, sss_id);
|
||||
|
||||
{
|
||||
GPUSamplerState state = GPU_SAMPLER_DEFAULT;
|
||||
GPUSamplerState state = GPUSamplerState::default_sampler();
|
||||
|
||||
DRWShadingGroup *grp = DRW_shgroup_create(EEVEE_shaders_subsurface_first_pass_sh_get(),
|
||||
psl->sss_blur_ps);
|
||||
@@ -205,7 +205,7 @@ void EEVEE_subsurface_add_pass(EEVEE_ViewLayerData *sldata,
|
||||
DRW_shgroup_uniform_block(grp, "common_block", sldata->common_ubo);
|
||||
DRW_shgroup_uniform_block(grp, "renderpass_block", sldata->renderpass_ubo.combined);
|
||||
DRW_shgroup_stencil_mask(grp, sss_id);
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
|
||||
|
||||
grp = DRW_shgroup_create(EEVEE_shaders_subsurface_second_pass_sh_get(), psl->sss_resolve_ps);
|
||||
DRW_shgroup_uniform_texture(grp, "utilTex", EEVEE_materials_get_util_tex());
|
||||
@@ -217,7 +217,7 @@ void EEVEE_subsurface_add_pass(EEVEE_ViewLayerData *sldata,
|
||||
DRW_shgroup_uniform_block(grp, "common_block", sldata->common_ubo);
|
||||
DRW_shgroup_uniform_block(grp, "renderpass_block", sldata->renderpass_ubo.combined);
|
||||
DRW_shgroup_stencil_mask(grp, sss_id);
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
|
||||
}
|
||||
|
||||
if (ma->blend_flag & MA_BL_TRANSLUCENCY) {
|
||||
@@ -235,11 +235,11 @@ void EEVEE_subsurface_add_pass(EEVEE_ViewLayerData *sldata,
|
||||
DRW_shgroup_uniform_block(grp, "common_block", sldata->common_ubo);
|
||||
DRW_shgroup_uniform_block(grp, "renderpass_block", sldata->renderpass_ubo.combined);
|
||||
DRW_shgroup_stencil_mask(grp, sss_id);
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
|
||||
}
|
||||
}
|
||||
|
||||
void EEVEE_subsurface_data_render(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Data *vedata)
|
||||
void EEVEE_subsurface_data_render(EEVEE_ViewLayerData * /*sldata*/, EEVEE_Data *vedata)
|
||||
{
|
||||
EEVEE_PassList *psl = vedata->psl;
|
||||
EEVEE_FramebufferList *fbl = vedata->fbl;
|
||||
@@ -327,14 +327,14 @@ void EEVEE_subsurface_compute(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
|
||||
}
|
||||
}
|
||||
|
||||
void EEVEE_subsurface_output_accumulate(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Data *vedata)
|
||||
void EEVEE_subsurface_output_accumulate(EEVEE_ViewLayerData * /*sldata*/, EEVEE_Data *vedata)
|
||||
{
|
||||
EEVEE_PassList *psl = vedata->psl;
|
||||
EEVEE_FramebufferList *fbl = vedata->fbl;
|
||||
EEVEE_StorageList *stl = vedata->stl;
|
||||
EEVEE_EffectsInfo *effects = stl->effects;
|
||||
|
||||
if (((effects->enabled_effects & EFFECT_SSS) != 0) && (fbl->sss_accum_fb != NULL)) {
|
||||
if (((effects->enabled_effects & EFFECT_SSS) != 0) && (fbl->sss_accum_fb != nullptr)) {
|
||||
/* Copy stencil channel, could be avoided (see EEVEE_subsurface_init) */
|
||||
GPU_framebuffer_blit(fbl->main_fb, 0, fbl->sss_accum_fb, 0, GPU_STENCIL_BIT);
|
||||
|
||||
@@ -27,7 +27,7 @@ static struct {
|
||||
float inverted_cdf[FILTER_CDF_TABLE_SIZE];
|
||||
} e_data = {false}; /* Engine data */
|
||||
|
||||
static float UNUSED_FUNCTION(filter_box)(float UNUSED(x))
|
||||
static float UNUSED_FUNCTION(filter_box)(float /*x*/)
|
||||
{
|
||||
return 1.0f;
|
||||
}
|
||||
@@ -92,7 +92,8 @@ static float eval_table(const float *table, float x)
|
||||
|
||||
static void eevee_create_cdf_table_temporal_sampling(void)
|
||||
{
|
||||
float *cdf_table = MEM_mallocN(sizeof(float) * FILTER_CDF_TABLE_SIZE, "Eevee Filter CDF table");
|
||||
float *cdf_table = static_cast<float *>(
|
||||
MEM_mallocN(sizeof(float) * FILTER_CDF_TABLE_SIZE, "Eevee Filter CDF table"));
|
||||
|
||||
float filter_width = 2.0f; /* Use a 2 pixel footprint by default. */
|
||||
|
||||
@@ -129,10 +130,10 @@ void EEVEE_temporal_sampling_matrices_calc(EEVEE_EffectsInfo *effects, const dou
|
||||
RenderData *rd = &scene->r;
|
||||
|
||||
float persmat[4][4], viewmat[4][4], winmat[4][4], wininv[4][4];
|
||||
DRW_view_persmat_get(NULL, persmat, false);
|
||||
DRW_view_viewmat_get(NULL, viewmat, false);
|
||||
DRW_view_winmat_get(NULL, winmat, false);
|
||||
DRW_view_winmat_get(NULL, wininv, true);
|
||||
DRW_view_persmat_get(nullptr, persmat, false);
|
||||
DRW_view_viewmat_get(nullptr, viewmat, false);
|
||||
DRW_view_winmat_get(nullptr, winmat, false);
|
||||
DRW_view_winmat_get(nullptr, wininv, true);
|
||||
|
||||
float ofs[2];
|
||||
EEVEE_temporal_sampling_offset_calc(ht_point, rd->gauss, ofs);
|
||||
@@ -176,7 +177,7 @@ void EEVEE_temporal_sampling_matrices_calc(EEVEE_EffectsInfo *effects, const dou
|
||||
}
|
||||
}
|
||||
|
||||
BLI_assert(effects->taa_view != NULL);
|
||||
BLI_assert(effects->taa_view != nullptr);
|
||||
|
||||
/* When rendering just update the view. This avoids recomputing the culling. */
|
||||
DRW_view_update_sub(effects->taa_view, viewmat, winmat);
|
||||
@@ -213,7 +214,7 @@ void EEVEE_temporal_sampling_create_view(EEVEE_Data *vedata)
|
||||
DRW_view_viewmat_get(default_view, viewmat, false);
|
||||
DRW_view_winmat_get(default_view, winmat, false);
|
||||
effects->taa_view = DRW_view_create_sub(default_view, viewmat, winmat);
|
||||
DRW_view_clip_planes_set(effects->taa_view, NULL, 0);
|
||||
DRW_view_clip_planes_set(effects->taa_view, nullptr, 0);
|
||||
}
|
||||
|
||||
int EEVEE_temporal_sampling_sample_count_get(const Scene *scene, const EEVEE_StorageList *stl)
|
||||
@@ -226,12 +227,13 @@ int EEVEE_temporal_sampling_sample_count_get(const Scene *scene, const EEVEE_Sto
|
||||
sample_count = (sample_count == 0) ? TAA_MAX_SAMPLE : sample_count;
|
||||
sample_count = divide_ceil_u(sample_count, timesteps);
|
||||
|
||||
int dof_sample_count = EEVEE_depth_of_field_sample_count_get(stl->effects, sample_count, NULL);
|
||||
int dof_sample_count = EEVEE_depth_of_field_sample_count_get(
|
||||
stl->effects, sample_count, nullptr);
|
||||
sample_count = dof_sample_count * divide_ceil_u(sample_count, dof_sample_count);
|
||||
return sample_count;
|
||||
}
|
||||
|
||||
int EEVEE_temporal_sampling_init(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Data *vedata)
|
||||
int EEVEE_temporal_sampling_init(EEVEE_ViewLayerData * /*sldata*/, EEVEE_Data *vedata)
|
||||
{
|
||||
EEVEE_StorageList *stl = vedata->stl;
|
||||
EEVEE_EffectsInfo *effects = stl->effects;
|
||||
@@ -270,9 +272,9 @@ int EEVEE_temporal_sampling_init(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Data
|
||||
|
||||
view_is_valid = view_is_valid && (stl->g_data->view_updated == false);
|
||||
|
||||
if (draw_ctx->evil_C != NULL) {
|
||||
if (draw_ctx->evil_C != nullptr) {
|
||||
struct wmWindowManager *wm = CTX_wm_manager(draw_ctx->evil_C);
|
||||
view_is_valid = view_is_valid && (ED_screen_animation_no_scrub(wm) == NULL);
|
||||
view_is_valid = view_is_valid && (ED_screen_animation_no_scrub(wm) == nullptr);
|
||||
}
|
||||
|
||||
effects->taa_total_sample = EEVEE_temporal_sampling_sample_count_get(scene_eval, stl);
|
||||
@@ -284,7 +286,7 @@ int EEVEE_temporal_sampling_init(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Data
|
||||
|
||||
/* Motion blur steps could reset the sampling when camera is animated (see #79970). */
|
||||
if (!DRW_state_is_scene_render()) {
|
||||
DRW_view_persmat_get(NULL, persmat, false);
|
||||
DRW_view_persmat_get(nullptr, persmat, false);
|
||||
view_is_valid = view_is_valid && compare_m4m4(persmat, effects->prev_drw_persmat, FLT_MIN);
|
||||
}
|
||||
|
||||
@@ -355,7 +357,7 @@ void EEVEE_temporal_sampling_cache_init(EEVEE_ViewLayerData *sldata, EEVEE_Data
|
||||
else {
|
||||
DRW_shgroup_uniform_float(grp, "alpha", &effects->taa_alpha, 1);
|
||||
}
|
||||
DRW_shgroup_call(grp, DRW_cache_fullscreen_quad_get(), NULL);
|
||||
DRW_shgroup_call(grp, DRW_cache_fullscreen_quad_get(), nullptr);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -421,6 +423,6 @@ void EEVEE_temporal_sampling_draw(EEVEE_Data *vedata)
|
||||
}
|
||||
}
|
||||
|
||||
DRW_view_persmat_get(NULL, effects->prev_drw_persmat, false);
|
||||
DRW_view_persmat_get(nullptr, effects->prev_drw_persmat, false);
|
||||
}
|
||||
}
|
||||
@@ -45,7 +45,7 @@ static struct {
|
||||
|
||||
GPUTexture *dummy_scatter;
|
||||
GPUTexture *dummy_transmit;
|
||||
} e_data = {NULL}; /* Engine data */
|
||||
} e_data = {nullptr}; /* Engine data */
|
||||
|
||||
void EEVEE_volumes_set_jitter(EEVEE_ViewLayerData *sldata, uint current_sample)
|
||||
{
|
||||
@@ -120,7 +120,7 @@ void EEVEE_volumes_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
|
||||
}
|
||||
|
||||
/* Like frostbite's paper, 5% blend of the new frame. */
|
||||
common_data->vol_history_alpha = (txl->volume_prop_scattering == NULL) ? 0.0f : 0.95f;
|
||||
common_data->vol_history_alpha = (txl->volume_prop_scattering == nullptr) ? 0.0f : 0.95f;
|
||||
|
||||
/* Temporal Super sampling jitter */
|
||||
uint ht_primes[3] = {3, 7, 2};
|
||||
@@ -129,9 +129,9 @@ void EEVEE_volumes_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
|
||||
/* If TAA is in use do not use the history buffer. */
|
||||
bool do_taa = ((effects->enabled_effects & EFFECT_TAA) != 0);
|
||||
|
||||
if (draw_ctx->evil_C != NULL) {
|
||||
if (draw_ctx->evil_C != nullptr) {
|
||||
struct wmWindowManager *wm = CTX_wm_manager(draw_ctx->evil_C);
|
||||
do_taa = do_taa && (ED_screen_animation_no_scrub(wm) == NULL);
|
||||
do_taa = do_taa && (ED_screen_animation_no_scrub(wm) == nullptr);
|
||||
}
|
||||
|
||||
if (do_taa) {
|
||||
@@ -158,11 +158,11 @@ void EEVEE_volumes_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
|
||||
common_data->vol_shadow_steps = 0;
|
||||
}
|
||||
|
||||
if (DRW_view_is_persp_get(NULL)) {
|
||||
if (DRW_view_is_persp_get(nullptr)) {
|
||||
float sample_distribution = scene_eval->eevee.volumetric_sample_distribution;
|
||||
sample_distribution = 4.0f * max_ff(1.0f - sample_distribution, 1e-2f);
|
||||
|
||||
const float clip_start = DRW_view_near_distance_get(NULL);
|
||||
const float clip_start = DRW_view_near_distance_get(nullptr);
|
||||
/* Negate */
|
||||
float near = integration_start = min_ff(-integration_start, clip_start - 1e-4f);
|
||||
float far = integration_end = min_ff(-integration_end, near - 1e-4f);
|
||||
@@ -173,8 +173,8 @@ void EEVEE_volumes_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
|
||||
common_data->vol_depth_param[2] = sample_distribution;
|
||||
}
|
||||
else {
|
||||
const float clip_start = DRW_view_near_distance_get(NULL);
|
||||
const float clip_end = DRW_view_far_distance_get(NULL);
|
||||
const float clip_start = DRW_view_near_distance_get(nullptr);
|
||||
const float clip_end = DRW_view_far_distance_get(nullptr);
|
||||
integration_start = min_ff(integration_end, clip_start);
|
||||
integration_end = max_ff(-integration_end, clip_end);
|
||||
|
||||
@@ -211,7 +211,7 @@ void EEVEE_volumes_cache_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
|
||||
|
||||
const DRWContextState *draw_ctx = DRW_context_state_get();
|
||||
Scene *scene = draw_ctx->scene;
|
||||
DRWShadingGroup *grp = NULL;
|
||||
DRWShadingGroup *grp = nullptr;
|
||||
|
||||
/* Quick breakdown of the Volumetric rendering:
|
||||
*
|
||||
@@ -245,9 +245,10 @@ void EEVEE_volumes_cache_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
|
||||
|
||||
/* World Volumetric */
|
||||
World *wo = scene->world;
|
||||
if (wo != NULL && wo->use_nodes && wo->nodetree && !LOOK_DEV_STUDIO_LIGHT_ENABLED(draw_ctx->v3d))
|
||||
if (wo != nullptr && wo->use_nodes && wo->nodetree &&
|
||||
!LOOK_DEV_STUDIO_LIGHT_ENABLED(draw_ctx->v3d))
|
||||
{
|
||||
GPUMaterial *mat = EEVEE_material_get(vedata, scene, NULL, wo, VAR_MAT_VOLUME);
|
||||
GPUMaterial *mat = EEVEE_material_get(vedata, scene, nullptr, wo, VAR_MAT_VOLUME);
|
||||
|
||||
if (mat && GPU_material_has_volume_output(mat)) {
|
||||
grp = DRW_shgroup_material_create(mat, psl->volumetric_world_ps);
|
||||
@@ -264,15 +265,15 @@ void EEVEE_volumes_cache_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
|
||||
DRW_shgroup_uniform_block(grp, "renderpass_block", sldata->renderpass_ubo.combined);
|
||||
|
||||
/* Fix principle volumetric not working with world materials. */
|
||||
grp = DRW_shgroup_volume_create_sub(NULL, NULL, grp, mat);
|
||||
grp = DRW_shgroup_volume_create_sub(nullptr, nullptr, grp, mat);
|
||||
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, common_data->vol_tex_size[2]);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, common_data->vol_tex_size[2]);
|
||||
|
||||
effects->enabled_effects |= (EFFECT_VOLUMETRIC | EFFECT_POST_BUFFER);
|
||||
}
|
||||
}
|
||||
|
||||
if (grp == NULL) {
|
||||
if (grp == nullptr) {
|
||||
/* If no world or volume material is present just clear the buffer with this drawcall */
|
||||
grp = DRW_shgroup_create(EEVEE_shaders_volumes_clear_sh_get(), psl->volumetric_world_ps);
|
||||
DRW_shgroup_uniform_block(grp, "common_block", sldata->common_ubo);
|
||||
@@ -283,7 +284,7 @@ void EEVEE_volumes_cache_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
|
||||
DRW_shgroup_uniform_block(grp, "shadow_block", sldata->shadow_ubo);
|
||||
DRW_shgroup_uniform_block(grp, "renderpass_block", sldata->renderpass_ubo.combined);
|
||||
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, common_data->vol_tex_size[2]);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, common_data->vol_tex_size[2]);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -294,7 +295,7 @@ void EEVEE_volumes_cache_object_add(EEVEE_ViewLayerData *sldata,
|
||||
{
|
||||
Material *ma = BKE_object_material_get_eval(ob, 1);
|
||||
|
||||
if (ma == NULL) {
|
||||
if (ma == nullptr) {
|
||||
if (ob->type == OB_VOLUME) {
|
||||
ma = BKE_material_default_volume();
|
||||
}
|
||||
@@ -312,15 +313,15 @@ void EEVEE_volumes_cache_object_add(EEVEE_ViewLayerData *sldata,
|
||||
}
|
||||
|
||||
int mat_options = VAR_MAT_VOLUME | VAR_MAT_MESH;
|
||||
GPUMaterial *mat = EEVEE_material_get(vedata, scene, ma, NULL, mat_options);
|
||||
GPUMaterial *mat = EEVEE_material_get(vedata, scene, ma, nullptr, mat_options);
|
||||
|
||||
/* If shader failed to compile or is currently compiling. */
|
||||
if (mat == NULL) {
|
||||
if (mat == nullptr) {
|
||||
return;
|
||||
}
|
||||
|
||||
GPUShader *sh = GPU_material_get_shader(mat);
|
||||
if (sh == NULL) {
|
||||
if (sh == nullptr) {
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -330,7 +331,7 @@ void EEVEE_volumes_cache_object_add(EEVEE_ViewLayerData *sldata,
|
||||
|
||||
grp = DRW_shgroup_volume_create_sub(scene, ob, grp, mat);
|
||||
|
||||
if (grp == NULL) {
|
||||
if (grp == nullptr) {
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -382,7 +383,7 @@ void EEVEE_volumes_cache_finish(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
|
||||
DRW_shgroup_uniform_block(grp, "probe_block", sldata->probe_ubo);
|
||||
DRW_shgroup_uniform_block(grp, "renderpass_block", sldata->renderpass_ubo.combined);
|
||||
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, common_data->vol_tex_size[2]);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, common_data->vol_tex_size[2]);
|
||||
|
||||
DRW_PASS_CREATE(psl->volumetric_integration_ps, DRW_STATE_WRITE_COLOR);
|
||||
grp = DRW_shgroup_create(EEVEE_shaders_volumes_integration_sh_get(),
|
||||
@@ -398,7 +399,7 @@ void EEVEE_volumes_cache_finish(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
|
||||
}
|
||||
|
||||
DRW_shgroup_call_procedural_triangles(
|
||||
grp, NULL, USE_VOLUME_OPTI ? 1 : common_data->vol_tex_size[2]);
|
||||
grp, nullptr, USE_VOLUME_OPTI ? 1 : common_data->vol_tex_size[2]);
|
||||
|
||||
DRW_PASS_CREATE(psl->volumetric_resolve_ps, DRW_STATE_WRITE_COLOR | DRW_STATE_BLEND_CUSTOM);
|
||||
grp = DRW_shgroup_create(EEVEE_shaders_volumes_resolve_sh_get(false),
|
||||
@@ -412,7 +413,7 @@ void EEVEE_volumes_cache_finish(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
|
||||
DRW_shgroup_uniform_block(grp, "renderpass_block", sldata->renderpass_ubo.combined);
|
||||
DRW_shgroup_uniform_block(grp, "shadow_block", sldata->shadow_ubo);
|
||||
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -426,18 +427,33 @@ void EEVEE_volumes_draw_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
|
||||
if ((effects->enabled_effects & EFFECT_VOLUMETRIC) != 0) {
|
||||
int *tex_size = common_data->vol_tex_size;
|
||||
|
||||
if (txl->volume_prop_scattering == NULL) {
|
||||
if (txl->volume_prop_scattering == nullptr) {
|
||||
/* Volume properties: We evaluate all volumetric objects
|
||||
* and store their final properties into each froxel */
|
||||
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_ATTACHMENT | GPU_TEXTURE_USAGE_SHADER_READ;
|
||||
txl->volume_prop_scattering = DRW_texture_create_3d_ex(
|
||||
tex_size[0], tex_size[1], tex_size[2], GPU_R11F_G11F_B10F, usage, DRW_TEX_FILTER, NULL);
|
||||
txl->volume_prop_extinction = DRW_texture_create_3d_ex(
|
||||
tex_size[0], tex_size[1], tex_size[2], GPU_R11F_G11F_B10F, usage, DRW_TEX_FILTER, NULL);
|
||||
txl->volume_prop_emission = DRW_texture_create_3d_ex(
|
||||
tex_size[0], tex_size[1], tex_size[2], GPU_R11F_G11F_B10F, usage, DRW_TEX_FILTER, NULL);
|
||||
txl->volume_prop_scattering = DRW_texture_create_3d_ex(tex_size[0],
|
||||
tex_size[1],
|
||||
tex_size[2],
|
||||
GPU_R11F_G11F_B10F,
|
||||
usage,
|
||||
DRW_TEX_FILTER,
|
||||
nullptr);
|
||||
txl->volume_prop_extinction = DRW_texture_create_3d_ex(tex_size[0],
|
||||
tex_size[1],
|
||||
tex_size[2],
|
||||
GPU_R11F_G11F_B10F,
|
||||
usage,
|
||||
DRW_TEX_FILTER,
|
||||
nullptr);
|
||||
txl->volume_prop_emission = DRW_texture_create_3d_ex(tex_size[0],
|
||||
tex_size[1],
|
||||
tex_size[2],
|
||||
GPU_R11F_G11F_B10F,
|
||||
usage,
|
||||
DRW_TEX_FILTER,
|
||||
nullptr);
|
||||
txl->volume_prop_phase = DRW_texture_create_3d_ex(
|
||||
tex_size[0], tex_size[1], tex_size[2], GPU_RG16F, usage, DRW_TEX_FILTER, NULL);
|
||||
tex_size[0], tex_size[1], tex_size[2], GPU_RG16F, usage, DRW_TEX_FILTER, nullptr);
|
||||
|
||||
/* Volume scattering: We compute for each froxel the
|
||||
* Scattered light towards the view. We also resolve temporal
|
||||
@@ -450,14 +466,14 @@ void EEVEE_volumes_draw_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
|
||||
GPU_R11F_G11F_B10F,
|
||||
usage_write,
|
||||
DRW_TEX_FILTER,
|
||||
NULL);
|
||||
nullptr);
|
||||
txl->volume_transmit = DRW_texture_create_3d_ex(tex_size[0],
|
||||
tex_size[1],
|
||||
tex_size[2],
|
||||
GPU_R11F_G11F_B10F,
|
||||
usage_write,
|
||||
DRW_TEX_FILTER,
|
||||
NULL);
|
||||
nullptr);
|
||||
|
||||
/* Final integration: We compute for each froxel the
|
||||
* amount of scattered light and extinction coefficient at this
|
||||
@@ -469,14 +485,14 @@ void EEVEE_volumes_draw_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
|
||||
GPU_R11F_G11F_B10F,
|
||||
usage_write,
|
||||
DRW_TEX_FILTER,
|
||||
NULL);
|
||||
nullptr);
|
||||
txl->volume_transmit_history = DRW_texture_create_3d_ex(tex_size[0],
|
||||
tex_size[1],
|
||||
tex_size[2],
|
||||
GPU_R11F_G11F_B10F,
|
||||
usage_write,
|
||||
DRW_TEX_FILTER,
|
||||
NULL);
|
||||
nullptr);
|
||||
}
|
||||
|
||||
GPU_framebuffer_ensure_config(&fbl->volumetric_fb,
|
||||
@@ -558,7 +574,7 @@ void EEVEE_volumes_compute(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
}
}

void EEVEE_volumes_resolve(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Data *vedata)
void EEVEE_volumes_resolve(EEVEE_ViewLayerData * /*sldata*/, EEVEE_Data *vedata)
{
EEVEE_PassList *psl = vedata->psl;
EEVEE_FramebufferList *fbl = vedata->fbl;
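/* Editorial note: the UNUSED(sldata) macro becomes an unnamed parameter carrying the old name in
 * a comment. C requires every parameter to be named, so the C code wrapped the name in a macro;
 * C++ allows omitting the name entirely, which silences unused-parameter warnings with no macro.
 * Hypothetical sketch (type and function names are illustrative only): */
struct ViewLayerData;
struct Data;

/* C spelling: void resolve(ViewLayerData *UNUSED(sldata), Data *vedata); */
void resolve(ViewLayerData * /*sldata*/, Data *vedata)
{
  (void)vedata; /* Remaining parameters are used (or ignored) as usual. */
}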
@@ -608,8 +624,10 @@ void EEVEE_volumes_output_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata,

/* Should be enough precision for many samples. */
const eGPUTextureFormat texture_format_accum = (tot_samples > 128) ? GPU_RGBA32F : GPU_RGBA16F;
DRW_texture_ensure_fullscreen_2d(&txl->volume_scatter_accum, texture_format_accum, 0);
DRW_texture_ensure_fullscreen_2d(&txl->volume_transmittance_accum, texture_format_accum, 0);
DRW_texture_ensure_fullscreen_2d(
&txl->volume_scatter_accum, texture_format_accum, DRWTextureFlag(0));
DRW_texture_ensure_fullscreen_2d(
&txl->volume_transmittance_accum, texture_format_accum, DRWTextureFlag(0));

GPU_framebuffer_ensure_config(&fbl->volumetric_accum_fb,
{GPU_ATTACHMENT_NONE,
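/* Editorial note: the hunk above wraps the literal 0 in DRWTextureFlag(0). C lets an int convert
 * implicitly to an enum parameter; C++ does not, so call sites spell the conversion out.
 * Standalone sketch with a hypothetical flag enum (not the real DRW API): */
enum TexFlag {
  TEX_NONE = 0,
  TEX_FILTER = 1 << 0,
  TEX_WRAP = 1 << 1,
};

static void create_texture(int w, int h, TexFlag flags)
{
  (void)w; (void)h; (void)flags;
}

int main()
{
  /* create_texture(16, 16, 0);          -- valid C, ill-formed C++ (int to enum). */
  create_texture(16, 16, TexFlag(0));    /* Explicit conversion, as in DRWTextureFlag(0). */
  create_texture(16, 16, TEX_FILTER);    /* Or pass a real enumerator. */
  return 0;
}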
@@ -618,7 +636,7 @@ void EEVEE_volumes_output_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata,
|
||||
|
||||
/* Create Pass and shgroup. */
|
||||
DRW_PASS_CREATE(psl->volumetric_accum_ps, DRW_STATE_WRITE_COLOR | DRW_STATE_BLEND_ADD_FULL);
|
||||
DRWShadingGroup *grp = NULL;
|
||||
DRWShadingGroup *grp = nullptr;
|
||||
if ((effects->enabled_effects & EFFECT_VOLUMETRIC) != 0) {
|
||||
grp = DRW_shgroup_create(EEVEE_shaders_volumes_resolve_sh_get(true), psl->volumetric_accum_ps);
|
||||
DRW_shgroup_uniform_texture_ref(grp, "inScattering", &txl->volume_scatter);
|
||||
@@ -632,16 +650,16 @@ void EEVEE_volumes_output_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata,
|
||||
* value. */
|
||||
grp = DRW_shgroup_create(EEVEE_shaders_volumes_accum_sh_get(), psl->volumetric_accum_ps);
|
||||
}
|
||||
DRW_shgroup_call(grp, DRW_cache_fullscreen_quad_get(), NULL);
|
||||
DRW_shgroup_call(grp, DRW_cache_fullscreen_quad_get(), nullptr);
|
||||
}
|
||||
|
||||
void EEVEE_volumes_output_accumulate(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Data *vedata)
|
||||
void EEVEE_volumes_output_accumulate(EEVEE_ViewLayerData * /*sldata*/, EEVEE_Data *vedata)
|
||||
{
|
||||
EEVEE_FramebufferList *fbl = vedata->fbl;
|
||||
EEVEE_PassList *psl = vedata->psl;
|
||||
EEVEE_EffectsInfo *effects = vedata->stl->effects;
|
||||
|
||||
if (fbl->volumetric_accum_fb != NULL) {
|
||||
if (fbl->volumetric_accum_fb != nullptr) {
|
||||
/* Accumulation pass. */
|
||||
GPU_framebuffer_bind(fbl->volumetric_accum_fb);
|
||||
|
||||
@@ -26,8 +26,6 @@
|
||||
|
||||
namespace blender::eevee {
|
||||
|
||||
ENUM_OPERATORS(eViewLayerEEVEEPassType, 1 << EEVEE_RENDER_PASS_MAX_BIT)
|
||||
|
||||
/* -------------------------------------------------------------------- */
|
||||
/** \name Arbitrary Output Variables
|
||||
* \{ */
|
||||
|
||||
@@ -37,10 +37,10 @@

#define EXTERNAL_ENGINE "BLENDER_EXTERNAL"

extern char datatoc_basic_depth_frag_glsl[];
extern char datatoc_basic_depth_vert_glsl[];
extern "C" char datatoc_basic_depth_frag_glsl[];
extern "C" char datatoc_basic_depth_vert_glsl[];

extern char datatoc_common_view_lib_glsl[];
extern "C" char datatoc_common_view_lib_glsl[];

/* *********** LISTS *********** */


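/* Editorial note: the datatoc_* shader strings are defined in generated C sources, so once the
 * engine file is compiled as C++ the declarations need C language linkage, otherwise the linker
 * looks for a mangled C++ symbol. Two-file sketch of the pattern (symbol name is a placeholder,
 * the definition is assumed to live in a separate C translation unit): */

/* In a C translation unit (e.g. generated_shader.c):
 *   char placeholder_shader_glsl[] = "void main() {}";
 */

/* In the C++ translation unit: */
extern "C" char placeholder_shader_glsl[]; /* Matches the unmangled C symbol. */

const char *shader_source()
{
  return placeholder_shader_glsl;
}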
@@ -37,20 +37,20 @@ void GPENCIL_antialiasing_init(GPENCIL_Data *vedata)
|
||||
DRW_shgroup_uniform_bool_copy(grp, "onlyAlpha", pd->draw_wireframe);
|
||||
DRW_shgroup_uniform_vec4_copy(grp, "viewportMetrics", metrics);
|
||||
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
|
||||
return;
|
||||
}
|
||||
|
||||
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_ATTACHMENT;
|
||||
|
||||
if (txl->smaa_search_tx == NULL) {
|
||||
if (txl->smaa_search_tx == nullptr) {
|
||||
|
||||
txl->smaa_search_tx = GPU_texture_create_2d(
|
||||
"smaa_search", SEARCHTEX_WIDTH, SEARCHTEX_HEIGHT, 1, GPU_R8, usage, NULL);
|
||||
"smaa_search", SEARCHTEX_WIDTH, SEARCHTEX_HEIGHT, 1, GPU_R8, usage, nullptr);
|
||||
GPU_texture_update(txl->smaa_search_tx, GPU_DATA_UBYTE, searchTexBytes);
|
||||
|
||||
txl->smaa_area_tx = GPU_texture_create_2d(
|
||||
"smaa_area", AREATEX_WIDTH, AREATEX_HEIGHT, 1, GPU_RG8, usage, NULL);
|
||||
"smaa_area", AREATEX_WIDTH, AREATEX_HEIGHT, 1, GPU_RG8, usage, nullptr);
|
||||
GPU_texture_update(txl->smaa_area_tx, GPU_DATA_UBYTE, areaTexBytes);
|
||||
|
||||
GPU_texture_filter_mode(txl->smaa_search_tx, true);
|
||||
@@ -89,7 +89,7 @@ void GPENCIL_antialiasing_init(GPENCIL_Data *vedata)
|
||||
grp, "lumaWeight", pd->scene->grease_pencil_settings.smaa_threshold);
|
||||
|
||||
DRW_shgroup_clear_framebuffer(grp, GPU_COLOR_BIT, 0, 0, 0, 0, 0.0f, 0x0);
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
|
||||
}
|
||||
{
|
||||
/* Stage 2: Blend Weight/Coord. */
|
||||
@@ -103,7 +103,7 @@ void GPENCIL_antialiasing_init(GPENCIL_Data *vedata)
|
||||
DRW_shgroup_uniform_vec4_copy(grp, "viewportMetrics", metrics);
|
||||
|
||||
DRW_shgroup_clear_framebuffer(grp, GPU_COLOR_BIT, 0, 0, 0, 0, 0.0f, 0x0);
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
|
||||
}
|
||||
{
|
||||
/* Stage 3: Resolve. */
|
||||
@@ -118,7 +118,7 @@ void GPENCIL_antialiasing_init(GPENCIL_Data *vedata)
|
||||
DRW_shgroup_uniform_bool_copy(grp, "onlyAlpha", pd->draw_wireframe);
|
||||
DRW_shgroup_uniform_vec4_copy(grp, "viewportMetrics", metrics);
|
||||
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -36,10 +36,10 @@
GPENCIL_tObject *gpencil_object_cache_add(GPENCIL_PrivateData *pd, Object *ob)
{
bGPdata *gpd = (bGPdata *)ob->data;
GPENCIL_tObject *tgp_ob = BLI_memblock_alloc(pd->gp_object_pool);
GPENCIL_tObject *tgp_ob = static_cast<GPENCIL_tObject *>(BLI_memblock_alloc(pd->gp_object_pool));

tgp_ob->layers.first = tgp_ob->layers.last = NULL;
tgp_ob->vfx.first = tgp_ob->vfx.last = NULL;
tgp_ob->layers.first = tgp_ob->layers.last = nullptr;
tgp_ob->vfx.first = tgp_ob->vfx.last = nullptr;
tgp_ob->camera_z = dot_v3v3(pd->camera_z_axis, ob->object_to_world[3]);
tgp_ob->is_drawmode3d = (gpd->draw_mode == GP_DRAWMODE_3D) || pd->draw_depth_only;
tgp_ob->object_scale = mat4_to_scale(ob->object_to_world);
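/* Editorial note: BLI_memblock_alloc() hands back a void pointer. C converts void * to any
 * object pointer implicitly; C++ requires an explicit cast, hence the static_cast added above.
 * Standalone sketch using malloc as a stand-in allocator: */
#include <cstdlib>

struct CacheObject {
  float camera_z;
  bool is_drawmode3d;
};

int main()
{
  /* CacheObject *ob = std::malloc(sizeof(CacheObject));   -- valid C, ill-formed C++. */
  CacheObject *ob = static_cast<CacheObject *>(std::malloc(sizeof(CacheObject)));
  if (ob != nullptr) {
    ob->camera_z = 0.0f;
    ob->is_drawmode3d = false;
    std::free(ob);
  }
  return 0;
}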
@@ -49,7 +49,7 @@ GPENCIL_tObject *gpencil_object_cache_add(GPENCIL_PrivateData *pd, Object *ob)
|
||||
const int tot_materials = BKE_object_material_count_eval(ob);
|
||||
for (int i = 0; i < tot_materials; i++) {
|
||||
MaterialGPencilStyle *gp_style = BKE_gpencil_material_settings(ob, i + 1);
|
||||
if (((gp_style != NULL) && (gp_style->flag & GP_MATERIAL_IS_STROKE_HOLDOUT)) ||
|
||||
if (((gp_style != nullptr) && (gp_style->flag & GP_MATERIAL_IS_STROKE_HOLDOUT)) ||
|
||||
(gp_style->flag & GP_MATERIAL_IS_FILL_HOLDOUT))
|
||||
{
|
||||
tgp_ob->do_mat_holdout = true;
|
||||
@@ -74,7 +74,7 @@ GPENCIL_tObject *gpencil_object_cache_add(GPENCIL_PrivateData *pd, Object *ob)
|
||||
rescale_m4(mat, size);
|
||||
/* BBox space to World. */
|
||||
mul_m4_m4m4(mat, ob->object_to_world, mat);
|
||||
if (DRW_view_is_persp_get(NULL)) {
|
||||
if (DRW_view_is_persp_get(nullptr)) {
|
||||
/* BBox center to camera vector. */
|
||||
sub_v3_v3v3(tgp_ob->plane_normal, pd->camera_pos, mat[3]);
|
||||
}
|
||||
@@ -101,7 +101,7 @@ GPENCIL_tObject *gpencil_object_cache_add(GPENCIL_PrivateData *pd, Object *ob)
mul_mat3_m4_v3(ob->object_to_world, size);
float radius = len_v3(size);
mul_m4_v3(ob->object_to_world, center);
rescale_m4(tgp_ob->plane_mat, (float[3]){radius, radius, radius});
rescale_m4(tgp_ob->plane_mat, blender::float3{radius, radius, radius});
copy_v3_v3(tgp_ob->plane_mat[3], center);

/* Add to corresponding list if is in front. */
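/* Editorial note: (float[3]){r, r, r} is a C99 compound literal, which is not valid C++. The port
 * passes blender::float3 instead, a small vector type that can feed APIs expecting a raw float
 * array. Self-contained sketch with a stand-in Vec3 type (not the real BLI type): */
#include <cstdio>

struct Vec3 {
  float v[3];
  operator const float *() const { return v; }  /* Lets it feed APIs taking const float[3]. */
};

static void rescale(float out[3], const float scale[3])
{
  for (int i = 0; i < 3; i++) {
    out[i] = scale[i];
  }
}

int main()
{
  float mat_diag[3];
  float radius = 2.0f;
  /* rescale(mat_diag, (float[3]){radius, radius, radius});   -- C only. */
  rescale(mat_diag, Vec3{{radius, radius, radius}});
  std::printf("%g %g %g\n", mat_diag[0], mat_diag[1], mat_diag[2]);
  return 0;
}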
@@ -158,8 +158,8 @@ void gpencil_object_cache_sort(GPENCIL_PrivateData *pd)
|
||||
}
|
||||
|
||||
/* Join both lists, adding in front. */
|
||||
if (pd->tobjects_infront.first != NULL) {
|
||||
if (pd->tobjects.last != NULL) {
|
||||
if (pd->tobjects_infront.first != nullptr) {
|
||||
if (pd->tobjects.last != nullptr) {
|
||||
pd->tobjects.last->next = pd->tobjects_infront.first;
|
||||
pd->tobjects.last = pd->tobjects_infront.last;
|
||||
}
|
||||
@@ -205,7 +205,7 @@ static void gpencil_layer_final_tint_and_alpha_get(const GPENCIL_PrivateData *pd
|
||||
float r_tint[4],
|
||||
float *r_alpha)
|
||||
{
|
||||
const bool use_onion = (gpf != NULL) && (gpf->runtime.onion_id != 0.0f);
|
||||
const bool use_onion = (gpf != nullptr) && (gpf->runtime.onion_id != 0.0f);
|
||||
if (use_onion) {
|
||||
const bool use_onion_custom_col = (gpd->onion_flag & GP_ONION_GHOST_PREVCOL) != 0;
|
||||
const bool use_onion_fade = (gpd->onion_flag & GP_ONION_FADE) != 0;
|
||||
@@ -282,20 +282,21 @@ GPENCIL_tLayer *gpencil_layer_cache_add(GPENCIL_PrivateData *pd,
|
||||
gpencil_layer_final_tint_and_alpha_get(pd, gpd, gpl, gpf, layer_tint, &layer_alpha);
|
||||
|
||||
/* Create the new layer descriptor. */
|
||||
GPENCIL_tLayer *tgp_layer = BLI_memblock_alloc(pd->gp_layer_pool);
|
||||
GPENCIL_tLayer *tgp_layer = static_cast<GPENCIL_tLayer *>(BLI_memblock_alloc(pd->gp_layer_pool));
|
||||
BLI_LINKS_APPEND(&tgp_ob->layers, tgp_layer);
|
||||
tgp_layer->layer_id = BLI_findindex(&gpd->layers, gpl);
|
||||
tgp_layer->mask_bits = NULL;
|
||||
tgp_layer->mask_invert_bits = NULL;
|
||||
tgp_layer->blend_ps = NULL;
|
||||
tgp_layer->mask_bits = nullptr;
|
||||
tgp_layer->mask_invert_bits = nullptr;
|
||||
tgp_layer->blend_ps = nullptr;
|
||||
|
||||
/* Masking: Go through mask list and extract valid masks in a bitmap. */
|
||||
if (is_masked) {
|
||||
bool valid_mask = false;
|
||||
/* WARNING: only #GP_MAX_MASKBITS amount of bits.
|
||||
* TODO(fclem): Find a better system without any limitation. */
|
||||
tgp_layer->mask_bits = BLI_memblock_alloc(pd->gp_maskbit_pool);
|
||||
tgp_layer->mask_invert_bits = BLI_memblock_alloc(pd->gp_maskbit_pool);
|
||||
tgp_layer->mask_bits = static_cast<BLI_bitmap *>(BLI_memblock_alloc(pd->gp_maskbit_pool));
|
||||
tgp_layer->mask_invert_bits = static_cast<BLI_bitmap *>(
|
||||
BLI_memblock_alloc(pd->gp_maskbit_pool));
|
||||
BLI_bitmap_set_all(tgp_layer->mask_bits, false, GP_MAX_MASKBITS);
|
||||
|
||||
LISTBASE_FOREACH (bGPDlayer_Mask *, mask, &gpl->mask_layers) {
|
||||
@@ -317,7 +318,7 @@ GPENCIL_tLayer *gpencil_layer_cache_add(GPENCIL_PrivateData *pd,
|
||||
pd->use_mask_fb = true;
|
||||
}
|
||||
else {
|
||||
tgp_layer->mask_bits = NULL;
|
||||
tgp_layer->mask_bits = nullptr;
|
||||
}
|
||||
is_masked = valid_mask;
|
||||
}
|
||||
@@ -357,7 +358,7 @@ GPENCIL_tLayer *gpencil_layer_cache_add(GPENCIL_PrivateData *pd,
|
||||
DRW_shgroup_uniform_texture_ref(grp, "revealBuf", &pd->reveal_layer_tx);
|
||||
DRW_shgroup_uniform_texture_ref(grp, "maskBuf", (is_masked) ? &pd->mask_tx : &pd->dummy_tx);
|
||||
DRW_shgroup_stencil_mask(grp, 0xFF);
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
|
||||
|
||||
if (gpl->blend_mode == eGplBlendMode_HardLight) {
|
||||
/* We cannot do custom blending on Multi-Target frame-buffers.
|
||||
@@ -366,7 +367,7 @@ GPENCIL_tLayer *gpencil_layer_cache_add(GPENCIL_PrivateData *pd,
|
||||
DRW_shgroup_state_disable(grp, DRW_STATE_BLEND_MUL);
|
||||
DRW_shgroup_state_enable(grp, DRW_STATE_BLEND_ADD_FULL);
|
||||
DRW_shgroup_uniform_int_copy(grp, "blendMode", 999);
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
|
||||
}
|
||||
|
||||
pd->use_layer_fb = true;
|
||||
@@ -417,14 +418,14 @@ GPENCIL_tLayer *gpencil_layer_cache_get(GPENCIL_tObject *tgp_ob, int number)
|
||||
{
|
||||
if (number >= 0) {
|
||||
GPENCIL_tLayer *layer = tgp_ob->layers.first;
|
||||
while (layer != NULL) {
|
||||
while (layer != nullptr) {
|
||||
if (layer->layer_id == number) {
|
||||
return layer;
|
||||
}
|
||||
layer = layer->next;
|
||||
}
|
||||
}
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
/** \} */
|
||||
@@ -28,10 +28,11 @@
|
||||
|
||||
static GPENCIL_MaterialPool *gpencil_material_pool_add(GPENCIL_PrivateData *pd)
|
||||
{
|
||||
GPENCIL_MaterialPool *matpool = BLI_memblock_alloc(pd->gp_material_pool);
|
||||
matpool->next = NULL;
|
||||
GPENCIL_MaterialPool *matpool = static_cast<GPENCIL_MaterialPool *>(
|
||||
BLI_memblock_alloc(pd->gp_material_pool));
|
||||
matpool->next = nullptr;
|
||||
matpool->used_count = 0;
|
||||
if (matpool->ubo == NULL) {
|
||||
if (matpool->ubo == nullptr) {
|
||||
matpool->ubo = GPU_uniformbuf_create(sizeof(matpool->mat_data));
|
||||
}
|
||||
pd->last_material_pool = matpool;
|
||||
@@ -41,13 +42,13 @@ static GPENCIL_MaterialPool *gpencil_material_pool_add(GPENCIL_PrivateData *pd)
|
||||
static GPUTexture *gpencil_image_texture_get(Image *image, bool *r_alpha_premult)
|
||||
{
|
||||
ImBuf *ibuf;
|
||||
ImageUser iuser = {NULL};
|
||||
GPUTexture *gpu_tex = NULL;
|
||||
ImageUser iuser = {nullptr};
|
||||
GPUTexture *gpu_tex = nullptr;
|
||||
void *lock;
|
||||
|
||||
ibuf = BKE_image_acquire_ibuf(image, &iuser, &lock);
|
||||
|
||||
if (ibuf != NULL && ibuf->byte_buffer.data != NULL) {
|
||||
if (ibuf != nullptr && ibuf->byte_buffer.data != nullptr) {
|
||||
gpu_tex = BKE_image_get_gpu_texture(image, &iuser, ibuf);
|
||||
*r_alpha_premult = (image->alpha_mode == IMA_ALPHA_PREMUL);
|
||||
}
|
||||
@@ -68,7 +69,7 @@ static void gpencil_uv_transform_get(const float ofs[2],
|
||||
/* Offset to center. */
|
||||
translate_m4(mat, 0.5f, 0.5f, 0.0f);
|
||||
/* Reversed order. */
|
||||
rescale_m4(mat, (float[3]){1.0f / scale[0], 1.0f / scale[1], 0.0});
|
||||
rescale_m4(mat, blender::float3{1.0f / scale[0], 1.0f / scale[1], 0.0});
|
||||
rotate_m4(mat, 'Z', -rotation);
|
||||
translate_m4(mat, ofs[0], ofs[1], 0.0f);
|
||||
/* Convert to 3x2 */
|
||||
@@ -113,7 +114,7 @@ static MaterialGPencilStyle *gpencil_viewport_material_overrides(
|
||||
gp_style->fill_style = GP_MATERIAL_FILL_STYLE_SOLID;
|
||||
break;
|
||||
case V3D_SHADING_TEXTURE_COLOR:
|
||||
memcpy(&gp_style_tmp, gp_style, sizeof(*gp_style));
|
||||
gp_style_tmp = blender::dna::shallow_copy(*gp_style);
|
||||
gp_style = &gp_style_tmp;
|
||||
if ((gp_style->stroke_style == GP_MATERIAL_STROKE_STYLE_TEXTURE) && (gp_style->sima)) {
|
||||
copy_v4_fl(gp_style->stroke_rgba, 1.0f);
|
||||
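/* Editorial note: the hunk above replaces a memcpy of a DNA struct with blender::dna::shallow_copy.
 * The general idea, sketched with a stand-in struct (the DNA helper itself is not reproduced here):
 * a field-by-field shallow copy of a trivially copyable struct can be written as plain C++
 * initialization instead of a raw memcpy of sizeof bytes. */
#include <cstring>

struct Style {
  float stroke_rgba[4];
  int flag;
};

int main()
{
  Style original = {{1.0f, 0.5f, 0.25f, 1.0f}, 3};

  Style via_memcpy;
  std::memcpy(&via_memcpy, &original, sizeof(original)); /* The old C spelling. */

  Style via_copy = original; /* Same shallow copy, expressed as C++ initialization. */

  return (via_copy.flag == via_memcpy.flag) ? 0 : 1;
}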
@@ -185,8 +186,8 @@ GPENCIL_MaterialPool *gpencil_material_pool_create(GPENCIL_PrivateData *pd, Obje
|
||||
int color_type = (pd->v3d_color_type != -1 && GPENCIL_VERTEX_MODE(gpd)) ?
|
||||
V3D_SHADING_VERTEX_COLOR :
|
||||
pd->v3d_color_type;
|
||||
const eV3DShadingLightingMode lighting_mode = (pd->v3d != NULL) ? pd->v3d->shading.light :
|
||||
V3D_LIGHTING_STUDIO;
|
||||
const eV3DShadingLightingMode lighting_mode = eV3DShadingLightingMode(
|
||||
(pd->v3d != nullptr) ? pd->v3d->shading.light : V3D_LIGHTING_STUDIO);
|
||||
|
||||
GPENCIL_MaterialPool *pool = matpool;
|
||||
for (int i = 0; i < mat_len; i++) {
|
||||
@@ -258,7 +259,7 @@ GPENCIL_MaterialPool *gpencil_material_pool_create(GPENCIL_PrivateData *pd, Obje
|
||||
mat_data->stroke_u_scale = 500.0f / gp_style->texture_pixsize;
|
||||
}
|
||||
else /* if (gp_style->stroke_style == GP_MATERIAL_STROKE_STYLE_SOLID) */ {
|
||||
pool->tex_stroke[mat_id] = NULL;
|
||||
pool->tex_stroke[mat_id] = nullptr;
|
||||
mat_data->flag &= ~GP_STROKE_TEXTURE_USE;
|
||||
copy_v4_v4(mat_data->stroke_color, gp_style->stroke_rgba);
|
||||
mat_data->stroke_texture_mix = 0.0f;
|
||||
@@ -275,20 +276,20 @@ GPENCIL_MaterialPool *gpencil_material_pool_create(GPENCIL_PrivateData *pd, Obje
|
||||
gpencil_uv_transform_get(gp_style->texture_offset,
|
||||
gp_style->texture_scale,
|
||||
gp_style->texture_angle,
|
||||
(float(*)[2])mat_data->fill_uv_rot_scale,
|
||||
reinterpret_cast<float(*)[2]>(&mat_data->fill_uv_rot_scale),
|
||||
mat_data->fill_uv_offset);
|
||||
copy_v4_v4(mat_data->fill_color, gp_style->fill_rgba);
|
||||
mat_data->fill_texture_mix = 1.0f - gp_style->mix_factor;
|
||||
}
|
||||
else if (gp_style->fill_style == GP_MATERIAL_FILL_STYLE_GRADIENT) {
|
||||
bool use_radial = (gp_style->gradient_type == GP_MATERIAL_GRADIENT_RADIAL);
|
||||
pool->tex_fill[mat_id] = NULL;
|
||||
pool->tex_fill[mat_id] = nullptr;
|
||||
mat_data->flag |= GP_FILL_GRADIENT_USE;
|
||||
mat_data->flag |= use_radial ? GP_FILL_GRADIENT_RADIAL : 0;
|
||||
gpencil_uv_transform_get(gp_style->texture_offset,
|
||||
gp_style->texture_scale,
|
||||
gp_style->texture_angle,
|
||||
(float(*)[2])mat_data->fill_uv_rot_scale,
|
||||
reinterpret_cast<float(*)[2]>(&mat_data->fill_uv_rot_scale),
|
||||
mat_data->fill_uv_offset);
|
||||
copy_v4_v4(mat_data->fill_color, gp_style->fill_rgba);
|
||||
copy_v4_v4(mat_data->fill_mix_color, gp_style->mix_rgba);
|
||||
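/* Editorial note: the `(float(*)[2])` C cast in the hunk above becomes a reinterpret_cast.
 * Reshaping a flat float array into pointer-to-float[2] rows is a raw pointer reinterpretation,
 * so C++ asks for the explicit reinterpret_cast spelling. Standalone sketch (layout assumption:
 * a flat float[4] consumed as a 2x2 row-major block): */
#include <cstdio>

struct MaterialData {
  float fill_uv_rot_scale[4]; /* Stored flat, read as float[2] rows by some callers. */
};

static void fill_rows(float rows[][2], int row_count)
{
  for (int i = 0; i < row_count; i++) {
    rows[i][0] = 1.0f;
    rows[i][1] = 0.0f;
  }
}

int main()
{
  MaterialData mat = {};
  fill_rows(reinterpret_cast<float(*)[2]>(&mat.fill_uv_rot_scale), 2);
  std::printf("%g %g %g %g\n",
              mat.fill_uv_rot_scale[0],
              mat.fill_uv_rot_scale[1],
              mat.fill_uv_rot_scale[2],
              mat.fill_uv_rot_scale[3]);
  return 0;
}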
@@ -298,7 +299,7 @@ GPENCIL_MaterialPool *gpencil_material_pool_create(GPENCIL_PrivateData *pd, Obje
|
||||
}
|
||||
}
|
||||
else /* if (gp_style->fill_style == GP_MATERIAL_FILL_STYLE_SOLID) */ {
|
||||
pool->tex_fill[mat_id] = NULL;
|
||||
pool->tex_fill[mat_id] = nullptr;
|
||||
copy_v4_v4(mat_data->fill_color, gp_style->fill_rgba);
|
||||
mat_data->fill_texture_mix = 0.0f;
|
||||
}
|
||||
@@ -336,11 +337,12 @@ void gpencil_material_resources_get(GPENCIL_MaterialPool *first_pool,
|
||||
|
||||
GPENCIL_LightPool *gpencil_light_pool_add(GPENCIL_PrivateData *pd)
|
||||
{
|
||||
GPENCIL_LightPool *lightpool = BLI_memblock_alloc(pd->gp_light_pool);
|
||||
GPENCIL_LightPool *lightpool = static_cast<GPENCIL_LightPool *>(
|
||||
BLI_memblock_alloc(pd->gp_light_pool));
|
||||
lightpool->light_used = 0;
|
||||
/* Tag light list end. */
|
||||
lightpool->light_data[0].color[0] = -1.0;
|
||||
if (lightpool->ubo == NULL) {
|
||||
if (lightpool->ubo == nullptr) {
|
||||
lightpool->ubo = GPU_uniformbuf_create(sizeof(lightpool->light_data));
|
||||
}
|
||||
pd->last_light_pool = lightpool;
|
||||
@@ -385,7 +387,7 @@ void gpencil_light_pool_populate(GPENCIL_LightPool *lightpool, Object *ob)
|
||||
}
|
||||
|
||||
gpLight *gp_light = &lightpool->light_data[lightpool->light_used];
|
||||
float(*mat)[4] = (float(*)[4])gp_light->right;
|
||||
float(*mat)[4] = reinterpret_cast<float(*)[4]>(&gp_light->right);
|
||||
|
||||
if (la->type == LA_SPOT) {
|
||||
copy_m4_m4(mat, ob->world_to_object);
|
||||
@@ -420,11 +422,11 @@ void gpencil_light_pool_populate(GPENCIL_LightPool *lightpool, Object *ob)
|
||||
}
|
||||
}
|
||||
|
||||
GPENCIL_LightPool *gpencil_light_pool_create(GPENCIL_PrivateData *pd, Object *UNUSED(ob))
|
||||
GPENCIL_LightPool *gpencil_light_pool_create(GPENCIL_PrivateData *pd, Object * /*ob*/)
|
||||
{
|
||||
GPENCIL_LightPool *lightpool = pd->last_light_pool;
|
||||
|
||||
if (lightpool == NULL) {
|
||||
if (lightpool == nullptr) {
|
||||
lightpool = gpencil_light_pool_add(pd);
|
||||
}
|
||||
/* TODO(fclem): Light linking. */
|
||||
@@ -457,10 +459,10 @@ static void gpencil_view_layer_data_free(void *storage)
|
||||
|
||||
BLI_memblock_destroy(vldata->gp_light_pool, gpencil_light_pool_free);
|
||||
BLI_memblock_destroy(vldata->gp_material_pool, gpencil_material_pool_free);
|
||||
BLI_memblock_destroy(vldata->gp_maskbit_pool, NULL);
|
||||
BLI_memblock_destroy(vldata->gp_object_pool, NULL);
|
||||
BLI_memblock_destroy(vldata->gp_layer_pool, NULL);
|
||||
BLI_memblock_destroy(vldata->gp_vfx_pool, NULL);
|
||||
BLI_memblock_destroy(vldata->gp_maskbit_pool, nullptr);
|
||||
BLI_memblock_destroy(vldata->gp_object_pool, nullptr);
|
||||
BLI_memblock_destroy(vldata->gp_layer_pool, nullptr);
|
||||
BLI_memblock_destroy(vldata->gp_vfx_pool, nullptr);
|
||||
}
|
||||
|
||||
GPENCIL_ViewLayerData *GPENCIL_view_layer_data_ensure(void)
|
||||
@@ -471,8 +473,9 @@ GPENCIL_ViewLayerData *GPENCIL_view_layer_data_ensure(void)
|
||||
/* NOTE(@fclem): Putting this stuff in view-layer means it is shared by all viewports.
|
||||
* For now it is ok, but in the future, it could become a problem if we implement
|
||||
* the caching system. */
|
||||
if (*vldata == NULL) {
|
||||
*vldata = MEM_callocN(sizeof(**vldata), "GPENCIL_ViewLayerData");
|
||||
if (*vldata == nullptr) {
|
||||
*vldata = static_cast<GPENCIL_ViewLayerData *>(
|
||||
MEM_callocN(sizeof(**vldata), "GPENCIL_ViewLayerData"));
|
||||
|
||||
(*vldata)->gp_light_pool = BLI_memblock_create(sizeof(GPENCIL_LightPool));
|
||||
(*vldata)->gp_material_pool = BLI_memblock_create(sizeof(GPENCIL_MaterialPool));
|
||||
@@ -54,10 +54,11 @@ void GPENCIL_engine_init(void *ved)
|
||||
const View3D *v3d = ctx->v3d;
|
||||
|
||||
if (!stl->pd) {
|
||||
stl->pd = MEM_callocN(sizeof(GPENCIL_PrivateData), "GPENCIL_PrivateData");
|
||||
stl->pd = static_cast<GPENCIL_PrivateData *>(
|
||||
MEM_callocN(sizeof(GPENCIL_PrivateData), "GPENCIL_PrivateData"));
|
||||
}
|
||||
|
||||
if (txl->dummy_texture == NULL) {
|
||||
if (txl->dummy_texture == nullptr) {
|
||||
const float pixels[1][4] = {{1.0f, 0.0f, 1.0f, 1.0f}};
|
||||
txl->dummy_texture = DRW_texture_create_2d(1, 1, GPU_RGBA8, DRW_TEX_WRAP, (float *)pixels);
|
||||
}
|
||||
@@ -67,10 +68,10 @@ void GPENCIL_engine_init(void *ved)
|
||||
/* Resize and reset memblocks. */
|
||||
BLI_memblock_clear(vldata->gp_light_pool, gpencil_light_pool_free);
|
||||
BLI_memblock_clear(vldata->gp_material_pool, gpencil_material_pool_free);
|
||||
BLI_memblock_clear(vldata->gp_object_pool, NULL);
|
||||
BLI_memblock_clear(vldata->gp_layer_pool, NULL);
|
||||
BLI_memblock_clear(vldata->gp_vfx_pool, NULL);
|
||||
BLI_memblock_clear(vldata->gp_maskbit_pool, NULL);
|
||||
BLI_memblock_clear(vldata->gp_object_pool, nullptr);
|
||||
BLI_memblock_clear(vldata->gp_layer_pool, nullptr);
|
||||
BLI_memblock_clear(vldata->gp_vfx_pool, nullptr);
|
||||
BLI_memblock_clear(vldata->gp_maskbit_pool, nullptr);
|
||||
|
||||
stl->pd->gp_light_pool = vldata->gp_light_pool;
|
||||
stl->pd->gp_material_pool = vldata->gp_material_pool;
|
||||
@@ -81,26 +82,26 @@ void GPENCIL_engine_init(void *ved)
|
||||
stl->pd->view_layer = ctx->view_layer;
|
||||
stl->pd->scene = ctx->scene;
|
||||
stl->pd->v3d = ctx->v3d;
|
||||
stl->pd->last_light_pool = NULL;
|
||||
stl->pd->last_material_pool = NULL;
|
||||
stl->pd->tobjects.first = NULL;
|
||||
stl->pd->tobjects.last = NULL;
|
||||
stl->pd->tobjects_infront.first = NULL;
|
||||
stl->pd->tobjects_infront.last = NULL;
|
||||
stl->pd->sbuffer_tobjects.first = NULL;
|
||||
stl->pd->sbuffer_tobjects.last = NULL;
|
||||
stl->pd->last_light_pool = nullptr;
|
||||
stl->pd->last_material_pool = nullptr;
|
||||
stl->pd->tobjects.first = nullptr;
|
||||
stl->pd->tobjects.last = nullptr;
|
||||
stl->pd->tobjects_infront.first = nullptr;
|
||||
stl->pd->tobjects_infront.last = nullptr;
|
||||
stl->pd->sbuffer_tobjects.first = nullptr;
|
||||
stl->pd->sbuffer_tobjects.last = nullptr;
|
||||
stl->pd->dummy_tx = txl->dummy_texture;
|
||||
stl->pd->draw_depth_only = !DRW_state_is_fbo();
|
||||
stl->pd->draw_wireframe = (v3d && v3d->shading.type == OB_WIRE) && !stl->pd->draw_depth_only;
|
||||
stl->pd->scene_depth_tx = stl->pd->draw_depth_only ? txl->dummy_texture : dtxl->depth;
|
||||
stl->pd->scene_fb = dfbl->default_fb;
|
||||
stl->pd->is_render = txl->render_depth_tx || (v3d && v3d->shading.type == OB_RENDER);
|
||||
stl->pd->is_viewport = (v3d != NULL);
|
||||
stl->pd->is_viewport = (v3d != nullptr);
|
||||
stl->pd->global_light_pool = gpencil_light_pool_add(stl->pd);
|
||||
stl->pd->shadeless_light_pool = gpencil_light_pool_add(stl->pd);
|
||||
/* Small HACK: we don't want the global pool to be reused,
|
||||
* so we set the last light pool to NULL. */
|
||||
stl->pd->last_light_pool = NULL;
|
||||
* so we set the last light pool to nullptr. */
|
||||
stl->pd->last_light_pool = nullptr;
|
||||
|
||||
bool use_scene_lights = false;
|
||||
bool use_scene_world = false;
|
||||
@@ -139,15 +140,15 @@ void GPENCIL_engine_init(void *ved)
|
||||
stl->pd->use_lighting = (v3d && v3d->shading.type > OB_SOLID) || stl->pd->is_render;
|
||||
stl->pd->use_lights = use_scene_lights;
|
||||
|
||||
if (txl->render_depth_tx != NULL) {
|
||||
if (txl->render_depth_tx != nullptr) {
|
||||
stl->pd->scene_depth_tx = txl->render_depth_tx;
|
||||
stl->pd->scene_fb = fbl->render_fb;
|
||||
}
|
||||
|
||||
gpencil_light_ambient_add(stl->pd->shadeless_light_pool, (float[3]){1.0f, 1.0f, 1.0f});
|
||||
gpencil_light_ambient_add(stl->pd->shadeless_light_pool, blender::float3{1.0f, 1.0f, 1.0f});
|
||||
|
||||
World *world = ctx->scene->world;
|
||||
if (world != NULL && use_scene_world) {
|
||||
if (world != nullptr && use_scene_world) {
|
||||
gpencil_light_ambient_add(stl->pd->global_light_pool, &world->horr);
|
||||
}
|
||||
else if (v3d) {
|
||||
@@ -157,16 +158,16 @@ void GPENCIL_engine_init(void *ved)
|
||||
}
|
||||
|
||||
float viewmatinv[4][4];
|
||||
DRW_view_viewmat_get(NULL, viewmatinv, true);
|
||||
DRW_view_viewmat_get(nullptr, viewmatinv, true);
|
||||
copy_v3_v3(stl->pd->camera_z_axis, viewmatinv[2]);
|
||||
copy_v3_v3(stl->pd->camera_pos, viewmatinv[3]);
|
||||
stl->pd->camera_z_offset = dot_v3v3(viewmatinv[3], viewmatinv[2]);
|
||||
|
||||
if (ctx && ctx->rv3d && v3d) {
|
||||
stl->pd->camera = (ctx->rv3d->persp == RV3D_CAMOB) ? v3d->camera : NULL;
|
||||
stl->pd->camera = (ctx->rv3d->persp == RV3D_CAMOB) ? v3d->camera : nullptr;
|
||||
}
|
||||
else {
|
||||
stl->pd->camera = NULL;
|
||||
stl->pd->camera = nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -191,9 +192,9 @@ void GPENCIL_cache_init(void *ved)
|
||||
if (draw_ctx->v3d) {
|
||||
const bool hide_overlay = ((draw_ctx->v3d->flag2 & V3D_HIDE_OVERLAYS) != 0);
|
||||
const bool show_onion = ((draw_ctx->v3d->gp_flag & V3D_GP_SHOW_ONION_SKIN) != 0);
|
||||
const bool playing = (draw_ctx->evil_C != NULL) ?
|
||||
const bool playing = (draw_ctx->evil_C != nullptr) ?
|
||||
ED_screen_animation_playing(CTX_wm_manager(draw_ctx->evil_C)) !=
|
||||
NULL :
|
||||
nullptr :
|
||||
false;
|
||||
pd->do_onion = show_onion && !hide_overlay && !playing;
|
||||
pd->playing = playing;
|
||||
@@ -229,11 +230,11 @@ void GPENCIL_cache_init(void *ved)
|
||||
}
|
||||
|
||||
{
|
||||
pd->sbuffer_stroke = NULL;
|
||||
pd->sbuffer_gpd = NULL;
|
||||
pd->sbuffer_layer = NULL;
|
||||
pd->stroke_batch = NULL;
|
||||
pd->fill_batch = NULL;
|
||||
pd->sbuffer_stroke = nullptr;
|
||||
pd->sbuffer_gpd = nullptr;
|
||||
pd->sbuffer_layer = nullptr;
|
||||
pd->stroke_batch = nullptr;
|
||||
pd->fill_batch = nullptr;
|
||||
pd->do_fast_drawing = false;
|
||||
|
||||
pd->obact = draw_ctx->obact;
|
||||
@@ -250,11 +251,14 @@ void GPENCIL_cache_init(void *ved)
|
||||
}
|
||||
|
||||
if (pd->do_fast_drawing) {
|
||||
pd->snapshot_buffer_dirty = (txl->snapshot_color_tx == NULL);
|
||||
pd->snapshot_buffer_dirty = (txl->snapshot_color_tx == nullptr);
|
||||
const float *size = DRW_viewport_size_get();
|
||||
DRW_texture_ensure_2d(&txl->snapshot_depth_tx, size[0], size[1], GPU_DEPTH24_STENCIL8, 0);
|
||||
DRW_texture_ensure_2d(&txl->snapshot_color_tx, size[0], size[1], GPU_R11F_G11F_B10F, 0);
|
||||
DRW_texture_ensure_2d(&txl->snapshot_reveal_tx, size[0], size[1], GPU_R11F_G11F_B10F, 0);
|
||||
DRW_texture_ensure_2d(
|
||||
&txl->snapshot_depth_tx, size[0], size[1], GPU_DEPTH24_STENCIL8, DRWTextureFlag(0));
|
||||
DRW_texture_ensure_2d(
|
||||
&txl->snapshot_color_tx, size[0], size[1], GPU_R11F_G11F_B10F, DRWTextureFlag(0));
|
||||
DRW_texture_ensure_2d(
|
||||
&txl->snapshot_reveal_tx, size[0], size[1], GPU_R11F_G11F_B10F, DRWTextureFlag(0));
|
||||
|
||||
GPU_framebuffer_ensure_config(&fbl->snapshot_fb,
|
||||
{
|
||||
@@ -280,7 +284,7 @@ void GPENCIL_cache_init(void *ved)
|
||||
DRW_shgroup_uniform_texture_ref(grp, "depthBuf", &pd->depth_tx);
|
||||
DRW_shgroup_uniform_bool(grp, "strokeOrder3d", &pd->is_stroke_order_3d, 1);
|
||||
DRW_shgroup_uniform_vec4(grp, "gpModelMatrix", pd->object_bound_mat[0], 4);
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
|
||||
}
|
||||
{
|
||||
DRWState state = DRW_STATE_WRITE_COLOR | DRW_STATE_LOGIC_INVERT;
|
||||
@@ -288,10 +292,11 @@ void GPENCIL_cache_init(void *ved)
|
||||
|
||||
GPUShader *sh = GPENCIL_shader_mask_invert_get();
|
||||
grp = DRW_shgroup_create(sh, psl->mask_invert_ps);
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
|
||||
}
|
||||
|
||||
Camera *cam = (pd->camera != NULL && pd->camera->type == OB_CAMERA) ? pd->camera->data : NULL;
|
||||
Camera *cam = static_cast<Camera *>(
|
||||
(pd->camera != nullptr && pd->camera->type == OB_CAMERA) ? pd->camera->data : nullptr);
|
||||
|
||||
/* Pseudo DOF setup. */
|
||||
if (cam && (cam->dof.flag & CAM_DOF_ENABLED)) {
|
||||
@@ -307,7 +312,7 @@ void GPENCIL_cache_init(void *ved)
|
||||
float focal_len_scaled = scale_camera * focal_len;
|
||||
float sensor_scaled = scale_camera * sensor;
|
||||
|
||||
if (draw_ctx->rv3d != NULL) {
|
||||
if (draw_ctx->rv3d != nullptr) {
|
||||
sensor_scaled *= draw_ctx->rv3d->viewcamtexcofac[0];
|
||||
}
|
||||
|
||||
@@ -317,13 +322,13 @@ void GPENCIL_cache_init(void *ved)
|
||||
}
|
||||
else {
|
||||
/* Disable DoF blur scaling. */
|
||||
pd->camera = NULL;
|
||||
pd->camera = nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
#define DRAW_NOW 2

typedef struct gpIterPopulateData {
struct gpIterPopulateData {
Object *ob;
GPENCIL_tObject *tgp_ob;
GPENCIL_PrivateData *pd;
@@ -345,19 +350,19 @@ typedef struct gpIterPopulateData {
/* Infos for call batching. */
GPUBatch *geom;
int vfirst, vcount;
} gpIterPopulateData;
};

#define DISABLE_BATCHING 0

static void gpencil_drawcall_flush(gpIterPopulateData *iter)
{
#if !DISABLE_BATCHING
if (iter->geom != NULL) {
if (iter->geom != nullptr) {
DRW_shgroup_call_range(iter->grp, iter->ob, iter->geom, iter->vfirst, iter->vcount);
}
#endif

iter->geom = NULL;
iter->geom = nullptr;
iter->vfirst = -1;
iter->vcount = 0;
}
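/* Editorial note: the hunk above drops the typedef wrapper. In C, `struct gpIterPopulateData`
 * only becomes usable as a bare gpIterPopulateData through a typedef; in C++ the struct name is
 * already a type name. Minimal sketch (illustrative type only): */

/* C spelling:
 *   typedef struct IterData {
 *     int vfirst, vcount;
 *   } IterData;
 */

struct IterData {
  int vfirst = -1;  /* C++ also allows default member initializers. */
  int vcount = 0;
};

static int count_total(const IterData &iter)
{
  return iter.vcount;
}

int main()
{
  IterData iter; /* No `struct` keyword and no typedef needed. */
  return count_total(iter);
}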
@@ -408,7 +413,7 @@ static void gpencil_sbuffer_cache_populate(gpIterPopulateData *iter)
|
||||
DRW_shgroup_uniform_texture(iter->grp, "gpSceneDepthTexture", iter->pd->dummy_tx);
|
||||
}
|
||||
|
||||
gpencil_stroke_cache_populate(NULL, NULL, iter->pd->sbuffer_stroke, iter);
|
||||
gpencil_stroke_cache_populate(nullptr, nullptr, iter->pd->sbuffer_stroke, iter);
|
||||
gpencil_drawcall_flush(iter);
|
||||
|
||||
iter->stroke_index_offset = iter->pd->sbuffer_stroke->totpoints + 1;
|
||||
@@ -417,7 +422,7 @@ static void gpencil_sbuffer_cache_populate(gpIterPopulateData *iter)
|
||||
|
||||
static void gpencil_layer_cache_populate(bGPDlayer *gpl,
|
||||
bGPDframe *gpf,
|
||||
bGPDstroke *UNUSED(gps),
|
||||
bGPDstroke * /*gps*/,
|
||||
void *thunk)
|
||||
{
|
||||
gpIterPopulateData *iter = (gpIterPopulateData *)thunk;
|
||||
@@ -432,7 +437,7 @@ static void gpencil_layer_cache_populate(bGPDlayer *gpl,
|
||||
else {
|
||||
iter->do_sbuffer_call = !pd->do_fast_drawing && (gpd == pd->sbuffer_gpd) &&
|
||||
(gpl == pd->sbuffer_layer) &&
|
||||
(gpf == NULL || gpf->runtime.onion_id == 0.0f);
|
||||
(gpf == nullptr || gpf->runtime.onion_id == 0.0f);
|
||||
}
|
||||
|
||||
GPENCIL_tLayer *tgp_layer = gpencil_layer_cache_add(pd, iter->ob, gpl, gpf, iter->tgp_ob);
|
||||
@@ -442,7 +447,7 @@ static void gpencil_layer_cache_populate(bGPDlayer *gpl,
|
||||
|
||||
iter->ubo_lights = (use_lights) ? pd->global_light_pool->ubo : pd->shadeless_light_pool->ubo;
|
||||
|
||||
gpencil_material_resources_get(iter->matpool, 0, NULL, NULL, &iter->ubo_mat);
|
||||
gpencil_material_resources_get(iter->matpool, 0, nullptr, nullptr, &iter->ubo_mat);
|
||||
|
||||
/* Iterator dependent uniforms. */
|
||||
DRWShadingGroup *grp = iter->grp = tgp_layer->base_shgrp;
|
||||
@@ -462,7 +467,7 @@ static void gpencil_stroke_cache_populate(bGPDlayer *gpl,
|
||||
{
|
||||
gpIterPopulateData *iter = (gpIterPopulateData *)thunk;
|
||||
|
||||
bGPdata *gpd = iter->ob->data;
|
||||
bGPdata *gpd = static_cast<bGPdata *>(iter->ob->data);
|
||||
MaterialGPencilStyle *gp_style = BKE_gpencil_material_settings(iter->ob, gps->mat_nr + 1);
|
||||
|
||||
const bool is_render = iter->pd->is_render;
|
||||
@@ -555,14 +560,14 @@ static void gpencil_sbuffer_cache_populate_fast(GPENCIL_Data *vedata, gpIterPopu
|
||||
iter->tgp_ob = gpencil_object_cache_add(iter->pd, iter->ob);
|
||||
/* Remove from the main list. */
|
||||
iter->pd->tobjects.last = last_tgp_ob;
|
||||
last_tgp_ob->next = NULL;
|
||||
last_tgp_ob->next = nullptr;
|
||||
/* Add to sbuffer tgpobject list. */
|
||||
BLI_LINKS_APPEND(&iter->pd->sbuffer_tobjects, iter->tgp_ob);
|
||||
/* Remove depth test with scene (avoid self occlusion). */
|
||||
iter->pd->scene_depth_tx = txl->dummy_texture;
|
||||
|
||||
gpencil_layer_cache_populate(
|
||||
iter->pd->sbuffer_layer, iter->pd->sbuffer_layer->actframe, NULL, iter);
|
||||
iter->pd->sbuffer_layer, iter->pd->sbuffer_layer->actframe, nullptr, iter);
|
||||
|
||||
const DRWContextState *ctx = DRW_context_state_get();
|
||||
ToolSettings *ts = ctx->scene->toolsettings;
|
||||
@@ -572,7 +577,7 @@ static void gpencil_sbuffer_cache_populate_fast(GPENCIL_Data *vedata, gpIterPopu
|
||||
}
|
||||
|
||||
iter->do_sbuffer_call = DRAW_NOW;
|
||||
gpencil_stroke_cache_populate(NULL, NULL, iter->pd->sbuffer_stroke, iter);
|
||||
gpencil_stroke_cache_populate(nullptr, nullptr, iter->pd->sbuffer_stroke, iter);
|
||||
gpencil_drawcall_flush(iter);
|
||||
|
||||
gpencil_vfx_cache_populate(vedata, iter->ob, iter->tgp_ob);
|
||||
@@ -625,7 +630,7 @@ void GPENCIL_cache_populate(void *ved, Object *ob)
|
||||
}
|
||||
}
|
||||
|
||||
BKE_gpencil_visible_stroke_advanced_iter(is_final_render ? pd->view_layer : NULL,
|
||||
BKE_gpencil_visible_stroke_advanced_iter(is_final_render ? pd->view_layer : nullptr,
|
||||
ob,
|
||||
gpencil_layer_cache_populate,
|
||||
gpencil_stroke_cache_populate,
|
||||
@@ -763,7 +768,7 @@ static void GPENCIL_draw_scene_depth_only(void *ved)
|
||||
GPU_framebuffer_bind(dfbl->default_fb);
|
||||
}
|
||||
|
||||
pd->gp_object_pool = pd->gp_layer_pool = pd->gp_vfx_pool = pd->gp_maskbit_pool = NULL;
|
||||
pd->gp_object_pool = pd->gp_layer_pool = pd->gp_vfx_pool = pd->gp_maskbit_pool = nullptr;
|
||||
|
||||
/* Free temp stroke buffers. */
|
||||
if (pd->sbuffer_gpd) {
|
||||
@@ -805,7 +810,7 @@ static void gpencil_draw_mask(GPENCIL_Data *vedata, GPENCIL_tObject *ob, GPENCIL
|
||||
|
||||
GPENCIL_tLayer *mask_layer = gpencil_layer_cache_get(ob, i);
|
||||
/* When filtering by view-layer, the mask could be null and must be ignored. */
|
||||
if (mask_layer == NULL) {
|
||||
if (mask_layer == nullptr) {
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -887,7 +892,7 @@ static void GPENCIL_fast_draw_start(GPENCIL_Data *vedata)
|
||||
GPU_framebuffer_blit(fbl->snapshot_fb, 0, fbl->gpencil_fb, 0, GPU_COLOR_BIT);
|
||||
GPU_framebuffer_blit(fbl->snapshot_fb, 1, fbl->gpencil_fb, 1, GPU_COLOR_BIT);
|
||||
/* Bypass drawing. */
|
||||
pd->tobjects.first = pd->tobjects.last = NULL;
|
||||
pd->tobjects.first = pd->tobjects.last = nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -918,7 +923,7 @@ void GPENCIL_draw_scene(void *ved)
|
||||
float clear_cols[2][4] = {{0.0f, 0.0f, 0.0f, 0.0f}, {1.0f, 1.0f, 1.0f, 1.0f}};
|
||||
|
||||
/* Fade 3D objects. */
|
||||
if ((!pd->is_render) && (pd->fade_3d_object_opacity > -1.0f) && (pd->obact != NULL) &&
|
||||
if ((!pd->is_render) && (pd->fade_3d_object_opacity > -1.0f) && (pd->obact != nullptr) &&
|
||||
(pd->obact->type == OB_GPENCIL_LEGACY))
|
||||
{
|
||||
float background_color[3];
|
||||
@@ -934,7 +939,7 @@ void GPENCIL_draw_scene(void *ved)
|
||||
return;
|
||||
}
|
||||
|
||||
if (pd->tobjects.first == NULL) {
|
||||
if (pd->tobjects.first == nullptr) {
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -959,7 +964,7 @@ void GPENCIL_draw_scene(void *ved)
|
||||
GPENCIL_antialiasing_draw(vedata);
|
||||
}
|
||||
|
||||
pd->gp_object_pool = pd->gp_layer_pool = pd->gp_vfx_pool = pd->gp_maskbit_pool = NULL;
|
||||
pd->gp_object_pool = pd->gp_layer_pool = pd->gp_vfx_pool = pd->gp_maskbit_pool = nullptr;
|
||||
|
||||
/* Free temp stroke buffers. */
|
||||
if (pd->sbuffer_gpd) {
|
||||
@@ -975,19 +980,19 @@ static void GPENCIL_engine_free(void)
|
||||
static const DrawEngineDataSize GPENCIL_data_size = DRW_VIEWPORT_DATA_SIZE(GPENCIL_Data);
|
||||
|
||||
DrawEngineType draw_engine_gpencil_type = {
|
||||
/*next*/ NULL,
|
||||
/*prev*/ NULL,
|
||||
/*next*/ nullptr,
|
||||
/*prev*/ nullptr,
|
||||
/*idname*/ N_("GpencilMode"),
|
||||
/*vedata_size*/ &GPENCIL_data_size,
|
||||
/*engine_init*/ &GPENCIL_engine_init,
|
||||
/*engine_free*/ &GPENCIL_engine_free,
|
||||
/*instance_free*/ /*instance_free*/ NULL,
|
||||
/*instance_free*/ /*instance_free*/ nullptr,
|
||||
/*cache_init*/ &GPENCIL_cache_init,
|
||||
/*cache_populate*/ &GPENCIL_cache_populate,
|
||||
/*cache_finish*/ &GPENCIL_cache_finish,
|
||||
/*draw_scene*/ &GPENCIL_draw_scene,
|
||||
/*view_update*/ NULL,
|
||||
/*id_update*/ NULL,
|
||||
/*view_update*/ nullptr,
|
||||
/*id_update*/ nullptr,
|
||||
/*render_to_image*/ &GPENCIL_render_to_image,
|
||||
/*store_metadata*/ NULL,
|
||||
/*store_metadata*/ nullptr,
|
||||
};
|
||||
@@ -43,7 +43,7 @@ void GPENCIL_render_init(GPENCIL_Data *vedata,
|
||||
|
||||
invert_m4_m4(viewmat, viewinv);
|
||||
|
||||
DRWView *view = DRW_view_create(viewmat, winmat, NULL, NULL, NULL);
|
||||
DRWView *view = DRW_view_create(viewmat, winmat, nullptr, nullptr, nullptr);
|
||||
DRW_view_default_set(view);
|
||||
DRW_view_set_active(view);
|
||||
|
||||
@@ -52,8 +52,8 @@ void GPENCIL_render_init(GPENCIL_Data *vedata,
|
||||
RenderPass *rpass_z_src = RE_pass_find_by_name(render_layer, RE_PASSNAME_Z, viewname);
|
||||
RenderPass *rpass_col_src = RE_pass_find_by_name(render_layer, RE_PASSNAME_COMBINED, viewname);
|
||||
|
||||
float *pix_z = (rpass_z_src) ? rpass_z_src->ibuf->float_buffer.data : NULL;
|
||||
float *pix_col = (rpass_col_src) ? rpass_col_src->ibuf->float_buffer.data : NULL;
|
||||
float *pix_z = (rpass_z_src) ? rpass_z_src->ibuf->float_buffer.data : nullptr;
|
||||
float *pix_col = (rpass_col_src) ? rpass_col_src->ibuf->float_buffer.data : nullptr;
|
||||
|
||||
if (!pix_z || !pix_col) {
|
||||
RE_engine_set_error_message(engine,
|
||||
@@ -62,7 +62,7 @@ void GPENCIL_render_init(GPENCIL_Data *vedata,
|
||||
|
||||
if (pix_z) {
|
||||
/* Depth need to be remapped to [0..1] range. */
|
||||
pix_z = MEM_dupallocN(pix_z);
|
||||
pix_z = static_cast<float *>(MEM_dupallocN(pix_z));
|
||||
|
||||
int pix_num = rpass_z_src->rectx * rpass_z_src->recty;
|
||||
|
||||
@@ -96,14 +96,14 @@ void GPENCIL_render_init(GPENCIL_Data *vedata,
|
||||
}
|
||||
else {
|
||||
txl->render_depth_tx = DRW_texture_create_2d(
|
||||
size[0], size[1], GPU_DEPTH_COMPONENT24, 0, do_region ? NULL : pix_z);
|
||||
size[0], size[1], GPU_DEPTH_COMPONENT24, DRWTextureFlag(0), do_region ? nullptr : pix_z);
|
||||
}
|
||||
if (txl->render_color_tx && !do_clear_col) {
|
||||
GPU_texture_update(txl->render_color_tx, GPU_DATA_FLOAT, pix_col);
|
||||
}
|
||||
else {
|
||||
txl->render_color_tx = DRW_texture_create_2d(
|
||||
size[0], size[1], GPU_RGBA16F, 0, do_region ? NULL : pix_col);
|
||||
size[0], size[1], GPU_RGBA16F, DRWTextureFlag(0), do_region ? nullptr : pix_col);
|
||||
}
|
||||
|
||||
GPU_framebuffer_ensure_config(&fbl->render_fb,
|
||||
@@ -143,8 +143,8 @@ void GPENCIL_render_init(GPENCIL_Data *vedata,
|
||||
/* render all objects and select only grease pencil */
|
||||
static void GPENCIL_render_cache(void *vedata,
|
||||
Object *ob,
|
||||
RenderEngine *UNUSED(engine),
|
||||
Depsgraph *UNUSED(depsgraph))
|
||||
RenderEngine * /*engine*/,
|
||||
Depsgraph * /*depsgraph*/)
|
||||
{
|
||||
if (ob && ELEM(ob->type, OB_GPENCIL_LEGACY, OB_LAMP)) {
|
||||
if (DRW_object_visibility_in_active_context(ob) & OB_VISIBLE_SELF) {
|
||||
@@ -164,7 +164,7 @@ static void GPENCIL_render_result_z(RenderLayer *rl,
|
||||
return;
|
||||
}
|
||||
RenderPass *rp = RE_pass_find_by_name(rl, RE_PASSNAME_Z, viewname);
|
||||
if (rp == NULL) {
|
||||
if (rp == nullptr) {
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -179,12 +179,12 @@ static void GPENCIL_render_result_z(RenderLayer *rl,
|
||||
ro_buffer_data);
|
||||
|
||||
float winmat[4][4];
|
||||
DRW_view_winmat_get(NULL, winmat, false);
|
||||
DRW_view_winmat_get(nullptr, winmat, false);
|
||||
|
||||
int pix_num = BLI_rcti_size_x(rect) * BLI_rcti_size_y(rect);
|
||||
|
||||
/* Convert GPU depth [0..1] to view Z [near..far] */
|
||||
if (DRW_view_is_persp_get(NULL)) {
|
||||
if (DRW_view_is_persp_get(nullptr)) {
|
||||
for (int i = 0; i < pix_num; i++) {
|
||||
if (ro_buffer_data[i] == 1.0f) {
|
||||
ro_buffer_data[i] = 1e10f; /* Background */
|
||||
@@ -197,8 +197,8 @@ static void GPENCIL_render_result_z(RenderLayer *rl,
|
||||
}
|
||||
else {
|
||||
/* Keep in mind, near and far distance are negatives. */
|
||||
float near = DRW_view_near_distance_get(NULL);
|
||||
float far = DRW_view_far_distance_get(NULL);
|
||||
float near = DRW_view_near_distance_get(nullptr);
|
||||
float far = DRW_view_far_distance_get(nullptr);
|
||||
float range = fabsf(far - near);
|
||||
|
||||
for (int i = 0; i < pix_num; i++) {
|
||||
@@ -9,20 +9,20 @@
|
||||
|
||||
#include "gpencil_engine.h"
|
||||
|
||||
extern char datatoc_gpencil_common_lib_glsl[];
|
||||
extern char datatoc_gpencil_frag_glsl[];
|
||||
extern char datatoc_gpencil_vert_glsl[];
|
||||
extern char datatoc_gpencil_antialiasing_frag_glsl[];
|
||||
extern char datatoc_gpencil_antialiasing_vert_glsl[];
|
||||
extern char datatoc_gpencil_layer_blend_frag_glsl[];
|
||||
extern char datatoc_gpencil_mask_invert_frag_glsl[];
|
||||
extern char datatoc_gpencil_depth_merge_frag_glsl[];
|
||||
extern char datatoc_gpencil_depth_merge_vert_glsl[];
|
||||
extern char datatoc_gpencil_vfx_frag_glsl[];
|
||||
extern "C" char datatoc_gpencil_common_lib_glsl[];
|
||||
extern "C" char datatoc_gpencil_frag_glsl[];
|
||||
extern "C" char datatoc_gpencil_vert_glsl[];
|
||||
extern "C" char datatoc_gpencil_antialiasing_frag_glsl[];
|
||||
extern "C" char datatoc_gpencil_antialiasing_vert_glsl[];
|
||||
extern "C" char datatoc_gpencil_layer_blend_frag_glsl[];
|
||||
extern "C" char datatoc_gpencil_mask_invert_frag_glsl[];
|
||||
extern "C" char datatoc_gpencil_depth_merge_frag_glsl[];
|
||||
extern "C" char datatoc_gpencil_depth_merge_vert_glsl[];
|
||||
extern "C" char datatoc_gpencil_vfx_frag_glsl[];
|
||||
|
||||
extern char datatoc_common_colormanagement_lib_glsl[];
|
||||
extern char datatoc_common_fullscreen_vert_glsl[];
|
||||
extern char datatoc_common_view_lib_glsl[];
|
||||
extern "C" char datatoc_common_colormanagement_lib_glsl[];
|
||||
extern "C" char datatoc_common_fullscreen_vert_glsl[];
|
||||
extern "C" char datatoc_common_view_lib_glsl[];
|
||||
|
||||
static struct {
|
||||
/* SMAA antialiasing */
|
||||
@@ -46,7 +46,7 @@ static struct {
|
||||
GPUShader *fx_rim_sh;
|
||||
GPUShader *fx_shadow_sh;
|
||||
GPUShader *fx_transform_sh;
|
||||
} g_shaders = {{NULL}};
|
||||
} g_shaders = {{nullptr}};
|
||||
|
||||
void GPENCIL_shader_free(void)
|
||||
{
|
||||
@@ -24,11 +24,11 @@
|
||||
/* verify if this fx is active */
|
||||
static bool effect_is_active(bGPdata *gpd, ShaderFxData *fx, bool is_viewport)
|
||||
{
|
||||
if (fx == NULL) {
|
||||
if (fx == nullptr) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (gpd == NULL) {
|
||||
if (gpd == nullptr) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -46,7 +46,7 @@ static bool effect_is_active(bGPdata *gpd, ShaderFxData *fx, bool is_viewport)
|
||||
return false;
|
||||
}
|
||||
|
||||
typedef struct gpIterVfxData {
|
||||
struct gpIterVfxData {
|
||||
GPENCIL_PrivateData *pd;
|
||||
GPENCIL_tObject *tgp_ob;
|
||||
GPUFrameBuffer **target_fb;
|
||||
@@ -55,7 +55,7 @@ typedef struct gpIterVfxData {
|
||||
GPUTexture **source_color_tx;
|
||||
GPUTexture **target_reveal_tx;
|
||||
GPUTexture **source_reveal_tx;
|
||||
} gpIterVfxData;
|
||||
};
|
||||
|
||||
static DRWShadingGroup *gpencil_vfx_pass_create(const char *name,
|
||||
DRWState state,
|
||||
@@ -67,7 +67,7 @@ static DRWShadingGroup *gpencil_vfx_pass_create(const char *name,
|
||||
DRW_shgroup_uniform_texture_ref(grp, "colorBuf", iter->source_color_tx);
|
||||
DRW_shgroup_uniform_texture_ref(grp, "revealBuf", iter->source_reveal_tx);
|
||||
|
||||
GPENCIL_tVfx *tgp_vfx = BLI_memblock_alloc(iter->pd->gp_vfx_pool);
|
||||
GPENCIL_tVfx *tgp_vfx = static_cast<GPENCIL_tVfx *>(BLI_memblock_alloc(iter->pd->gp_vfx_pool));
|
||||
tgp_vfx->target_fb = iter->target_fb;
|
||||
tgp_vfx->vfx_ps = pass;
|
||||
|
||||
@@ -86,7 +86,7 @@ static void gpencil_vfx_blur(BlurShaderFxData *fx, Object *ob, gpIterVfxData *it
|
||||
return;
|
||||
}
|
||||
|
||||
if ((fx->flag & FX_BLUR_DOF_MODE) && iter->pd->camera == NULL) {
|
||||
if ((fx->flag & FX_BLUR_DOF_MODE) && iter->pd->camera == nullptr) {
|
||||
/* No blur outside camera view (or when DOF is disabled on the camera). */
|
||||
return;
|
||||
}
|
||||
@@ -97,7 +97,7 @@ static void gpencil_vfx_blur(BlurShaderFxData *fx, Object *ob, gpIterVfxData *it
|
||||
|
||||
float winmat[4][4], persmat[4][4];
|
||||
float blur_size[2] = {fx->radius[0], fx->radius[1]};
|
||||
DRW_view_persmat_get(NULL, persmat, false);
|
||||
DRW_view_persmat_get(nullptr, persmat, false);
|
||||
const float w = fabsf(mul_project_m4_v3_zfac(persmat, ob->object_to_world[3]));
|
||||
|
||||
if (fx->flag & FX_BLUR_DOF_MODE) {
|
||||
@@ -107,7 +107,7 @@ static void gpencil_vfx_blur(BlurShaderFxData *fx, Object *ob, gpIterVfxData *it
|
||||
}
|
||||
else {
|
||||
/* Modify by distance to camera and object scale. */
|
||||
DRW_view_winmat_get(NULL, winmat, false);
|
||||
DRW_view_winmat_get(nullptr, winmat, false);
|
||||
const float *vp_size = DRW_viewport_size_get();
|
||||
float world_pixel_scale = 1.0f / GPENCIL_PIXEL_FACTOR;
|
||||
float scale = mat4_to_scale(ob->object_to_world);
|
||||
@@ -120,19 +120,21 @@ static void gpencil_vfx_blur(BlurShaderFxData *fx, Object *ob, gpIterVfxData *it
|
||||
DRWState state = DRW_STATE_WRITE_COLOR;
|
||||
if (blur_size[0] > 0.0f) {
|
||||
grp = gpencil_vfx_pass_create("Fx Blur H", state, iter, sh);
|
||||
DRW_shgroup_uniform_vec2_copy(grp, "offset", (float[2]){blur_size[0] * c, blur_size[0] * s});
|
||||
DRW_shgroup_uniform_vec2_copy(
|
||||
grp, "offset", blender::float2{blur_size[0] * c, blur_size[0] * s});
|
||||
DRW_shgroup_uniform_int_copy(grp, "sampCount", max_ii(1, min_ii(fx->samples, blur_size[0])));
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
|
||||
}
|
||||
if (blur_size[1] > 0.0f) {
|
||||
grp = gpencil_vfx_pass_create("Fx Blur V", state, iter, sh);
|
||||
DRW_shgroup_uniform_vec2_copy(grp, "offset", (float[2]){-blur_size[1] * s, blur_size[1] * c});
|
||||
DRW_shgroup_uniform_vec2_copy(
|
||||
grp, "offset", blender::float2{-blur_size[1] * s, blur_size[1] * c});
|
||||
DRW_shgroup_uniform_int_copy(grp, "sampCount", max_ii(1, min_ii(fx->samples, blur_size[1])));
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
|
||||
}
|
||||
}
|
||||
|
||||
static void gpencil_vfx_colorize(ColorizeShaderFxData *fx, Object *UNUSED(ob), gpIterVfxData *iter)
|
||||
static void gpencil_vfx_colorize(ColorizeShaderFxData *fx, Object * /*ob*/, gpIterVfxData *iter)
|
||||
{
|
||||
DRWShadingGroup *grp;
|
||||
|
||||
@@ -144,10 +146,10 @@ static void gpencil_vfx_colorize(ColorizeShaderFxData *fx, Object *UNUSED(ob), g
|
||||
DRW_shgroup_uniform_vec3_copy(grp, "highColor", fx->high_color);
|
||||
DRW_shgroup_uniform_float_copy(grp, "factor", fx->factor);
|
||||
DRW_shgroup_uniform_int_copy(grp, "mode", fx->mode);
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
|
||||
}
|
||||
|
||||
static void gpencil_vfx_flip(FlipShaderFxData *fx, Object *UNUSED(ob), gpIterVfxData *iter)
|
||||
static void gpencil_vfx_flip(FlipShaderFxData *fx, Object * /*ob*/, gpIterVfxData *iter)
|
||||
{
|
||||
DRWShadingGroup *grp;
|
||||
|
||||
@@ -160,9 +162,9 @@ static void gpencil_vfx_flip(FlipShaderFxData *fx, Object *UNUSED(ob), gpIterVfx
|
||||
DRWState state = DRW_STATE_WRITE_COLOR;
|
||||
grp = gpencil_vfx_pass_create("Fx Flip", state, iter, sh);
|
||||
DRW_shgroup_uniform_vec2_copy(grp, "axisFlip", axis_flip);
|
||||
DRW_shgroup_uniform_vec2_copy(grp, "waveOffset", (float[2]){0.0f, 0.0f});
|
||||
DRW_shgroup_uniform_vec2_copy(grp, "waveOffset", blender::float2{0.0f, 0.0f});
|
||||
DRW_shgroup_uniform_float_copy(grp, "swirlRadius", 0.0f);
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
|
||||
}
|
||||
|
||||
static void gpencil_vfx_rim(RimShaderFxData *fx, Object *ob, gpIterVfxData *iter)
|
||||
@@ -170,10 +172,10 @@ static void gpencil_vfx_rim(RimShaderFxData *fx, Object *ob, gpIterVfxData *iter
DRWShadingGroup *grp;

float winmat[4][4], persmat[4][4];
float offset[2] = {fx->offset[0], fx->offset[1]};
float blur_size[2] = {fx->blur[0], fx->blur[1]};
DRW_view_winmat_get(NULL, winmat, false);
DRW_view_persmat_get(NULL, persmat, false);
float offset[2] = {float(fx->offset[0]), float(fx->offset[1])};
float blur_size[2] = {float(fx->blur[0]), float(fx->blur[1])};
DRW_view_winmat_get(nullptr, winmat, false);
DRW_view_persmat_get(nullptr, persmat, false);
const float *vp_size = DRW_viewport_size_get();
const float *vp_size_inv = DRW_viewport_invert_size_get();

@@ -191,12 +193,13 @@ static void gpencil_vfx_rim(RimShaderFxData *fx, Object *ob, gpIterVfxData *iter
|
||||
|
||||
DRWState state = DRW_STATE_WRITE_COLOR;
|
||||
grp = gpencil_vfx_pass_create("Fx Rim H", state, iter, sh);
|
||||
DRW_shgroup_uniform_vec2_copy(grp, "blurDir", (float[2]){blur_size[0] * vp_size_inv[0], 0.0f});
|
||||
DRW_shgroup_uniform_vec2_copy(
|
||||
grp, "blurDir", blender::float2{blur_size[0] * vp_size_inv[0], 0.0f});
|
||||
DRW_shgroup_uniform_vec2_copy(grp, "uvOffset", offset);
|
||||
DRW_shgroup_uniform_int_copy(grp, "sampCount", max_ii(1, min_ii(fx->samples, blur_size[0])));
|
||||
DRW_shgroup_uniform_vec3_copy(grp, "maskColor", fx->mask_rgb);
|
||||
DRW_shgroup_uniform_bool_copy(grp, "isFirstPass", true);
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
|
||||
|
||||
switch (fx->mode) {
|
||||
case eShaderFxRimMode_Normal:
|
||||
@@ -218,13 +221,14 @@ static void gpencil_vfx_rim(RimShaderFxData *fx, Object *ob, gpIterVfxData *iter
|
||||
zero_v2(offset);
|
||||
|
||||
grp = gpencil_vfx_pass_create("Fx Rim V", state, iter, sh);
|
||||
DRW_shgroup_uniform_vec2_copy(grp, "blurDir", (float[2]){0.0f, blur_size[1] * vp_size_inv[1]});
|
||||
DRW_shgroup_uniform_vec2_copy(
|
||||
grp, "blurDir", blender::float2{0.0f, blur_size[1] * vp_size_inv[1]});
|
||||
DRW_shgroup_uniform_vec2_copy(grp, "uvOffset", offset);
|
||||
DRW_shgroup_uniform_vec3_copy(grp, "rimColor", fx->rim_rgb);
|
||||
DRW_shgroup_uniform_int_copy(grp, "sampCount", max_ii(1, min_ii(fx->samples, blur_size[1])));
|
||||
DRW_shgroup_uniform_int_copy(grp, "blendMode", fx->mode);
|
||||
DRW_shgroup_uniform_bool_copy(grp, "isFirstPass", false);
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
|
||||
|
||||
if (fx->mode == eShaderFxRimMode_Overlay) {
|
||||
/* We cannot do custom blending on multi-target frame-buffers.
|
||||
@@ -233,7 +237,7 @@ static void gpencil_vfx_rim(RimShaderFxData *fx, Object *ob, gpIterVfxData *iter
|
||||
DRW_shgroup_state_disable(grp, DRW_STATE_BLEND_MUL);
|
||||
DRW_shgroup_state_enable(grp, DRW_STATE_BLEND_ADD_FULL);
|
||||
DRW_shgroup_uniform_int_copy(grp, "blendMode", 999);
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -242,11 +246,11 @@ static void gpencil_vfx_pixelize(PixelShaderFxData *fx, Object *ob, gpIterVfxDat
|
||||
DRWShadingGroup *grp;
|
||||
|
||||
float persmat[4][4], winmat[4][4], ob_center[3], pixsize_uniform[2];
|
||||
DRW_view_winmat_get(NULL, winmat, false);
|
||||
DRW_view_persmat_get(NULL, persmat, false);
|
||||
DRW_view_winmat_get(nullptr, winmat, false);
|
||||
DRW_view_persmat_get(nullptr, persmat, false);
|
||||
const float *vp_size = DRW_viewport_size_get();
|
||||
const float *vp_size_inv = DRW_viewport_invert_size_get();
|
||||
float pixel_size[2] = {fx->size[0], fx->size[1]};
|
||||
float pixel_size[2] = {float(fx->size[0]), float(fx->size[1])};
|
||||
mul_v2_v2(pixel_size, vp_size_inv);
|
||||
|
||||
/* Fixed pixelisation center from object center. */
|
||||
@@ -278,20 +282,20 @@ static void gpencil_vfx_pixelize(PixelShaderFxData *fx, Object *ob, gpIterVfxDat
|
||||
grp = gpencil_vfx_pass_create("Fx Pixelize X", state, iter, sh);
|
||||
DRW_shgroup_uniform_vec2_copy(grp, "targetPixelSize", pixsize_uniform);
|
||||
DRW_shgroup_uniform_vec2_copy(grp, "targetPixelOffset", ob_center);
|
||||
DRW_shgroup_uniform_vec2_copy(grp, "accumOffset", (float[2]){pixel_size[0], 0.0f});
|
||||
DRW_shgroup_uniform_vec2_copy(grp, "accumOffset", blender::float2{pixel_size[0], 0.0f});
|
||||
int samp_count = (pixel_size[0] / vp_size_inv[0] > 3.0) ? 2 : 1;
|
||||
DRW_shgroup_uniform_int_copy(grp, "sampCount", use_antialiasing ? samp_count : 0);
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
|
||||
}
|
||||
|
||||
if (pixel_size[1] > vp_size_inv[1]) {
|
||||
copy_v2_fl2(pixsize_uniform, vp_size_inv[0], pixel_size[1]);
|
||||
grp = gpencil_vfx_pass_create("Fx Pixelize Y", state, iter, sh);
|
||||
DRW_shgroup_uniform_vec2_copy(grp, "targetPixelSize", pixsize_uniform);
|
||||
DRW_shgroup_uniform_vec2_copy(grp, "accumOffset", (float[2]){0.0f, pixel_size[1]});
|
||||
DRW_shgroup_uniform_vec2_copy(grp, "accumOffset", blender::float2{0.0f, pixel_size[1]});
|
||||
int samp_count = (pixel_size[1] / vp_size_inv[1] > 3.0) ? 2 : 1;
|
||||
DRW_shgroup_uniform_int_copy(grp, "sampCount", use_antialiasing ? samp_count : 0);
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -304,10 +308,10 @@ static void gpencil_vfx_shadow(ShadowShaderFxData *fx, Object *ob, gpIterVfxData
|
||||
|
||||
float uv_mat[4][4], winmat[4][4], persmat[4][4], rot_center[3];
|
||||
float wave_ofs[3], wave_dir[3], wave_phase, blur_dir[2], tmp[2];
|
||||
float offset[2] = {fx->offset[0], fx->offset[1]};
|
||||
float blur_size[2] = {fx->blur[0], fx->blur[1]};
|
||||
DRW_view_winmat_get(NULL, winmat, false);
|
||||
DRW_view_persmat_get(NULL, persmat, false);
|
||||
float offset[2] = {float(fx->offset[0]), float(fx->offset[1])};
|
||||
float blur_size[2] = {float(fx->blur[0]), float(fx->blur[1])};
|
||||
DRW_view_winmat_get(nullptr, winmat, false);
|
||||
DRW_view_persmat_get(nullptr, persmat, false);
|
||||
const float *vp_size = DRW_viewport_size_get();
|
||||
const float *vp_size_inv = DRW_viewport_invert_size_get();
|
||||
const float ratio = vp_size_inv[1] / vp_size_inv[0];
|
||||
@@ -334,11 +338,11 @@ static void gpencil_vfx_shadow(ShadowShaderFxData *fx, Object *ob, gpIterVfxData
|
||||
/* UV transform matrix. (loc, rot, scale) Sent to shader as 2x3 matrix. */
|
||||
unit_m4(uv_mat);
|
||||
translate_m4(uv_mat, rot_center[0], rot_center[1], 0.0f);
|
||||
rescale_m4(uv_mat, (float[3]){1.0f / fx->scale[0], 1.0f / fx->scale[1], 1.0f});
|
||||
rescale_m4(uv_mat, blender::float3{1.0f / fx->scale[0], 1.0f / fx->scale[1], 1.0f});
|
||||
translate_m4(uv_mat, -offset[0], -offset[1], 0.0f);
|
||||
rescale_m4(uv_mat, (float[3]){1.0f / ratio, 1.0f, 1.0f});
|
||||
rescale_m4(uv_mat, blender::float3{1.0f / ratio, 1.0f, 1.0f});
|
||||
rotate_m4(uv_mat, 'Z', fx->rotation);
|
||||
rescale_m4(uv_mat, (float[3]){ratio, 1.0f, 1.0f});
|
||||
rescale_m4(uv_mat, blender::float3{ratio, 1.0f, 1.0f});
|
||||
translate_m4(uv_mat, -rot_center[0], -rot_center[1], 0.0f);
|
||||
|
||||
if (use_wave) {
|
||||
@@ -386,7 +390,7 @@ static void gpencil_vfx_shadow(ShadowShaderFxData *fx, Object *ob, gpIterVfxData
|
||||
DRW_shgroup_uniform_vec2_copy(grp, "uvOffset", uv_mat[3]);
|
||||
DRW_shgroup_uniform_int_copy(grp, "sampCount", max_ii(1, min_ii(fx->samples, blur_size[0])));
|
||||
DRW_shgroup_uniform_bool_copy(grp, "isFirstPass", true);
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
|
||||
|
||||
unit_m4(uv_mat);
|
||||
zero_v2(wave_ofs);
|
||||
@@ -406,10 +410,10 @@ static void gpencil_vfx_shadow(ShadowShaderFxData *fx, Object *ob, gpIterVfxData
|
||||
DRW_shgroup_uniform_vec2_copy(grp, "uvOffset", uv_mat[3]);
|
||||
DRW_shgroup_uniform_int_copy(grp, "sampCount", max_ii(1, min_ii(fx->samples, blur_size[1])));
|
||||
DRW_shgroup_uniform_bool_copy(grp, "isFirstPass", false);
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
|
||||
}
|
||||
|
||||
static void gpencil_vfx_glow(GlowShaderFxData *fx, Object *UNUSED(ob), gpIterVfxData *iter)
|
||||
static void gpencil_vfx_glow(GlowShaderFxData *fx, Object * /*ob*/, gpIterVfxData *iter)
|
||||
{
|
||||
const bool use_glow_under = (fx->flag & FX_GLOW_USE_ALPHA) != 0;
|
||||
DRWShadingGroup *grp;
|
||||
@@ -435,13 +439,13 @@ static void gpencil_vfx_glow(GlowShaderFxData *fx, Object *UNUSED(ob), gpIterVfx
|
||||
|
||||
DRWState state = DRW_STATE_WRITE_COLOR;
|
||||
grp = gpencil_vfx_pass_create("Fx Glow H", state, iter, sh);
|
||||
DRW_shgroup_uniform_vec2_copy(grp, "offset", (float[2]){fx->blur[0] * c, fx->blur[0] * s});
|
||||
DRW_shgroup_uniform_vec2_copy(grp, "offset", blender::float2{fx->blur[0] * c, fx->blur[0] * s});
|
||||
DRW_shgroup_uniform_int_copy(grp, "sampCount", max_ii(1, min_ii(fx->samples, fx->blur[0])));
|
||||
DRW_shgroup_uniform_vec4_copy(grp, "threshold", ref_col);
|
||||
DRW_shgroup_uniform_vec4_copy(grp, "glowColor", fx->glow_color);
|
||||
DRW_shgroup_uniform_bool_copy(grp, "glowUnder", use_glow_under);
|
||||
DRW_shgroup_uniform_bool_copy(grp, "firstPass", true);
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
|
||||
|
||||
state = DRW_STATE_WRITE_COLOR;
|
||||
/* Blending: Force blending. */
|
||||
@@ -469,13 +473,14 @@ static void gpencil_vfx_glow(GlowShaderFxData *fx, Object *UNUSED(ob), gpIterVfx
|
||||
}
|
||||
|
||||
grp = gpencil_vfx_pass_create("Fx Glow V", state, iter, sh);
|
||||
DRW_shgroup_uniform_vec2_copy(grp, "offset", (float[2]){-fx->blur[1] * s, fx->blur[1] * c});
|
||||
DRW_shgroup_uniform_vec2_copy(grp, "offset", blender::float2{-fx->blur[1] * s, fx->blur[1] * c});
|
||||
DRW_shgroup_uniform_int_copy(grp, "sampCount", max_ii(1, min_ii(fx->samples, fx->blur[0])));
|
||||
DRW_shgroup_uniform_vec4_copy(grp, "threshold", (float[4]){-1.0f, -1.0f, -1.0f, -1.0});
|
||||
DRW_shgroup_uniform_vec4_copy(grp, "glowColor", (float[4]){1.0f, 1.0f, 1.0f, fx->glow_color[3]});
|
||||
DRW_shgroup_uniform_vec4_copy(grp, "threshold", blender::float4{-1.0f, -1.0f, -1.0f, -1.0});
|
||||
DRW_shgroup_uniform_vec4_copy(
|
||||
grp, "glowColor", blender::float4{1.0f, 1.0f, 1.0f, fx->glow_color[3]});
|
||||
DRW_shgroup_uniform_bool_copy(grp, "firstPass", false);
|
||||
DRW_shgroup_uniform_int_copy(grp, "blendMode", fx->blend_mode);
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
|
||||
}
|
||||
|
||||
static void gpencil_vfx_wave(WaveShaderFxData *fx, Object *ob, gpIterVfxData *iter)
|
||||
@@ -484,8 +489,8 @@ static void gpencil_vfx_wave(WaveShaderFxData *fx, Object *ob, gpIterVfxData *it
|
||||
|
||||
float winmat[4][4], persmat[4][4], wave_center[3];
|
||||
float wave_ofs[3], wave_dir[3], wave_phase;
|
||||
DRW_view_winmat_get(NULL, winmat, false);
|
||||
DRW_view_persmat_get(NULL, persmat, false);
|
||||
DRW_view_winmat_get(nullptr, winmat, false);
|
||||
DRW_view_persmat_get(nullptr, persmat, false);
|
||||
const float *vp_size = DRW_viewport_size_get();
|
||||
const float *vp_size_inv = DRW_viewport_invert_size_get();
|
||||
|
||||
@@ -525,25 +530,25 @@ static void gpencil_vfx_wave(WaveShaderFxData *fx, Object *ob, gpIterVfxData *it
|
||||
|
||||
DRWState state = DRW_STATE_WRITE_COLOR;
|
||||
grp = gpencil_vfx_pass_create("Fx Wave", state, iter, sh);
|
||||
DRW_shgroup_uniform_vec2_copy(grp, "axisFlip", (float[2]){1.0f, 1.0f});
|
||||
DRW_shgroup_uniform_vec2_copy(grp, "axisFlip", blender::float2{1.0f, 1.0f});
|
||||
DRW_shgroup_uniform_vec2_copy(grp, "waveDir", wave_dir);
|
||||
DRW_shgroup_uniform_vec2_copy(grp, "waveOffset", wave_ofs);
|
||||
DRW_shgroup_uniform_float_copy(grp, "wavePhase", wave_phase);
|
||||
DRW_shgroup_uniform_float_copy(grp, "swirlRadius", 0.0f);
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
|
||||
}
|
||||
|
||||
static void gpencil_vfx_swirl(SwirlShaderFxData *fx, Object *UNUSED(ob), gpIterVfxData *iter)
|
||||
static void gpencil_vfx_swirl(SwirlShaderFxData *fx, Object * /*ob*/, gpIterVfxData *iter)
|
||||
{
|
||||
DRWShadingGroup *grp;
|
||||
|
||||
if (fx->object == NULL) {
|
||||
if (fx->object == nullptr) {
|
||||
return;
|
||||
}
|
||||
|
||||
float winmat[4][4], persmat[4][4], swirl_center[3];
|
||||
DRW_view_winmat_get(NULL, winmat, false);
|
||||
DRW_view_persmat_get(NULL, persmat, false);
|
||||
DRW_view_winmat_get(nullptr, winmat, false);
|
||||
DRW_view_persmat_get(nullptr, persmat, false);
|
||||
const float *vp_size = DRW_viewport_size_get();
|
||||
|
||||
copy_v3_v3(swirl_center, fx->object->object_to_world[3]);
|
||||
@@ -570,12 +575,12 @@ static void gpencil_vfx_swirl(SwirlShaderFxData *fx, Object *UNUSED(ob), gpIterV
|
||||
|
||||
DRWState state = DRW_STATE_WRITE_COLOR;
|
||||
grp = gpencil_vfx_pass_create("Fx Flip", state, iter, sh);
|
||||
DRW_shgroup_uniform_vec2_copy(grp, "axisFlip", (float[2]){1.0f, 1.0f});
|
||||
DRW_shgroup_uniform_vec2_copy(grp, "waveOffset", (float[2]){0.0f, 0.0f});
|
||||
DRW_shgroup_uniform_vec2_copy(grp, "axisFlip", blender::float2{1.0f, 1.0f});
|
||||
DRW_shgroup_uniform_vec2_copy(grp, "waveOffset", blender::float2{0.0f, 0.0f});
|
||||
DRW_shgroup_uniform_vec2_copy(grp, "swirlCenter", swirl_center);
|
||||
DRW_shgroup_uniform_float_copy(grp, "swirlAngle", fx->angle);
|
||||
DRW_shgroup_uniform_float_copy(grp, "swirlRadius", radius);
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
|
||||
}
|
||||
|
||||
void gpencil_vfx_cache_populate(GPENCIL_Data *vedata, Object *ob, GPENCIL_tObject *tgp_ob)
|
||||
@@ -585,16 +590,16 @@ void gpencil_vfx_cache_populate(GPENCIL_Data *vedata, Object *ob, GPENCIL_tObjec
|
||||
GPENCIL_PrivateData *pd = vedata->stl->pd;
|
||||
|
||||
/* These may not be allocated yet, use address of future pointer. */
|
||||
gpIterVfxData iter = {
|
||||
.pd = pd,
|
||||
.tgp_ob = tgp_ob,
|
||||
.target_fb = &fbl->layer_fb,
|
||||
.source_fb = &fbl->object_fb,
|
||||
.target_color_tx = &pd->color_layer_tx,
|
||||
.source_color_tx = &pd->color_object_tx,
|
||||
.target_reveal_tx = &pd->reveal_layer_tx,
|
||||
.source_reveal_tx = &pd->reveal_object_tx,
|
||||
};
|
||||
gpIterVfxData iter{};
|
||||
iter.pd = pd;
|
||||
iter.tgp_ob = tgp_ob;
|
||||
iter.target_fb = &fbl->layer_fb;
|
||||
iter.source_fb = &fbl->object_fb;
|
||||
iter.target_color_tx = &pd->color_layer_tx;
|
||||
iter.source_color_tx = &pd->color_object_tx;
|
||||
iter.target_reveal_tx = &pd->reveal_layer_tx;
|
||||
iter.source_reveal_tx = &pd->reveal_object_tx;
|
||||
|
||||
/* If simplify enabled, nothing more to do. */
|
||||
if (!pd->simplify_fx) {
|
||||
LISTBASE_FOREACH (ShaderFxData *, fx, &ob->shader_fx) {
|
||||
@@ -634,7 +639,7 @@ void gpencil_vfx_cache_populate(GPENCIL_Data *vedata, Object *ob, GPENCIL_tObjec
|
||||
}
|
||||
}
|
||||
|
||||
if ((!pd->simplify_fx && tgp_ob->vfx.first != NULL) || tgp_ob->do_mat_holdout) {
|
||||
if ((!pd->simplify_fx && tgp_ob->vfx.first != nullptr) || tgp_ob->do_mat_holdout) {
|
||||
/* We need an extra pass to combine result to main buffer. */
|
||||
iter.target_fb = &fbl->gpencil_fb;
|
||||
|
||||
@@ -643,7 +648,7 @@ void gpencil_vfx_cache_populate(GPENCIL_Data *vedata, Object *ob, GPENCIL_tObjec
|
||||
DRWState state = DRW_STATE_WRITE_COLOR | DRW_STATE_BLEND_MUL;
|
||||
DRWShadingGroup *grp = gpencil_vfx_pass_create("GPencil Object Compose", state, &iter, sh);
|
||||
DRW_shgroup_uniform_int_copy(grp, "isFirstPass", true);
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
|
||||
|
||||
/* We cannot do custom blending on multi-target frame-buffers.
|
||||
* Workaround by doing 2 passes. */
|
||||
@@ -651,7 +656,7 @@ void gpencil_vfx_cache_populate(GPENCIL_Data *vedata, Object *ob, GPENCIL_tObjec
|
||||
DRW_shgroup_state_disable(grp, DRW_STATE_BLEND_MUL);
|
||||
DRW_shgroup_state_enable(grp, DRW_STATE_BLEND_ADD_FULL);
|
||||
DRW_shgroup_uniform_int_copy(grp, "isFirstPass", false);
|
||||
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
|
||||
|
||||
pd->use_object_fb = true;
|
||||
pd->use_layer_fb = true;
|
||||
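The hunks above repeat two of the C-to-C++ conversions driving this change: C99 compound literals such as (float[2]){x, y} become blender::float2 temporaries, and the C designated initializer for gpIterVfxData becomes value-initialization followed by member assignment. A minimal standalone sketch of both idioms, using hypothetical types rather than the Blender API:

struct Vec2 {
  float x, y;
};

struct IterData {
  void *pd = nullptr;
  void *target_fb = nullptr;
};

static float length_squared(const Vec2 &v)
{
  return v.x * v.x + v.y * v.y;
}

static float example(void *pd, void *fb)
{
  /* C passes a compound literal such as (float[2]){1.0f, 2.0f};
   * C++ builds a braced temporary of a small vector type instead. */
  const float s = length_squared(Vec2{1.0f, 2.0f});

  /* C: IterData iter = {.pd = pd, .target_fb = fb}; (designated initializers).
   * C++ here: value-initialize the struct, then assign the needed members. */
  IterData iter{};
  iter.pd = pd;
  iter.target_fb = fb;
  return iter.target_fb != nullptr ? s : 0.0f;
}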
@@ -25,21 +25,21 @@
/** \name Structs and static variables
* \{ */

typedef struct SELECTIDDEBUG_PassList {
struct SELECTIDDEBUG_PassList {
struct DRWPass *debug_pass;
} SELECTIDDEBUG_PassList;
};

typedef struct SELECTIDDEBUG_Data {
struct SELECTIDDEBUG_Data {
void *engine_type;
DRWViewportEmptyList *fbl;
DRWViewportEmptyList *txl;
SELECTIDDEBUG_PassList *psl;
DRWViewportEmptyList *stl;
} SELECTIDDEBUG_Data;
};

static struct {
struct GPUShader *select_debug_sh;
} e_data = {{NULL}}; /* Engine data */
} e_data = {{nullptr}}; /* Engine data */

/** \} */

@@ -67,7 +67,7 @@ static void select_debug_engine_init(void *vedata)
" fragColor.b = ((px >> 4) & 0x3u) / float(0x3u);"
" }"
"}\n",
NULL);
nullptr);
}

psl->debug_pass = DRW_pass_create("Debug Pass", DRW_STATE_WRITE_COLOR | DRW_STATE_BLEND_ALPHA);
@@ -75,7 +75,7 @@ static void select_debug_engine_init(void *vedata)
if (texture_u32) {
DRWShadingGroup *shgrp = DRW_shgroup_create(e_data.select_debug_sh, psl->debug_pass);
DRW_shgroup_uniform_texture(shgrp, "image", texture_u32);
DRW_shgroup_call_procedural_triangles(shgrp, NULL, 1);
DRW_shgroup_call_procedural_triangles(shgrp, nullptr, 1);
}
}

@@ -100,21 +100,21 @@ static const DrawEngineDataSize select_debug_data_size = DRW_VIEWPORT_DATA_SIZE(
SELECTIDDEBUG_Data);

DrawEngineType draw_engine_debug_select_type = {
/*next*/ NULL,
/*prev*/ NULL,
/*next*/ nullptr,
/*prev*/ nullptr,
/*idname*/ N_("Select ID Debug"),
/*vedata_size*/ &select_debug_data_size,
/*engine_init*/ &select_debug_engine_init,
/*engine_free*/ &select_debug_engine_free,
/*instance_free*/ /*instance_free*/ NULL,
/*cache_init*/ NULL,
/*cache_populate*/ NULL,
/*cache_finish*/ NULL,
/*instance_free*/ /*instance_free*/ nullptr,
/*cache_init*/ nullptr,
/*cache_populate*/ nullptr,
/*cache_finish*/ nullptr,
/*draw_scene*/ &select_debug_draw_scene,
/*view_update*/ NULL,
/*id_update*/ NULL,
/*render_to_image*/ NULL,
/*store_metadata*/ NULL,
/*view_update*/ nullptr,
/*id_update*/ nullptr,
/*render_to_image*/ nullptr,
/*store_metadata*/ nullptr,
};

/** \} */
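The engine-type hunks above also drop the C typedef struct pattern, since in C++ a struct tag is already a usable type name. A minimal sketch with hypothetical names, not the actual engine structs:

/* C needs the typedef so the type can be spelled without the 'struct' keyword:
 *   typedef struct PassList { struct DRWPass *debug_pass; } PassList; */

/* In C++ the struct name alone suffices, so the typedef is dropped. */
struct Pass;

struct PassList {
  Pass *debug_pass;
};

static PassList pass_list_make_empty()
{
  return PassList{nullptr};
}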
@@ -54,7 +54,7 @@ short select_id_get_object_select_mode(Scene *scene, Object *ob)
* Note this is not working correctly for vertex-paint (yet), but has been discussed
* in #66645 and there is a solution by @mano-wii in P1032.
* So OB_MODE_VERTEX_PAINT is already included here [required for P1032 I guess]. */
Mesh *me_orig = DEG_get_original_object(ob)->data;
Mesh *me_orig = static_cast<Mesh *>(DEG_get_original_object(ob)->data);
if (me_orig->editflag & ME_EDIT_PAINT_VERT_SEL) {
r_select_mode = SCE_SELECT_VERTEX;
}
@@ -91,7 +91,7 @@ static void draw_select_id_edit_mesh(SELECTID_StorageList *stl,
uint *r_edge_offset,
uint *r_face_offset)
{
Mesh *me = ob->data;
Mesh *me = static_cast<Mesh *>(ob->data);
BMEditMesh *em = me->edit_mesh;

BM_mesh_elem_table_ensure(em->bm, BM_VERT | BM_EDGE | BM_FACE);
@@ -156,7 +156,7 @@ static void draw_select_id_mesh(SELECTID_StorageList *stl,
uint *r_edge_offset,
uint *r_face_offset)
{
Mesh *me = ob->data;
Mesh *me = static_cast<Mesh *>(ob->data);

struct GPUBatch *geom_faces = DRW_mesh_batch_cache_get_triangles_with_select_id(me);
DRWShadingGroup *face_shgrp;
@@ -211,7 +211,7 @@ void select_id_draw_object(void *vedata,
switch (ob->type) {
case OB_MESH:
if (ob->mode & OB_MODE_EDIT) {
bool draw_facedot = check_ob_drawface_dot(select_mode, v3d, ob->dt);
bool draw_facedot = check_ob_drawface_dot(select_mode, v3d, eDrawType(ob->dt));
draw_select_id_edit_mesh(stl,
ob,
select_mode,
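The select-ID hunks above add static_cast<Mesh *>(...) around ob->data and an eDrawType(...) cast, because C++ neither converts void * to a typed pointer implicitly nor passes a plain int where an enum is expected. A small standalone sketch of the void-pointer case, with stand-in types rather than the real Object/Mesh:

#include <cassert>

struct FakeMesh {
  int totvert = 0;
};

struct FakeObject {
  void *data = nullptr; /* type-erased object data, as in the DNA structs */
};

static int vert_count(const FakeObject &ob)
{
  /* C accepts `FakeMesh *me = ob.data;`; C++ requires the explicit cast. */
  const FakeMesh *me = static_cast<const FakeMesh *>(ob.data);
  assert(me != nullptr);
  return me->totvert;
}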
@@ -34,7 +34,7 @@ static struct {
SELECTID_Shaders sh_data[GPU_SHADER_CFG_LEN];
SELECTID_Context context;
uint runtime_new_objects;
} e_data = {NULL}; /* Engine data */
} e_data = {nullptr}; /* Engine data */

/* -------------------------------------------------------------------- */
/** \name Utils
@@ -47,28 +47,28 @@ static void select_engine_framebuffer_setup(void)
size[0] = GPU_texture_width(dtxl->depth);
size[1] = GPU_texture_height(dtxl->depth);

if (e_data.framebuffer_select_id == NULL) {
if (e_data.framebuffer_select_id == nullptr) {
e_data.framebuffer_select_id = GPU_framebuffer_create("framebuffer_select_id");
}

if ((e_data.texture_u32 != NULL) && ((GPU_texture_width(e_data.texture_u32) != size[0]) ||
(GPU_texture_height(e_data.texture_u32) != size[1])))
if ((e_data.texture_u32 != nullptr) && ((GPU_texture_width(e_data.texture_u32) != size[0]) ||
(GPU_texture_height(e_data.texture_u32) != size[1])))
{
GPU_texture_free(e_data.texture_u32);
e_data.texture_u32 = NULL;
e_data.texture_u32 = nullptr;
}

/* Make sure the depth texture is attached.
* It may disappear when loading another Blender session. */
GPU_framebuffer_texture_attach(e_data.framebuffer_select_id, dtxl->depth, 0, 0);

if (e_data.texture_u32 == NULL) {
if (e_data.texture_u32 == nullptr) {
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_ATTACHMENT;
e_data.texture_u32 = GPU_texture_create_2d(
"select_buf_ids", size[0], size[1], 1, GPU_R32UI, usage, NULL);
"select_buf_ids", size[0], size[1], 1, GPU_R32UI, usage, nullptr);
GPU_framebuffer_texture_attach(e_data.framebuffer_select_id, e_data.texture_u32, 0, 0);

GPU_framebuffer_check_valid(e_data.framebuffer_select_id, NULL);
GPU_framebuffer_check_valid(e_data.framebuffer_select_id, nullptr);
}
}

@@ -98,7 +98,7 @@ static void select_engine_init(void *vedata)

if (!stl->g_data) {
/* Alloc transient pointers */
stl->g_data = MEM_mallocN(sizeof(*stl->g_data), __func__);
stl->g_data = static_cast<SELECTID_PrivateData *>(MEM_mallocN(sizeof(*stl->g_data), __func__));
}

{
@@ -108,14 +108,15 @@ static void select_engine_init(void *vedata)
DRW_view_viewmat_get(view_default, viewmat, false);
DRW_view_winmat_get(view_default, winmat, false);
projmat_from_subregion(winmat,
(int[2]){draw_ctx->region->winx, draw_ctx->region->winy},
blender::int2{draw_ctx->region->winx, draw_ctx->region->winy},
e_data.context.last_rect.xmin,
e_data.context.last_rect.xmax,
e_data.context.last_rect.ymin,
e_data.context.last_rect.ymax,
winmat_subregion);

stl->g_data->view_subregion = DRW_view_create(viewmat, winmat_subregion, NULL, NULL, NULL);
stl->g_data->view_subregion = DRW_view_create(
viewmat, winmat_subregion, nullptr, nullptr, nullptr);

/* Create view with depth offset */
stl->g_data->view_faces = (DRWView *)view_default;
@@ -140,7 +141,8 @@ static void select_cache_init(void *vedata)
}

DRWState state = DRW_STATE_DEFAULT;
state |= RV3D_CLIPPING_ENABLED(draw_ctx->v3d, draw_ctx->rv3d) ? DRW_STATE_CLIP_PLANES : 0;
state |= RV3D_CLIPPING_ENABLED(draw_ctx->v3d, draw_ctx->rv3d) ? DRW_STATE_CLIP_PLANES :
DRWState(0);

bool retopology_occlusion = RETOPOLOGY_ENABLED(draw_ctx->v3d) && !XRAY_ENABLED(draw_ctx->v3d);
float retopology_offset = RETOPOLOGY_OFFSET(draw_ctx->v3d);
@@ -206,7 +208,7 @@ static void select_cache_init(void *vedata)
e_data.context.index_drawn_len = 1;
select_engine_framebuffer_setup();
GPU_framebuffer_bind(e_data.framebuffer_select_id);
GPU_framebuffer_clear_color_depth(e_data.framebuffer_select_id, (const float[4]){0.0f}, 1.0f);
GPU_framebuffer_clear_color_depth(e_data.framebuffer_select_id, blender::float4{0.0f}, 1.0f);
}
e_data.runtime_new_objects = 0;
}
@@ -220,7 +222,7 @@ static void select_cache_populate(void *vedata, Object *ob)
!XRAY_ENABLED(draw_ctx->v3d);
if (retopology_occlusion && !DRW_object_is_in_edit_mode(ob)) {
if (ob->dt >= OB_SOLID) {
GPUBatch *geom_faces = DRW_mesh_batch_cache_get_surface(ob->data);
GPUBatch *geom_faces = DRW_mesh_batch_cache_get_surface(static_cast<Mesh *>(ob->data));
DRW_shgroup_call_obmat(stl->g_data->shgrp_occlude, geom_faces, ob->object_to_world);
}
return;
@@ -232,7 +234,7 @@ static void select_cache_populate(void *vedata, Object *ob)
if (!e_data.context.is_dirty && sel_data && sel_data->is_drawn) {
/* The object indices have already been drawn. Fill depth pass.
* Optimization: Most of the time this depth pass is not used. */
struct Mesh *me = ob->data;
struct Mesh *me = static_cast<Mesh *>(ob->data);
if (e_data.context.select_mode & SCE_SELECT_FACE) {
GPUBatch *geom_faces = DRW_mesh_batch_cache_get_triangles_with_select_id(me);
DRW_shgroup_call_obmat(stl->g_data->shgrp_depth_only, geom_faces, ob->object_to_world);
@@ -262,9 +264,9 @@ static void select_cache_populate(void *vedata, Object *ob)
select_id_object_min_max(ob, min, max);

if (DRW_culling_min_max_test(stl->g_data->view_subregion, ob->object_to_world, min, max)) {
if (sel_data == NULL) {
if (sel_data == nullptr) {
sel_data = (SELECTID_ObjectData *)DRW_drawdata_ensure(
&ob->id, &draw_engine_select_type, sizeof(SELECTID_ObjectData), NULL, NULL);
&ob->id, &draw_engine_select_type, sizeof(SELECTID_ObjectData), nullptr, nullptr);
}
sel_data->dd.recalc = 0;
sel_data->drawn_index = e_data.context.objects_drawn_len;
@@ -353,46 +355,46 @@ static void select_engine_free(void)
static const DrawEngineDataSize select_data_size = DRW_VIEWPORT_DATA_SIZE(SELECTID_Data);

DrawEngineType draw_engine_select_type = {
/*next*/ NULL,
/*prev*/ NULL,
/*next*/ nullptr,
/*prev*/ nullptr,
/*idname*/ N_("Select ID"),
/*vedata_size*/ &select_data_size,
/*engine_init*/ &select_engine_init,
/*engine_free*/ &select_engine_free,
/*instance_free*/ /*instance_free*/ NULL,
/*instance_free*/ /*instance_free*/ nullptr,
/*cache_init*/ &select_cache_init,
/*cache_populate*/ &select_cache_populate,
/*cache_finish*/ NULL,
/*cache_finish*/ nullptr,
/*draw_scene*/ &select_draw_scene,
/*view_update*/ NULL,
/*id_update*/ NULL,
/*render_to_image*/ NULL,
/*store_metadata*/ NULL,
/*view_update*/ nullptr,
/*id_update*/ nullptr,
/*render_to_image*/ nullptr,
/*store_metadata*/ nullptr,
};

/* NOTE: currently unused, we may want to register so we can see this when debugging the view. */

RenderEngineType DRW_engine_viewport_select_type = {
/*next*/ NULL,
/*prev*/ NULL,
/*next*/ nullptr,
/*prev*/ nullptr,
/*idname*/ SELECT_ENGINE,
/*name*/ N_("Select ID"),
/*flag*/ RE_INTERNAL | RE_USE_STEREO_VIEWPORT | RE_USE_GPU_CONTEXT,
/*update*/ NULL,
/*render*/ NULL,
/*render_frame_finish*/ NULL,
/*draw*/ NULL,
/*bake*/ NULL,
/*view_update*/ NULL,
/*view_draw*/ NULL,
/*update_script_node*/ NULL,
/*update_render_passes*/ NULL,
/*update*/ nullptr,
/*render*/ nullptr,
/*render_frame_finish*/ nullptr,
/*draw*/ nullptr,
/*bake*/ nullptr,
/*view_update*/ nullptr,
/*view_draw*/ nullptr,
/*update_script_node*/ nullptr,
/*update_render_passes*/ nullptr,
/*draw_engine*/ &draw_engine_select_type,
/*rna_ext*/
{
/*data*/ NULL,
/*srna*/ NULL,
/*call*/ NULL,
/*data*/ nullptr,
/*srna*/ nullptr,
/*call*/ nullptr,
},
};

@@ -12,6 +12,10 @@

#include "DRW_render.h"

#ifdef __cplusplus
extern "C" {
#endif

/* GPUViewport.storage
* Is freed every time the viewport engine changes. */
typedef struct SELECTID_StorageList {
@@ -65,3 +69,7 @@ void select_id_draw_object(void *vedata,
uint *r_vert_offset,
uint *r_edge_offset,
uint *r_face_offset);

#ifdef __cplusplus
}
#endif

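The header hunk above wraps the declarations in extern "C" guards so the same header keeps working from both C and C++ translation units while the migration is in progress. A minimal sketch of the pattern for a hypothetical header:

/* my_api.h -- hypothetical, for illustration only. */
#pragma once

#ifdef __cplusplus
extern "C" {
#endif

/* Declarations keep C linkage, so C++ callers link against the C definitions. */
void my_api_init(void);
int my_api_query(unsigned int id);

#ifdef __cplusplus
}
#endif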
@@ -26,8 +26,8 @@

GPUUniformBuf *workbench_material_ubo_alloc(WORKBENCH_PrivateData *wpd)
{
GPUUniformBuf **ubo = BLI_memblock_alloc(wpd->material_ubo);
if (*ubo == NULL) {
GPUUniformBuf **ubo = static_cast<GPUUniformBuf **>(BLI_memblock_alloc(wpd->material_ubo));
if (*ubo == nullptr) {
*ubo = GPU_uniformbuf_create(sizeof(WORKBENCH_UBO_Material) * MAX_MATERIAL);
}
return *ubo;
@@ -35,7 +35,7 @@ GPUUniformBuf *workbench_material_ubo_alloc(WORKBENCH_PrivateData *wpd)

static void workbench_ubo_free(void *elem)
{
GPUUniformBuf **ubo = elem;
GPUUniformBuf **ubo = static_cast<GPUUniformBuf **>(elem);
DRW_UBO_FREE_SAFE(*ubo);
}

@@ -48,7 +48,7 @@ static void workbench_view_layer_data_free(void *storage)
DRW_UBO_FREE_SAFE(vldata->cavity_sample_ubo);
DRW_TEXTURE_FREE_SAFE(vldata->cavity_jitter_tx);

BLI_memblock_destroy(vldata->material_ubo_data, NULL);
BLI_memblock_destroy(vldata->material_ubo_data, nullptr);
BLI_memblock_destroy(vldata->material_ubo, workbench_ubo_free);
}

@@ -59,12 +59,14 @@ static WORKBENCH_ViewLayerData *workbench_view_layer_data_ensure_ex(ViewLayer *v
(DrawEngineType *)&workbench_view_layer_data_ensure_ex,
&workbench_view_layer_data_free);

if (*vldata == NULL) {
*vldata = MEM_callocN(sizeof(**vldata), "WORKBENCH_ViewLayerData");
if (*vldata == nullptr) {
*vldata = static_cast<WORKBENCH_ViewLayerData *>(
MEM_callocN(sizeof(**vldata), "WORKBENCH_ViewLayerData"));
size_t matbuf_size = sizeof(WORKBENCH_UBO_Material) * MAX_MATERIAL;
(*vldata)->material_ubo_data = BLI_memblock_create_ex(matbuf_size, matbuf_size * 2);
(*vldata)->material_ubo = BLI_memblock_create_ex(sizeof(void *), sizeof(void *) * 8);
(*vldata)->world_ubo = GPU_uniformbuf_create_ex(sizeof(WORKBENCH_UBO_World), NULL, "wb_World");
(*vldata)->world_ubo = GPU_uniformbuf_create_ex(
sizeof(WORKBENCH_UBO_World), nullptr, "wb_World");
}

return *vldata;
@@ -76,7 +78,7 @@ static void workbench_studiolight_data_update(WORKBENCH_PrivateData *wpd, WORKBE
{
StudioLight *studiolight = wpd->studio_light;
float view_matrix[4][4], rot_matrix[4][4];
DRW_view_viewmat_get(NULL, view_matrix, false);
DRW_view_viewmat_get(nullptr, view_matrix, false);

if (USE_WORLD_ORIENTATION(wpd)) {
axis_angle_to_mat4_single(rot_matrix, 'Z', -wpd->shading.studiolight_rot_z);
@@ -96,7 +98,7 @@ static void workbench_studiolight_data_update(WORKBENCH_PrivateData *wpd, WORKBE
for (int i = 0; i < 4; i++) {
WORKBENCH_UBO_Light *light = &wd->lights[i];

SolidLight *sl = (studiolight) ? &studiolight->light[i] : NULL;
SolidLight *sl = (studiolight) ? &studiolight->light[i] : nullptr;
if (sl && sl->flag) {
copy_v3_v3(light->light_direction, sl->vec);
mul_mat3_m4_v3(rot_matrix, light->light_direction);
@@ -126,7 +128,7 @@ static void workbench_studiolight_data_update(WORKBENCH_PrivateData *wpd, WORKBE
void workbench_private_data_alloc(WORKBENCH_StorageList *stl)
{
if (!stl->wpd) {
stl->wpd = MEM_callocN(sizeof(*stl->wpd), __func__);
stl->wpd = static_cast<WORKBENCH_PrivateData *>(MEM_callocN(sizeof(*stl->wpd), __func__));
stl->wpd->taa_sample_len_previous = -1;
stl->wpd->view_updated = true;
}
@@ -152,7 +154,7 @@ void workbench_private_data_init(WORKBENCH_PrivateData *wpd)

/* FIXME: This reproduce old behavior when workbench was separated in 2 engines.
* But this is a workaround for a missing update tagging. */
DRWState clip_state = RV3D_CLIPPING_ENABLED(v3d, rv3d) ? DRW_STATE_CLIP_PLANES : 0;
DRWState clip_state = RV3D_CLIPPING_ENABLED(v3d, rv3d) ? DRW_STATE_CLIP_PLANES : DRWState(0);
if (clip_state != wpd->clip_state) {
wpd->view_updated = true;
}
@@ -167,7 +169,7 @@ void workbench_private_data_init(WORKBENCH_PrivateData *wpd)

/* FIXME: This reproduce old behavior when workbench was separated in 2 engines.
* But this is a workaround for a missing update tagging. */
if ((rv3d != NULL) && (rv3d->rflag & RV3D_GPULIGHT_UPDATE)) {
if ((rv3d != nullptr) && (rv3d->rflag & RV3D_GPULIGHT_UPDATE)) {
wpd->view_updated = true;
rv3d->rflag &= ~RV3D_GPULIGHT_UPDATE;
}
@@ -237,7 +239,7 @@ void workbench_private_data_init(WORKBENCH_PrivateData *wpd)
copy_v4_fl(wpd->background_color, 0.0f);
}

wpd->cull_state = CULL_BACKFACE_ENABLED(wpd) ? DRW_STATE_CULL_BACK : 0;
wpd->cull_state = CULL_BACKFACE_ENABLED(wpd) ? DRW_STATE_CULL_BACK : DRWState(0);

if (wpd->shading.light == V3D_LIGHTING_MATCAP) {
wpd->studio_light = BKE_studiolight_find(wpd->shading.matcap, STUDIOLIGHT_TYPE_MATCAP);
@@ -247,7 +249,7 @@ void workbench_private_data_init(WORKBENCH_PrivateData *wpd)
}

/* If matcaps are missing, use this as fallback. */
if (UNLIKELY(wpd->studio_light == NULL)) {
if (UNLIKELY(wpd->studio_light == nullptr)) {
wpd->studio_light = BKE_studiolight_find(wpd->shading.studio_light, STUDIOLIGHT_TYPE_STUDIO);
}

@@ -259,11 +261,12 @@ void workbench_private_data_init(WORKBENCH_PrivateData *wpd)
wpd->material_chunk_curr = 0;
wpd->material_index = 1;
/* Create default material ubo. */
wpd->material_ubo_data_curr = BLI_memblock_alloc(wpd->material_ubo_data);
wpd->material_ubo_data_curr = static_cast<WORKBENCH_UBO_Material *>(
BLI_memblock_alloc(wpd->material_ubo_data));
wpd->material_ubo_curr = workbench_material_ubo_alloc(wpd);
/* Init default material used by vertex color & texture. */
workbench_material_ubo_data(
wpd, NULL, NULL, &wpd->material_ubo_data_curr[0], V3D_SHADING_MATERIAL_COLOR);
wpd, nullptr, nullptr, &wpd->material_ubo_data_curr[0], V3D_SHADING_MATERIAL_COLOR);
}
}

@@ -285,7 +288,7 @@ void workbench_update_world_ubo(WORKBENCH_PrivateData *wpd)
GPU_uniformbuf_update(wpd->world_ubo, &wd);
}

void workbench_update_material_ubos(WORKBENCH_PrivateData *UNUSED(wpd))
void workbench_update_material_ubos(WORKBENCH_PrivateData * /*wpd*/)
{
const DRWContextState *draw_ctx = DRW_context_state_get();
WORKBENCH_ViewLayerData *vldata = workbench_view_layer_data_ensure_ex(draw_ctx->view_layer);
@@ -294,12 +297,12 @@ void workbench_update_material_ubos(WORKBENCH_PrivateData *UNUSED(wpd))
BLI_memblock_iternew(vldata->material_ubo, &iter);
BLI_memblock_iternew(vldata->material_ubo_data, &iter_data);
WORKBENCH_UBO_Material *matchunk;
while ((matchunk = BLI_memblock_iterstep(&iter_data))) {
GPUUniformBuf **ubo = BLI_memblock_iterstep(&iter);
BLI_assert(*ubo != NULL);
while ((matchunk = static_cast<WORKBENCH_UBO_Material *>(BLI_memblock_iterstep(&iter_data)))) {
GPUUniformBuf **ubo = static_cast<GPUUniformBuf **>(BLI_memblock_iterstep(&iter));
BLI_assert(*ubo != nullptr);
GPU_uniformbuf_update(*ubo, matchunk);
}

BLI_memblock_clear(vldata->material_ubo, workbench_ubo_free);
BLI_memblock_clear(vldata->material_ubo_data, NULL);
BLI_memblock_clear(vldata->material_ubo_data, nullptr);
}
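The workbench_data hunks above show two more recurring fixes: allocator calls returning void * now need an explicit cast, and a ternary that mixes a DRWState flag with a literal 0 needs the zero spelled as an enum value. A standalone sketch of both, using a hypothetical enum and plain calloc instead of MEM_callocN:

#include <cstdlib>

enum MyState {
  MY_STATE_NONE = 0,
  MY_STATE_CLIP = (1 << 0),
};

struct PrivateData {
  MyState clip_state;
};

static PrivateData *private_data_alloc(bool use_clip)
{
  /* calloc() returns void *; C++ needs the cast that C performed implicitly. */
  PrivateData *pd = static_cast<PrivateData *>(std::calloc(1, sizeof(PrivateData)));
  if (pd == nullptr) {
    return nullptr;
  }

  /* `use_clip ? MY_STATE_CLIP : 0` would have type int in C++,
   * so the zero branch is written as an enum value (or cast). */
  pd->clip_state = use_clip ? MY_STATE_CLIP : MyState(0);
  return pd;
}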
@@ -151,7 +151,7 @@ void workbench_antialiasing_engine_init(WORKBENCH_Data *vedata)
WORKBENCH_PrivateData *wpd = vedata->stl->wpd;
DrawEngineType *owner = (DrawEngineType *)&workbench_antialiasing_engine_init;

wpd->view = NULL;
wpd->view = nullptr;

/* Reset complete drawing when navigating or during viewport playback or when
* leaving one of those states. In case of multires modifier the navigation
@@ -186,7 +186,7 @@ void workbench_antialiasing_engine_init(WORKBENCH_Data *vedata)

{
float persmat[4][4];
DRW_view_persmat_get(NULL, persmat, false);
DRW_view_persmat_get(nullptr, persmat, false);
if (!equals_m4m4(persmat, wpd->last_mat)) {
copy_m4_m4(wpd->last_mat, persmat);
wpd->taa_sample = 0;
@@ -198,11 +198,12 @@ void workbench_antialiasing_engine_init(WORKBENCH_Data *vedata)
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_ATTACHMENT;
DRW_texture_ensure_fullscreen_2d_ex(
&txl->history_buffer_tx, GPU_RGBA16F, usage, DRW_TEX_FILTER);
DRW_texture_ensure_fullscreen_2d_ex(&txl->depth_buffer_tx, GPU_DEPTH24_STENCIL8, usage, 0);
DRW_texture_ensure_fullscreen_2d_ex(
&txl->depth_buffer_tx, GPU_DEPTH24_STENCIL8, usage, DRWTextureFlag(0));
const bool in_front_history = workbench_in_front_history_needed(vedata);
if (in_front_history) {
DRW_texture_ensure_fullscreen_2d_ex(
&txl->depth_buffer_in_front_tx, GPU_DEPTH24_STENCIL8, usage, 0);
&txl->depth_buffer_in_front_tx, GPU_DEPTH24_STENCIL8, usage, DRWTextureFlag(0));
}
else {
DRW_TEXTURE_FREE_SAFE(txl->depth_buffer_in_front_tx);
@@ -236,13 +237,13 @@ void workbench_antialiasing_engine_init(WORKBENCH_Data *vedata)
});

/* TODO: could be shared for all viewports. */
if (txl->smaa_search_tx == NULL) {
if (txl->smaa_search_tx == nullptr) {
txl->smaa_search_tx = GPU_texture_create_2d(
"smaa_search", SEARCHTEX_WIDTH, SEARCHTEX_HEIGHT, 1, GPU_R8, usage, NULL);
"smaa_search", SEARCHTEX_WIDTH, SEARCHTEX_HEIGHT, 1, GPU_R8, usage, nullptr);
GPU_texture_update(txl->smaa_search_tx, GPU_DATA_UBYTE, searchTexBytes);

txl->smaa_area_tx = GPU_texture_create_2d(
"smaa_area", AREATEX_WIDTH, AREATEX_HEIGHT, 1, GPU_RG8, usage, NULL);
"smaa_area", AREATEX_WIDTH, AREATEX_HEIGHT, 1, GPU_RG8, usage, nullptr);
GPU_texture_update(txl->smaa_area_tx, GPU_DATA_UBYTE, areaTexBytes);

GPU_texture_filter_mode(txl->smaa_search_tx, true);
@@ -279,7 +280,7 @@ static void workbench_antialiasing_weights_get(const float offset[2],
int i = 0;
for (int x = -1; x <= 1; x++) {
for (int y = -1; y <= 1; y++, i++) {
float sample_co[2] = {x, y};
float sample_co[2] = {float(x), float(y)};
sub_v2_v2(sample_co, offset);
float r = len_v2(sample_co);
/* fclem: is radial distance ok here? */
@@ -296,7 +297,7 @@ void workbench_antialiasing_cache_init(WORKBENCH_Data *vedata)
WORKBENCH_PrivateData *wpd = vedata->stl->wpd;
WORKBENCH_PassList *psl = vedata->psl;
DefaultTextureList *dtxl = DRW_viewport_texture_list_get();
DRWShadingGroup *grp = NULL;
DRWShadingGroup *grp = nullptr;

if (wpd->taa_sample_len == 0) {
return;
@@ -308,9 +309,10 @@ void workbench_antialiasing_cache_init(WORKBENCH_Data *vedata)

GPUShader *shader = workbench_shader_antialiasing_accumulation_get();
grp = DRW_shgroup_create(shader, psl->aa_accum_ps);
DRW_shgroup_uniform_texture_ex(grp, "colorBuffer", dtxl->color, GPU_SAMPLER_DEFAULT);
DRW_shgroup_uniform_texture_ex(
grp, "colorBuffer", dtxl->color, GPUSamplerState::default_sampler());
DRW_shgroup_uniform_float(grp, "samplesWeights", wpd->taa_weights, 9);
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
}

const float *size = DRW_viewport_size_get();
@@ -327,7 +329,7 @@ void workbench_antialiasing_cache_init(WORKBENCH_Data *vedata)
DRW_shgroup_uniform_vec4_copy(grp, "viewportMetrics", metrics);

DRW_shgroup_clear_framebuffer(grp, GPU_COLOR_BIT, 0, 0, 0, 0, 0.0f, 0x0);
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
}
{
/* Stage 2: Blend Weight/Coord. */
@@ -341,7 +343,7 @@ void workbench_antialiasing_cache_init(WORKBENCH_Data *vedata)
DRW_shgroup_uniform_vec4_copy(grp, "viewportMetrics", metrics);

DRW_shgroup_clear_framebuffer(grp, GPU_COLOR_BIT, 0, 0, 0, 0, 0.0f, 0x0);
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
}
{
/* Stage 3: Resolve. */
@@ -355,7 +357,7 @@ void workbench_antialiasing_cache_init(WORKBENCH_Data *vedata)
DRW_shgroup_uniform_float(grp, "mixFactor", &wpd->smaa_mix_factor, 1);
DRW_shgroup_uniform_float(grp, "taaAccumulatedWeight", &wpd->taa_weight_accum, 1);

DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
}
}

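The antialiasing hunk above rewrites {x, y} as {float(x), float(y)}: list-initialization in C++ rejects the implicit int-to-float narrowing that C silently allowed. A tiny sketch of the same fix:

static float grid_sample_distance(int x, int y)
{
  /* C tolerates {x, y}; C++ flags the narrowing inside a braced initializer,
   * so the conversions are written out. */
  const float sample_co[2] = {float(x), float(y)};
  return sample_co[0] * sample_co[0] + sample_co[1] * sample_co[1];
}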
@@ -32,7 +32,8 @@ static float *create_disk_samples(int num_samples, int num_iterations)
const int total_samples = num_samples * num_iterations;
const float num_samples_inv = 1.0f / num_samples;
/* vec4 to ensure memory alignment. */
float(*texels)[4] = MEM_callocN(sizeof(float[4]) * CAVITY_MAX_SAMPLES, __func__);
float(*texels)[4] = static_cast<float(*)[4]>(
MEM_callocN(sizeof(float[4]) * CAVITY_MAX_SAMPLES, __func__));
for (int i = 0; i < total_samples; i++) {
float it_add = (i / num_samples) * 0.499f;
float r = fmodf((i + 0.5f + it_add) * num_samples_inv, 1.0f);
@@ -121,7 +122,7 @@ void workbench_cavity_samples_ubo_ensure(WORKBENCH_PrivateData *wpd)
DRW_TEXTURE_FREE_SAFE(wpd->vldata->cavity_jitter_tx);
}

if (wpd->vldata->cavity_sample_ubo == NULL) {
if (wpd->vldata->cavity_sample_ubo == nullptr) {
float *samples = create_disk_samples(cavity_sample_count_single_iteration, max_iter_count);
wpd->vldata->cavity_jitter_tx = create_jitter_texture(cavity_sample_count);
/* NOTE: Uniform buffer needs to always be filled to be valid. */
@@ -144,7 +145,7 @@ void workbench_cavity_cache_init(WORKBENCH_Data *data)
workbench_cavity_samples_ubo_ensure(wpd);

int state = DRW_STATE_WRITE_COLOR | DRW_STATE_BLEND_MUL;
DRW_PASS_CREATE(psl->cavity_ps, state);
DRW_PASS_CREATE(psl->cavity_ps, DRWState(state));

sh = workbench_shader_cavity_get(SSAO_ENABLED(wpd), CURVATURE_ENABLED(wpd));

@@ -160,9 +161,9 @@ void workbench_cavity_cache_init(WORKBENCH_Data *data)
if (CURVATURE_ENABLED(wpd)) {
DRW_shgroup_uniform_texture(grp, "objectIdBuffer", wpd->object_id_tx);
}
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
}
else {
psl->cavity_ps = NULL;
psl->cavity_ps = nullptr;
}
}
@@ -63,10 +63,11 @@ static void square_to_circle(float x, float y, float *r, float *T)
static void workbench_dof_setup_samples(
GPUUniformBuf **ubo, float **data, float bokeh_sides, float bokeh_rotation, float bokeh_ratio)
{
if (*data == NULL) {
*data = MEM_callocN(sizeof(float[4]) * SAMP_LEN, "workbench dof samples");
if (*data == nullptr) {
*data = static_cast<float *>(
MEM_callocN(sizeof(float[4]) * SAMP_LEN, "workbench dof samples"));
}
if (*ubo == NULL) {
if (*ubo == nullptr) {
*ubo = GPU_uniformbuf_create(sizeof(float[4]) * SAMP_LEN);
}

@@ -120,14 +121,15 @@ void workbench_dof_engine_init(WORKBENCH_Data *vedata)
Object *camera;

if (v3d && rv3d) {
camera = (rv3d->persp == RV3D_CAMOB) ? v3d->camera : NULL;
camera = (rv3d->persp == RV3D_CAMOB) ? v3d->camera : nullptr;
}
else {
camera = wpd->cam_original_ob;
}

Camera *cam = camera != NULL && camera->type == OB_CAMERA ? camera->data : NULL;
if ((wpd->shading.flag & V3D_SHADING_DEPTH_OF_FIELD) == 0 || (cam == NULL) ||
Camera *cam = static_cast<Camera *>(
camera != nullptr && camera->type == OB_CAMERA ? camera->data : nullptr);
if ((wpd->shading.flag & V3D_SHADING_DEPTH_OF_FIELD) == 0 || (cam == nullptr) ||
((cam->dof.flag & CAM_DOF_ENABLED) == 0))
{
wpd->dof_enabled = false;
@@ -146,10 +148,16 @@ void workbench_dof_engine_init(WORKBENCH_Data *vedata)
int shrink_w_size[2] = {shrink_h_size[0], ceilf(size[1] / 8.0f)};
#endif

DRW_texture_ensure_2d(
&txl->dof_source_tx, size[0], size[1], GPU_RGBA16F, DRW_TEX_FILTER | DRW_TEX_MIPMAP);
DRW_texture_ensure_2d(
&txl->coc_halfres_tx, size[0], size[1], GPU_RG8, DRW_TEX_FILTER | DRW_TEX_MIPMAP);
DRW_texture_ensure_2d(&txl->dof_source_tx,
size[0],
size[1],
GPU_RGBA16F,
DRWTextureFlag(DRW_TEX_FILTER | DRW_TEX_MIPMAP));
DRW_texture_ensure_2d(&txl->coc_halfres_tx,
size[0],
size[1],
GPU_RG8,
DRWTextureFlag(DRW_TEX_FILTER | DRW_TEX_MIPMAP));
wpd->dof_blur_tx = DRW_texture_pool_query_2d(
size[0], size[1], GPU_RGBA16F, &draw_engine_workbench);
#if 0 /* TODO(fclem): finish COC min_max optimization. */
@@ -209,7 +217,7 @@ void workbench_dof_engine_init(WORKBENCH_Data *vedata)
float focal_len_scaled = scale_camera * focal_len;
float sensor_scaled = scale_camera * sensor;

if (rv3d != NULL) {
if (rv3d != nullptr) {
sensor_scaled *= rv3d->viewcamtexcofac[0];
}

@@ -224,7 +232,7 @@ void workbench_dof_engine_init(WORKBENCH_Data *vedata)
float rotation = cam->dof.aperture_rotation;
float ratio = 1.0f / cam->dof.aperture_ratio;

if (wpd->vldata->dof_sample_ubo == NULL || blades != wpd->dof_blades ||
if (wpd->vldata->dof_sample_ubo == nullptr || blades != wpd->dof_blades ||
rotation != wpd->dof_rotation || ratio != wpd->dof_ratio)
{
wpd->dof_blades = blades;
@@ -264,16 +272,17 @@ void workbench_dof_cache_init(WORKBENCH_Data *vedata)
DRW_shgroup_uniform_vec2(grp, "invertedViewportSize", DRW_viewport_invert_size_get(), 1);
DRW_shgroup_uniform_vec3(grp, "dofParams", &wpd->dof_aperturesize, 1);
DRW_shgroup_uniform_vec2(grp, "nearFar", wpd->dof_near_far, 1);
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
}

{
psl->dof_down2_ps = DRW_pass_create("DoF DownSample", DRW_STATE_WRITE_COLOR);

DRWShadingGroup *grp = DRW_shgroup_create(downsample_sh, psl->dof_down2_ps);
DRW_shgroup_uniform_texture_ex(grp, "sceneColorTex", txl->dof_source_tx, GPU_SAMPLER_DEFAULT);
DRW_shgroup_uniform_texture_ex(
grp, "sceneColorTex", txl->dof_source_tx, GPUSamplerState::default_sampler());
DRW_shgroup_uniform_texture(grp, "inputCocTex", txl->coc_halfres_tx);
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
}
#if 0 /* TODO(fclem): finish COC min_max optimization */
{
@@ -281,28 +290,28 @@ void workbench_dof_cache_init(WORKBENCH_Data *vedata)

DRWShadingGroup *grp = DRW_shgroup_create(flatten_h_sh, psl->dof_flatten_h_ps);
DRW_shgroup_uniform_texture(grp, "inputCocTex", txl->coc_halfres_tx);
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
}
{
psl->dof_flatten_v_ps = DRW_pass_create("DoF Flatten Coc V", DRW_STATE_WRITE_COLOR);

DRWShadingGroup *grp = DRW_shgroup_create(flatten_v_sh, psl->dof_flatten_v_ps);
DRW_shgroup_uniform_texture(grp, "inputCocTex", wpd->coc_temp_tx);
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
}
{
psl->dof_dilate_h_ps = DRW_pass_create("DoF Dilate Coc H", DRW_STATE_WRITE_COLOR);

DRWShadingGroup *grp = DRW_shgroup_create(dilate_v_sh, psl->dof_dilate_v_ps);
DRW_shgroup_uniform_texture(grp, "inputCocTex", wpd->coc_tiles_tx[0]);
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
}
{
psl->dof_dilate_v_ps = DRW_pass_create("DoF Dilate Coc V", DRW_STATE_WRITE_COLOR);

DRWShadingGroup *grp = DRW_shgroup_create(dilate_h_sh, psl->dof_dilate_h_ps);
DRW_shgroup_uniform_texture(grp, "inputCocTex", wpd->coc_tiles_tx[1]);
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
}
#endif
{
@@ -319,7 +328,7 @@ void workbench_dof_cache_init(WORKBENCH_Data *vedata)
DRW_shgroup_uniform_texture(grp, "halfResColorTex", txl->dof_source_tx);
DRW_shgroup_uniform_vec2(grp, "invertedViewportSize", DRW_viewport_invert_size_get(), 1);
DRW_shgroup_uniform_float_copy(grp, "noiseOffset", offset);
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
}
{
psl->dof_blur2_ps = DRW_pass_create("DoF Blur 2", DRW_STATE_WRITE_COLOR);
@@ -328,7 +337,7 @@ void workbench_dof_cache_init(WORKBENCH_Data *vedata)
DRW_shgroup_uniform_texture(grp, "inputCocTex", txl->coc_halfres_tx);
DRW_shgroup_uniform_texture(grp, "blurTex", wpd->dof_blur_tx);
DRW_shgroup_uniform_vec2(grp, "invertedViewportSize", DRW_viewport_invert_size_get(), 1);
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
}
{
psl->dof_resolve_ps = DRW_pass_create("DoF Resolve",
@@ -340,11 +349,11 @@ void workbench_dof_cache_init(WORKBENCH_Data *vedata)
DRW_shgroup_uniform_vec2(grp, "invertedViewportSize", DRW_viewport_invert_size_get(), 1);
DRW_shgroup_uniform_vec3(grp, "dofParams", &wpd->dof_aperturesize, 1);
DRW_shgroup_uniform_vec2(grp, "nearFar", wpd->dof_near_far, 1);
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
}
}

static void workbench_dof_downsample_level(void *user_data, int UNUSED(level))
static void workbench_dof_downsample_level(void *user_data, int /*level*/)
{
WORKBENCH_PassList *psl = (WORKBENCH_PassList *)user_data;
DRW_draw_pass(psl->dof_down2_ps);
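The depth-of-field hunk above replaces the UNUSED(level) macro with an unnamed parameter whose name survives only as a comment, which is the usual C++ way to silence unused-parameter warnings. A small sketch with a hypothetical callback:

/* C: static void downsample_level(void *user_data, int UNUSED(level)) { ... } */

/* C++: leave the parameter unnamed; the comment keeps the name readable. */
static void downsample_level(void *user_data, int /*level*/)
{
  int *call_count = static_cast<int *>(user_data);
  (*call_count)++;
}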
@@ -25,7 +25,7 @@ void workbench_outline_cache_init(WORKBENCH_Data *data)

if (OBJECT_OUTLINE_ENABLED(wpd)) {
int state = DRW_STATE_WRITE_COLOR | DRW_STATE_BLEND_ALPHA_PREMUL;
DRW_PASS_CREATE(psl->outline_ps, state);
DRW_PASS_CREATE(psl->outline_ps, DRWState(state));

sh = workbench_shader_outline_get();

@@ -33,9 +33,9 @@ void workbench_outline_cache_init(WORKBENCH_Data *data)
DRW_shgroup_uniform_texture(grp, "objectIdBuffer", wpd->object_id_tx);
DRW_shgroup_uniform_texture(grp, "depthBuffer", dtxl->depth);
DRW_shgroup_uniform_block(grp, "world_data", wpd->world_ubo);
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
}
else {
psl->outline_ps = NULL;
psl->outline_ps = nullptr;
}
}
@@ -41,7 +41,7 @@
|
||||
void workbench_engine_init(void *ved)
|
||||
{
|
||||
GPU_render_begin();
|
||||
WORKBENCH_Data *vedata = ved;
|
||||
WORKBENCH_Data *vedata = static_cast<WORKBENCH_Data *>(ved);
|
||||
WORKBENCH_StorageList *stl = vedata->stl;
|
||||
WORKBENCH_TextureList *txl = vedata->txl;
|
||||
|
||||
@@ -50,9 +50,9 @@ void workbench_engine_init(void *ved)
|
||||
workbench_private_data_init(wpd);
|
||||
workbench_update_world_ubo(wpd);
|
||||
|
||||
if (txl->dummy_image_tx == NULL) {
|
||||
if (txl->dummy_image_tx == nullptr) {
|
||||
const float fpixel[4] = {1.0f, 0.0f, 1.0f, 1.0f};
|
||||
txl->dummy_image_tx = DRW_texture_create_2d(1, 1, GPU_RGBA8, 0, fpixel);
|
||||
txl->dummy_image_tx = DRW_texture_create_2d(1, 1, GPU_RGBA8, DRWTextureFlag(0), fpixel);
|
||||
}
|
||||
wpd->dummy_image_tx = txl->dummy_image_tx;
|
||||
|
||||
@@ -63,7 +63,7 @@ void workbench_engine_init(void *ved)
|
||||
}
|
||||
else {
|
||||
/* Don't free because it's a pool texture. */
|
||||
wpd->object_id_tx = NULL;
|
||||
wpd->object_id_tx = nullptr;
|
||||
}
|
||||
|
||||
workbench_opaque_engine_init(vedata);
|
||||
@@ -76,7 +76,7 @@ void workbench_engine_init(void *ved)
|
||||
|
||||
void workbench_cache_init(void *ved)
|
||||
{
|
||||
WORKBENCH_Data *vedata = ved;
|
||||
WORKBENCH_Data *vedata = static_cast<WORKBENCH_Data *>(ved);
|
||||
|
||||
workbench_opaque_cache_init(vedata);
|
||||
workbench_transparent_cache_init(vedata);
|
||||
@@ -88,7 +88,7 @@ void workbench_cache_init(void *ved)
workbench_volume_cache_init(vedata);
}

/* TODO(fclem): DRW_cache_object_surface_material_get needs a refactor to allow passing NULL
/* TODO(fclem): DRW_cache_object_surface_material_get needs a refactor to allow passing nullptr
* instead of gpumat_array. Avoiding all this boilerplate code. */
static GPUBatch **workbench_object_surface_material_get(Object *ob)
{
@@ -105,7 +105,7 @@ static void workbench_cache_sculpt_populate(WORKBENCH_PrivateData *wpd,
{
const bool use_single_drawcall = !ELEM(color_type, V3D_SHADING_MATERIAL_COLOR);
if (use_single_drawcall) {
DRWShadingGroup *grp = workbench_material_setup(wpd, ob, ob->actcol, color_type, NULL);
DRWShadingGroup *grp = workbench_material_setup(wpd, ob, ob->actcol, color_type, nullptr);

bool use_color = color_type == V3D_SHADING_VERTEX_COLOR;
bool use_uv = color_type == V3D_SHADING_TEXTURE_COLOR;
@@ -116,9 +116,9 @@ static void workbench_cache_sculpt_populate(WORKBENCH_PrivateData *wpd,
const int materials_len = DRW_cache_object_material_count_get(ob);
DRWShadingGroup **shgrps = BLI_array_alloca(shgrps, materials_len);
for (int i = 0; i < materials_len; i++) {
shgrps[i] = workbench_material_setup(wpd, ob, i + 1, color_type, NULL);
shgrps[i] = workbench_material_setup(wpd, ob, i + 1, color_type, nullptr);
}
DRW_shgroup_call_sculpt_with_materials(shgrps, NULL, materials_len, ob);
DRW_shgroup_call_sculpt_with_materials(shgrps, nullptr, materials_len, ob);
}
}

@@ -151,7 +151,7 @@ static void workbench_cache_texpaint_populate(WORKBENCH_PrivateData *wpd, Object
GPUSamplerState state = {
filtering, GPU_SAMPLER_EXTEND_MODE_REPEAT, GPU_SAMPLER_EXTEND_MODE_REPEAT};

DRWShadingGroup *grp = workbench_image_setup(wpd, ob, 0, ima, NULL, state);
DRWShadingGroup *grp = workbench_image_setup(wpd, ob, 0, ima, nullptr, state);
workbench_object_drawcall(grp, geom, ob);
}
}
@@ -160,11 +160,11 @@ static void workbench_cache_texpaint_populate(WORKBENCH_PrivateData *wpd, Object
if (geoms) {
const int materials_len = DRW_cache_object_material_count_get(ob);
for (int i = 0; i < materials_len; i++) {
if (geoms[i] == NULL) {
if (geoms[i] == nullptr) {
continue;
}
DRWShadingGroup *grp = workbench_image_setup(
wpd, ob, i + 1, NULL, NULL, GPU_SAMPLER_DEFAULT);
wpd, ob, i + 1, nullptr, nullptr, GPUSamplerState::default_sampler());
workbench_object_drawcall(grp, geoms[i], ob);
}
}
@@ -206,7 +206,7 @@ static void workbench_cache_common_populate(WORKBENCH_PrivateData *wpd,
if (geoms) {
const int materials_len = DRW_cache_object_material_count_get(ob);
for (int i = 0; i < materials_len; i++) {
if (geoms[i] == NULL) {
if (geoms[i] == nullptr) {
continue;
}
DRWShadingGroup *grp = workbench_material_setup(wpd, ob, i + 1, color_type, r_transp);
@@ -227,23 +227,24 @@ static void workbench_cache_hair_populate(WORKBENCH_PrivateData *wpd,
const DRWContextState *draw_ctx = DRW_context_state_get();
const Scene *scene = draw_ctx->scene;

const ImagePaintSettings *imapaint = use_texpaint_mode ? &scene->toolsettings->imapaint : NULL;
Image *ima = (imapaint && imapaint->mode == IMAGEPAINT_MODE_IMAGE) ? imapaint->canvas : NULL;
const ImagePaintSettings *imapaint = use_texpaint_mode ? &scene->toolsettings->imapaint :
nullptr;
Image *ima = (imapaint && imapaint->mode == IMAGEPAINT_MODE_IMAGE) ? imapaint->canvas : nullptr;
GPUSamplerState state = {imapaint && imapaint->interp == IMAGEPAINT_INTERP_LINEAR ?
GPU_SAMPLER_FILTERING_LINEAR :
GPU_SAMPLER_FILTERING_DEFAULT};
DRWShadingGroup *grp = (use_texpaint_mode) ?
workbench_image_hair_setup(wpd, ob, matnr, ima, NULL, state) :
workbench_image_hair_setup(wpd, ob, matnr, ima, nullptr, state) :
workbench_material_hair_setup(wpd, ob, matnr, color_type);

DRW_shgroup_hair_create_sub(ob, psys, md, grp, NULL);
DRW_shgroup_hair_create_sub(ob, psys, md, grp, nullptr);
}

static const CustomData *workbench_mesh_get_loop_custom_data(const Mesh *mesh)
{
if (BKE_mesh_wrapper_type(mesh) == ME_WRAPPER_TYPE_BMESH) {
BLI_assert(mesh->edit_mesh != NULL);
BLI_assert(mesh->edit_mesh->bm != NULL);
BLI_assert(mesh->edit_mesh != nullptr);
BLI_assert(mesh->edit_mesh->bm != nullptr);
return &mesh->edit_mesh->bm->ldata;
}
return &mesh->loop_data;
@@ -252,8 +253,8 @@ static const CustomData *workbench_mesh_get_loop_custom_data(const Mesh *mesh)
static const CustomData *workbench_mesh_get_vert_custom_data(const Mesh *mesh)
{
if (BKE_mesh_wrapper_type(mesh) == ME_WRAPPER_TYPE_BMESH) {
BLI_assert(mesh->edit_mesh != NULL);
BLI_assert(mesh->edit_mesh->bm != NULL);
BLI_assert(mesh->edit_mesh != nullptr);
BLI_assert(mesh->edit_mesh->bm != nullptr);
return &mesh->edit_mesh->bm->vdata;
}
return &mesh->vert_data;
@@ -269,15 +270,15 @@ static eV3DShadingColorType workbench_color_type_get(WORKBENCH_PrivateData *wpd,
bool *r_texpaint_mode,
bool *r_draw_shadow)
{
eV3DShadingColorType color_type = wpd->shading.color_type;
const Mesh *me = (ob->type == OB_MESH) ? ob->data : NULL;
const CustomData *ldata = (me == NULL) ? NULL : workbench_mesh_get_loop_custom_data(me);
eV3DShadingColorType color_type = eV3DShadingColorType(wpd->shading.color_type);
const Mesh *me = static_cast<const Mesh *>((ob->type == OB_MESH) ? ob->data : nullptr);
const CustomData *ldata = (me == nullptr) ? nullptr : workbench_mesh_get_loop_custom_data(me);

const DRWContextState *draw_ctx = DRW_context_state_get();
const bool is_active = (ob == draw_ctx->obact);
const bool is_sculpt_pbvh = BKE_sculptsession_use_pbvh_draw(ob, draw_ctx->rv3d) &&
!DRW_state_is_image_render();
const bool is_render = DRW_state_is_image_render() && (draw_ctx->v3d == NULL);
const bool is_render = DRW_state_is_image_render() && (draw_ctx->v3d == nullptr);
const bool is_texpaint_mode = is_active && (wpd->ctx_mode == CTX_MODE_PAINT_TEXTURE);
const bool is_vertpaint_mode = is_active && (wpd->ctx_mode == CTX_MODE_PAINT_VERTEX);

@@ -305,7 +306,7 @@ static eV3DShadingColorType workbench_color_type_get(WORKBENCH_PrivateData *wpd,
if (ob->dt < OB_TEXTURE) {
color_type = V3D_SHADING_MATERIAL_COLOR;
}
else if ((me == NULL) || !CustomData_has_layer(ldata, CD_PROP_FLOAT2)) {
else if ((me == nullptr) || !CustomData_has_layer(ldata, CD_PROP_FLOAT2)) {
/* Disable color mode if data layer is unavailable. */
color_type = V3D_SHADING_MATERIAL_COLOR;
}
@@ -352,7 +353,7 @@ static eV3DShadingColorType workbench_color_type_get(WORKBENCH_PrivateData *wpd,
/* Bad call C is required to access the tool system that is context aware. Cast to non-const
* due to current API. */
bContext *C = (bContext *)DRW_context_state_get()->evil_C;
if (C != NULL) {
if (C != nullptr) {
color_type = ED_paint_shading_color_override(
C, &wpd->scene->toolsettings->paint_mode, ob, color_type);
}
@@ -377,7 +378,7 @@ static eV3DShadingColorType workbench_color_type_get(WORKBENCH_PrivateData *wpd,

void workbench_cache_populate(void *ved, Object *ob)
{
WORKBENCH_Data *vedata = ved;
WORKBENCH_Data *vedata = static_cast<WORKBENCH_Data *>(ved);
WORKBENCH_StorageList *stl = vedata->stl;
WORKBENCH_PrivateData *wpd = stl->wpd;

@@ -385,9 +386,9 @@ void workbench_cache_populate(void *ved, Object *ob)
return;
}

if (ob->type == OB_MESH && ob->modifiers.first != NULL) {
if (ob->type == OB_MESH && ob->modifiers.first != nullptr) {
bool use_texpaint_mode;
int color_type = workbench_color_type_get(wpd, ob, NULL, &use_texpaint_mode, NULL);
int color_type = workbench_color_type_get(wpd, ob, nullptr, &use_texpaint_mode, nullptr);

LISTBASE_FOREACH (ModifierData *, md, &ob->modifiers) {
if (md->type != eModifierType_ParticleSystem) {
@@ -402,7 +403,7 @@ void workbench_cache_populate(void *ved, Object *ob)

if (draw_as == PART_DRAW_PATH) {
workbench_cache_hair_populate(
wpd, ob, psys, md, color_type, use_texpaint_mode, part->omat);
wpd, ob, psys, md, eV3DShadingColorType(color_type), use_texpaint_mode, part->omat);
}
}
}
@@ -448,27 +449,29 @@ void workbench_cache_populate(void *ved, Object *ob)
}
}
else if (ob->type == OB_CURVES) {
int color_type = workbench_color_type_get(wpd, ob, NULL, NULL, NULL);
DRWShadingGroup *grp = workbench_material_hair_setup(wpd, ob, CURVES_MATERIAL_NR, color_type);
DRW_shgroup_curves_create_sub(ob, grp, NULL);
int color_type = workbench_color_type_get(wpd, ob, nullptr, nullptr, nullptr);
DRWShadingGroup *grp = workbench_material_hair_setup(
wpd, ob, CURVES_MATERIAL_NR, eV3DShadingColorType(color_type));
DRW_shgroup_curves_create_sub(ob, grp, nullptr);
}
else if (ob->type == OB_POINTCLOUD) {
int color_type = workbench_color_type_get(wpd, ob, NULL, NULL, NULL);
int color_type = workbench_color_type_get(wpd, ob, nullptr, nullptr, nullptr);
DRWShadingGroup *grp = workbench_material_ptcloud_setup(
wpd, ob, POINTCLOUD_MATERIAL_NR, color_type);
DRW_shgroup_pointcloud_create_sub(ob, grp, NULL);
wpd, ob, POINTCLOUD_MATERIAL_NR, eV3DShadingColorType(color_type));
DRW_shgroup_pointcloud_create_sub(ob, grp, nullptr);
}
else if (ob->type == OB_VOLUME) {
if (wpd->shading.type != OB_WIRE) {
int color_type = workbench_color_type_get(wpd, ob, NULL, NULL, NULL);
workbench_volume_cache_populate(vedata, wpd->scene, ob, NULL, color_type);
int color_type = workbench_color_type_get(wpd, ob, nullptr, nullptr, nullptr);
workbench_volume_cache_populate(
vedata, wpd->scene, ob, nullptr, eV3DShadingColorType(color_type));
}
}
}

void workbench_cache_finish(void *ved)
{
WORKBENCH_Data *vedata = ved;
WORKBENCH_Data *vedata = static_cast<WORKBENCH_Data *>(ved);
WORKBENCH_StorageList *stl = vedata->stl;
WORKBENCH_FramebufferList *fbl = vedata->fbl;
WORKBENCH_PrivateData *wpd = stl->wpd;
@@ -479,7 +482,8 @@ void workbench_cache_finish(void *ved)
DefaultFramebufferList *dfbl = DRW_viewport_framebuffer_list_get();
DefaultTextureList *dtxl = DRW_viewport_texture_list_get();

DRW_texture_ensure_fullscreen_2d(&dtxl->depth_in_front, GPU_DEPTH24_STENCIL8, 0);
DRW_texture_ensure_fullscreen_2d(
&dtxl->depth_in_front, GPU_DEPTH24_STENCIL8, DRWTextureFlag(0));

GPU_framebuffer_ensure_config(&dfbl->in_front_fb,
{
@@ -521,8 +525,8 @@ void workbench_cache_finish(void *ved)
for (int j = 0; j < 2; j++) {
for (int k = 0; k < WORKBENCH_DATATYPE_MAX; k++) {
if (wpd->prepass[i][j][k].material_hash) {
BLI_ghash_free(wpd->prepass[i][j][k].material_hash, NULL, NULL);
wpd->prepass[i][j][k].material_hash = NULL;
BLI_ghash_free(wpd->prepass[i][j][k].material_hash, nullptr, nullptr);
wpd->prepass[i][j][k].material_hash = nullptr;
}
}
}
@@ -531,7 +535,7 @@ void workbench_cache_finish(void *ved)

void workbench_draw_sample(void *ved)
{
WORKBENCH_Data *vedata = ved;
WORKBENCH_Data *vedata = static_cast<WORKBENCH_Data *>(ved);
WORKBENCH_FramebufferList *fbl = vedata->fbl;
WORKBENCH_PrivateData *wpd = vedata->stl->wpd;
WORKBENCH_PassList *psl = vedata->psl;
@@ -626,7 +630,7 @@ void workbench_draw_sample(void *ved)
/* Viewport rendering. */
static void workbench_draw_scene(void *ved)
{
WORKBENCH_Data *vedata = ved;
WORKBENCH_Data *vedata = static_cast<WORKBENCH_Data *>(ved);
WORKBENCH_PrivateData *wpd = vedata->stl->wpd;

if (DRW_state_is_viewport_image_render()) {
@@ -643,10 +647,10 @@ static void workbench_draw_scene(void *ved)
workbench_draw_finish(vedata);
}

void workbench_draw_finish(void *UNUSED(ved))
void workbench_draw_finish(void * /*ved*/)
{
/* Reset default view. */
DRW_view_set_active(NULL);
DRW_view_set_active(nullptr);
}

static void workbench_engine_free(void)
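A side note on the `UNUSED(ved)` changes above (sketch, not part of the patch): C requires every function parameter to carry a name, so the old C code wrapped unused ones in the `UNUSED()` macro, while C++ simply allows leaving the name out and keeping it as a comment. The callback below is hypothetical.

/* Hypothetical callback for illustration only. */
static void draw_finish(void * /*ved*/)   /* C++ allows unnamed parameters. */
{
  /* In the old C code this signature had to be `void *UNUSED(ved)` because
   * C needs every parameter to be named even when it is ignored. */
}

int main()
{
  draw_finish(nullptr);
  return 0;
}
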
@@ -656,16 +660,16 @@ static void workbench_engine_free(void)

static void workbench_view_update(void *vedata)
{
WORKBENCH_Data *data = vedata;
WORKBENCH_Data *data = static_cast<WORKBENCH_Data *>(vedata);
workbench_antialiasing_view_updated(data);
}

static void workbench_id_update(void *UNUSED(vedata), ID *id)
static void workbench_id_update(void * /*vedata*/, ID *id)
{
if (GS(id->name) == ID_OB) {
WORKBENCH_ObjectData *oed = (WORKBENCH_ObjectData *)DRW_drawdata_get(id,
&draw_engine_workbench);
if (oed != NULL && oed->dd.recalc != 0) {
if (oed != nullptr && oed->dd.recalc != 0) {
oed->shadow_bbox_dirty = (oed->dd.recalc & ID_RECALC_ALL) != 0;
oed->dd.recalc = 0;
}
@@ -675,13 +679,13 @@ static void workbench_id_update(void *UNUSED(vedata), ID *id)
static const DrawEngineDataSize workbench_data_size = DRW_VIEWPORT_DATA_SIZE(WORKBENCH_Data);

DrawEngineType draw_engine_workbench = {
/*next*/ NULL,
/*prev*/ NULL,
/*next*/ nullptr,
/*prev*/ nullptr,
/*idname*/ N_("Workbench"),
/*vedata_size*/ &workbench_data_size,
/*engine_init*/ &workbench_engine_init,
/*engine_free*/ &workbench_engine_free,
/*instance_free*/ NULL,
/*instance_free*/ nullptr,
/*cache_init*/ &workbench_cache_init,
/*cache_populate*/ &workbench_cache_populate,
/*cache_finish*/ &workbench_cache_finish,
@@ -689,30 +693,30 @@ DrawEngineType draw_engine_workbench = {
/*view_update*/ &workbench_view_update,
/*id_update*/ &workbench_id_update,
/*render_to_image*/ &workbench_render,
/*store_metadata*/ NULL,
/*store_metadata*/ nullptr,
};

RenderEngineType DRW_engine_viewport_workbench_type = {
/*next*/ NULL,
/*prev*/ NULL,
/*next*/ nullptr,
/*prev*/ nullptr,
/*idname*/ WORKBENCH_ENGINE,
/*name*/ N_("Workbench"),
/*flag*/ RE_INTERNAL | RE_USE_STEREO_VIEWPORT | RE_USE_GPU_CONTEXT,
/*update*/ NULL,
/*update*/ nullptr,
/*render*/ &DRW_render_to_image,
/*render_frame_finish*/ NULL,
/*draw*/ NULL,
/*bake*/ NULL,
/*view_update*/ NULL,
/*view_draw*/ NULL,
/*update_script_node*/ NULL,
/*render_frame_finish*/ nullptr,
/*draw*/ nullptr,
/*bake*/ nullptr,
/*view_update*/ nullptr,
/*view_draw*/ nullptr,
/*update_script_node*/ nullptr,
/*update_render_passes*/ &workbench_render_update_passes,
/*draw_engine*/ &draw_engine_workbench,
/*rna_ext*/
{
/*data*/ NULL,
/*srna*/ NULL,
/*call*/ NULL,
/*data*/ nullptr,
/*srna*/ nullptr,
/*call*/ nullptr,
},
};

@@ -69,7 +69,8 @@ void workbench_opaque_cache_init(WORKBENCH_Data *vedata)
pass = psl->opaque_ps;
}

for (eWORKBENCH_DataType data = 0; data < WORKBENCH_DATATYPE_MAX; data++) {
for (int data_i = 0; data_i < WORKBENCH_DATATYPE_MAX; data_i++) {
eWORKBENCH_DataType data = eWORKBENCH_DataType(data_i);
wpd->prepass[opaque][infront][data].material_hash = BLI_ghash_ptr_new(__func__);

sh = workbench_shader_opaque_get(wpd, data);
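Worth noting for the loop rewrite above (an illustrative sketch, not part of the patch): `++` is not defined for enumeration types in C++, so an enum can no longer serve directly as the loop counter; the patch iterates over an `int` and converts it back explicitly each pass. The enum below is hypothetical.

/* Hypothetical enum mirroring the pattern above. */
enum eDataType { DATA_MESH = 0, DATA_POINTCLOUD, DATA_CURVES, DATA_MAX };

int main()
{
  /* for (eDataType t = DATA_MESH; t < DATA_MAX; t++) {}   // OK in C, error in C++. */
  for (int t_i = 0; t_i < DATA_MAX; t_i++) {
    eDataType t = eDataType(t_i); /* Explicit conversion back to the enum. */
    (void)t;
  }
  return 0;
}
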
@@ -129,7 +130,7 @@ void workbench_opaque_cache_init(WORKBENCH_Data *vedata)
DRW_shgroup_uniform_texture(grp, "matcap_diffuse_tx", diff_tx);
DRW_shgroup_uniform_texture(grp, "matcap_specular_tx", spec_tx);
}
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);

if (SHADOW_ENABLED(wpd)) {
grp = DRW_shgroup_create_sub(grp);
@@ -137,7 +138,7 @@ void workbench_opaque_cache_init(WORKBENCH_Data *vedata)
DRW_shgroup_state_disable(grp, DRW_STATE_STENCIL_EQUAL);
DRW_shgroup_state_enable(grp, DRW_STATE_STENCIL_NEQUAL);
DRW_shgroup_stencil_mask(grp, 0x00);
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
}
}
{
@@ -151,6 +152,6 @@ void workbench_opaque_cache_init(WORKBENCH_Data *vedata)
grp = DRW_shgroup_create(sh, psl->merge_infront_ps);
DRW_shgroup_uniform_texture_ref(grp, "depthBuffer", &dtxl->depth_in_front);
DRW_shgroup_stencil_mask(grp, 0x00);
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
}
}
@@ -32,8 +32,8 @@

static void workbench_render_cache(void *vedata,
Object *ob,
RenderEngine *UNUSED(engine),
Depsgraph *UNUSED(depsgraph))
RenderEngine * /*engine*/,
Depsgraph * /*depsgraph*/)
{
workbench_cache_populate(vedata, ob);
}
@@ -51,7 +51,7 @@ static void workbench_render_matrices_init(RenderEngine *engine, Depsgraph *deps

invert_m4_m4(viewmat, viewinv);

DRWView *view = DRW_view_create(viewmat, winmat, NULL, NULL, NULL);
DRWView *view = DRW_view_create(viewmat, winmat, nullptr, nullptr, nullptr);
DRW_view_default_set(view);
DRW_view_set_active(view);
}
@@ -66,12 +66,13 @@ static bool workbench_render_framebuffers_init(void)

/* When doing a multi view rendering the first view will allocate the buffers
* the other views will reuse these buffers */
if (dtxl->color == NULL) {
if (dtxl->color == nullptr) {
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_ATTACHMENT;
BLI_assert(dtxl->depth == NULL);
dtxl->color = GPU_texture_create_2d("txl.color", UNPACK2(size), 1, GPU_RGBA16F, usage, NULL);
BLI_assert(dtxl->depth == nullptr);
dtxl->color = GPU_texture_create_2d(
"txl.color", UNPACK2(size), 1, GPU_RGBA16F, usage, nullptr);
dtxl->depth = GPU_texture_create_2d(
"txl.depth", UNPACK2(size), 1, GPU_DEPTH24_STENCIL8, usage, NULL);
"txl.depth", UNPACK2(size), 1, GPU_DEPTH24_STENCIL8, usage, nullptr);
}

if (!(dtxl->depth && dtxl->color)) {
@@ -91,9 +92,9 @@ static bool workbench_render_framebuffers_init(void)
{GPU_ATTACHMENT_NONE, GPU_ATTACHMENT_TEXTURE(dtxl->color)});

bool ok = true;
ok = ok && GPU_framebuffer_check_valid(dfbl->default_fb, NULL);
ok = ok && GPU_framebuffer_check_valid(dfbl->color_only_fb, NULL);
ok = ok && GPU_framebuffer_check_valid(dfbl->depth_only_fb, NULL);
ok = ok && GPU_framebuffer_check_valid(dfbl->default_fb, nullptr);
ok = ok && GPU_framebuffer_check_valid(dfbl->color_only_fb, nullptr);
ok = ok && GPU_framebuffer_check_valid(dfbl->depth_only_fb, nullptr);

return ok;
}
@@ -118,12 +119,12 @@ static void workbench_render_result_z(RenderLayer *rl, const char *viewname, con
rp_buffer_data);

float winmat[4][4];
DRW_view_winmat_get(NULL, winmat, false);
DRW_view_winmat_get(nullptr, winmat, false);

int pix_num = BLI_rcti_size_x(rect) * BLI_rcti_size_y(rect);

/* Convert GPU depth [0..1] to view Z [near..far] */
if (DRW_view_is_persp_get(NULL)) {
if (DRW_view_is_persp_get(nullptr)) {
for (int i = 0; i < pix_num; i++) {
if (rp_buffer_data[i] == 1.0f) {
rp_buffer_data[i] = 1e10f; /* Background */
@@ -136,8 +137,8 @@ static void workbench_render_result_z(RenderLayer *rl, const char *viewname, con
}
else {
/* Keep in mind, near and far distance are negatives. */
float near = DRW_view_near_distance_get(NULL);
float far = DRW_view_far_distance_get(NULL);
float near = DRW_view_near_distance_get(nullptr);
float far = DRW_view_far_distance_get(nullptr);
float range = fabsf(far - near);

for (int i = 0; i < pix_num; i++) {
@@ -154,7 +155,7 @@ static void workbench_render_result_z(RenderLayer *rl, const char *viewname, con

void workbench_render(void *ved, RenderEngine *engine, RenderLayer *render_layer, const rcti *rect)
{
WORKBENCH_Data *data = ved;
WORKBENCH_Data *data = static_cast<WORKBENCH_Data *>(ved);
DefaultFramebufferList *dfbl = DRW_viewport_framebuffer_list_get();
const DRWContextState *draw_ctx = DRW_context_state_get();
Depsgraph *depsgraph = draw_ctx->depsgraph;
@@ -63,12 +63,12 @@ static void workbench_shadow_update(WORKBENCH_PrivateData *wpd)
}

float planes[6][4];
DRW_culling_frustum_planes_get(NULL, planes);
DRW_culling_frustum_planes_get(nullptr, planes);
/* we only need the far plane. */
copy_v4_v4(wpd->shadow_far_plane, planes[2]);

BoundBox frustum_corners;
DRW_culling_frustum_corners_get(NULL, &frustum_corners);
DRW_culling_frustum_corners_get(nullptr, &frustum_corners);

float shadow_near_corners[4][3];
mul_v3_mat3_m4v3(shadow_near_corners[0], wpd->shadow_inv, frustum_corners.vec[0]);
@@ -97,7 +97,7 @@ void workbench_shadow_data_update(WORKBENCH_PrivateData *wpd, WORKBENCH_UBO_Worl
const Scene *scene = draw_ctx->scene;

float view_matrix[4][4];
DRW_view_viewmat_get(NULL, view_matrix, false);
DRW_view_viewmat_get(nullptr, view_matrix, false);

/* Turn the light in a way where it's more user friendly to control. */
copy_v3_v3(wpd->shadow_direction_ws, scene->display.light_direction);
@@ -162,8 +162,8 @@ void workbench_shadow_cache_init(WORKBENCH_Data *data)
}
}
else {
psl->shadow_ps[0] = NULL;
psl->shadow_ps[1] = NULL;
psl->shadow_ps[0] = nullptr;
psl->shadow_ps[1] = nullptr;
}
}

@@ -298,7 +298,7 @@ void workbench_shadow_cache_populate(WORKBENCH_Data *data, Object *ob, const boo

bool is_manifold;
struct GPUBatch *geom_shadow = DRW_cache_object_edge_detection_get(ob, &is_manifold);
if (geom_shadow == NULL) {
if (geom_shadow == nullptr) {
return;
}

@@ -307,7 +307,7 @@ void workbench_shadow_cache_populate(WORKBENCH_Data *data, Object *ob, const boo
&draw_engine_workbench,
sizeof(WORKBENCH_ObjectData),
&workbench_init_object_data,
NULL);
nullptr);

if (workbench_shadow_object_cast_visible_shadow(wpd, ob, engine_object_data)) {
mul_v3_mat3_m4v3(
@@ -333,7 +333,7 @@ void workbench_shadow_cache_populate(WORKBENCH_Data *data, Object *ob, const boo
DRW_shgroup_uniform_float_copy(grp, "lightDistance", 1e5f);
DRW_shgroup_call_no_cull(grp, geom_shadow, ob);
#if DEBUG_SHADOW_VOLUME
DRW_debug_bbox(&engine_object_data->shadow_bbox, (float[4]){1.0f, 0.0f, 0.0f, 1.0f});
DRW_debug_bbox(&engine_object_data->shadow_bbox, blender::float4{1.0f, 0.0f, 0.0f, 1.0f});
#endif
}
else {
@@ -354,7 +354,7 @@ void workbench_shadow_cache_populate(WORKBENCH_Data *data, Object *ob, const boo
DRW_shgroup_uniform_float_copy(grp, "lightDistance", extrude_distance);
DRW_shgroup_call_no_cull(grp, geom_shadow, ob);
#if DEBUG_SHADOW_VOLUME
DRW_debug_bbox(&engine_object_data->shadow_bbox, (float[4]){0.0f, 1.0f, 0.0f, 1.0f});
DRW_debug_bbox(&engine_object_data->shadow_bbox, blender::float4{0.0f, 1.0f, 0.0f, 1.0f});
#endif
}
}
@@ -92,7 +92,8 @@ void workbench_transparent_cache_init(WORKBENCH_Data *vedata)
DRW_PASS_INSTANCE_CREATE(psl->transp_depth_ps, pass, state | DRW_STATE_WRITE_DEPTH);
}

for (eWORKBENCH_DataType data = 0; data < WORKBENCH_DATATYPE_MAX; data++) {
for (int data_i = 0; data_i < WORKBENCH_DATATYPE_MAX; data_i++) {
eWORKBENCH_DataType data = eWORKBENCH_DataType(data_i);
wpd->prepass[transp][infront][data].material_hash = BLI_ghash_ptr_new(__func__);

sh = workbench_shader_transparent_get(wpd, data);
@@ -132,7 +133,7 @@ void workbench_transparent_cache_init(WORKBENCH_Data *vedata)
grp = DRW_shgroup_create(sh, psl->transp_resolve_ps);
DRW_shgroup_uniform_texture(grp, "transparentAccum", wpd->accum_buffer_tx);
DRW_shgroup_uniform_texture(grp, "transparentRevealage", wpd->reveal_buffer_tx);
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
}
}

@@ -28,7 +28,7 @@ void workbench_volume_engine_init(WORKBENCH_Data *vedata)
{
WORKBENCH_TextureList *txl = vedata->txl;

if (txl->dummy_volume_tx == NULL) {
if (txl->dummy_volume_tx == nullptr) {
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ;

const float zero[4] = {0.0f, 0.0f, 0.0f, 0.0f};
@@ -58,7 +58,7 @@ static void workbench_volume_modifier_cache_populate(WORKBENCH_Data *vedata,
WORKBENCH_PrivateData *wpd = vedata->stl->wpd;
WORKBENCH_TextureList *txl = vedata->txl;
DefaultTextureList *dtxl = DRW_viewport_texture_list_get();
DRWShadingGroup *grp = NULL;
DRWShadingGroup *grp = nullptr;

if (!fds->fluid) {
return;
@@ -75,8 +75,8 @@ static void workbench_volume_modifier_cache_populate(WORKBENCH_Data *vedata,
return;
}

if ((!fds->use_coba && (fds->tex_density == NULL && fds->tex_color == NULL)) ||
(fds->use_coba && fds->tex_field == NULL))
if ((!fds->use_coba && (fds->tex_density == nullptr && fds->tex_color == nullptr)) ||
(fds->use_coba && fds->tex_field == nullptr))
{
return;
}
@@ -107,7 +107,7 @@ static void workbench_volume_modifier_cache_populate(WORKBENCH_Data *vedata,

if (use_slice) {
float invviewmat[4][4];
DRW_view_viewmat_get(NULL, invviewmat, true);
DRW_view_viewmat_get(nullptr, invviewmat, true);

const int axis = (fds->slice_axis == SLICE_AXIS_AUTO) ?
axis_dominant_v3_single(invviewmat[2]) :
@@ -128,7 +128,7 @@ static void workbench_volume_modifier_cache_populate(WORKBENCH_Data *vedata,
double noise_ofs;
BLI_halton_1d(3, 0.0, wpd->taa_sample, &noise_ofs);
float dim[3], step_length, max_slice;
float slice_count[3] = {fds->res[0], fds->res[1], fds->res[2]};
float slice_count[3] = {float(fds->res[0]), float(fds->res[1]), float(fds->res[2])};
mul_v3_fl(slice_count, max_ff(0.001f, fds->slice_per_voxel));
max_slice = max_fff(slice_count[0], slice_count[1], slice_count[2]);
BKE_object_dimensions_get(ob, dim);
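Brief note on the `float(...)` casts above (a sketch, not part of this change): list-initialization in C++ rejects narrowing conversions, so filling a `float[3]` from runtime `int` values needs explicit casts, while C accepts the implicit conversion silently.

int main()
{
  int res[3] = {64, 64, 32};
  /* float count[3] = {res[0], res[1], res[2]}; */                /* Fine in C; ill-formed narrowing in C++. */
  float count[3] = {float(res[0]), float(res[1]), float(res[2])}; /* Explicit conversions, valid in C++. */
  (void)count;
  return 0;
}
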
@@ -200,21 +200,21 @@ static void workbench_volume_object_cache_populate(WORKBENCH_Data *vedata,
eV3DShadingColorType color_type)
{
/* Create 3D textures. */
Volume *volume = ob->data;
Volume *volume = static_cast<Volume *>(ob->data);
BKE_volume_load(volume, G.main);
const VolumeGrid *volume_grid = BKE_volume_grid_active_get_for_read(volume);
if (volume_grid == NULL) {
if (volume_grid == nullptr) {
return;
}
DRWVolumeGrid *grid = DRW_volume_batch_cache_get_grid(volume, volume_grid);
if (grid == NULL) {
if (grid == nullptr) {
return;
}

WORKBENCH_PrivateData *wpd = vedata->stl->wpd;
WORKBENCH_TextureList *txl = vedata->txl;
DefaultTextureList *dtxl = DRW_viewport_texture_list_get();
DRWShadingGroup *grp = NULL;
DRWShadingGroup *grp = nullptr;

wpd->volumes_do = true;
const bool use_slice = (volume->display.axis_slice_method == AXIS_SLICE_SINGLE);
@@ -245,7 +245,7 @@ static void workbench_volume_object_cache_populate(WORKBENCH_Data *vedata,

if (use_slice) {
float invviewmat[4][4];
DRW_view_viewmat_get(NULL, invviewmat, true);
DRW_view_viewmat_get(nullptr, invviewmat, true);

const int axis = (volume->display.slice_axis == SLICE_AXIS_AUTO) ?
axis_dominant_v3_single(invviewmat[2]) :
@@ -277,7 +277,7 @@ static void workbench_volume_object_cache_populate(WORKBENCH_Data *vedata,
float step_length, max_slice;
int resolution[3];
GPU_texture_get_mipmap_size(grid->texture, 0, resolution);
float slice_count[3] = {resolution[0], resolution[1], resolution[2]};
float slice_count[3] = {float(resolution[0]), float(resolution[1]), float(resolution[2])};
mul_v3_fl(slice_count, max_ff(0.001f, 5.0f));
max_slice = max_fff(slice_count[0], slice_count[1], slice_count[2]);
invert_v3(slice_count);
@@ -312,12 +312,12 @@ static void workbench_volume_object_cache_populate(WORKBENCH_Data *vedata,
}

void workbench_volume_cache_populate(WORKBENCH_Data *vedata,
Scene *UNUSED(scene),
Scene * /*scene*/,
Object *ob,
ModifierData *md,
eV3DShadingColorType color_type)
{
if (md == NULL) {
if (md == nullptr) {
workbench_volume_object_cache_populate(vedata, ob, color_type);
}
else {
@@ -77,16 +77,27 @@
/** \name Internal Types
* \{ */

typedef struct Vert {
struct Vert {
float pos[3];
int class;
} Vert;
int v_class;

typedef struct VertShaded {
/** Allows creating a pointer to `Vert` in a single expression. */
operator const void *() const
{
return this;
}
};

struct VertShaded {
float pos[3];
int class;
int v_class;
float nor[3];
} VertShaded;

operator const void *() const
{
return this;
}
};

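The struct rewrite above replaces C compound literals with C++ aggregates: the call sites used to build a pointer in one expression with `&(Vert){...}`, but compound literals are not standard C++, so the structs gain an `operator const void *()` letting a temporary `Vert{...}` be passed where a `const void *` is expected. A self-contained sketch of the idea; `set_vert()` below is a hypothetical stand-in, not the real GPU API.

#include <cstring>

struct Vert {
  float pos[3];
  int v_class;

  /* Allows passing a temporary where `const void *` is expected. */
  operator const void *() const
  {
    return this;
  }
};

/* Hypothetical stand-in for a vertex-buffer setter taking `const void *`. */
static void set_vert(void *dst, const void *src)
{
  std::memcpy(dst, src, sizeof(Vert));
}

int main()
{
  Vert buffer[1] = {};
  /* set_vert(buffer, &(Vert){{0.0f, 1.0f, 0.0f}, 1}); */ /* C compound literal, not valid C++. */
  set_vert(buffer, Vert{{0.0f, 1.0f, 0.0f}, 1});          /* C++ temporary plus conversion operator. */
  return 0;
}
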
/* Batch's only (free'd as an array) */
static struct DRWShapeCache {
@@ -155,7 +166,7 @@ static struct DRWShapeCache {
GPUBatch *drw_particle_axis;
GPUBatch *drw_gpencil_dummy_quad;
GPUBatch *drw_sphere_lod[DRW_LOD_MAX];
} SHC = {NULL};
} SHC = {nullptr};

void DRW_shape_cache_free(void)
{
@@ -182,7 +193,8 @@ GPUBatch *drw_cache_procedural_points_get(void)
GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
GPU_vertbuf_data_alloc(vbo, 1);

SHC.drw_procedural_verts = GPU_batch_create_ex(GPU_PRIM_POINTS, vbo, NULL, GPU_BATCH_OWNS_VBO);
SHC.drw_procedural_verts = GPU_batch_create_ex(
GPU_PRIM_POINTS, vbo, nullptr, GPU_BATCH_OWNS_VBO);
}
return SHC.drw_procedural_verts;
}
@@ -196,7 +208,8 @@ GPUBatch *drw_cache_procedural_lines_get(void)
GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
GPU_vertbuf_data_alloc(vbo, 1);

SHC.drw_procedural_lines = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
SHC.drw_procedural_lines = GPU_batch_create_ex(
GPU_PRIM_LINES, vbo, nullptr, GPU_BATCH_OWNS_VBO);
}
return SHC.drw_procedural_lines;
}
@@ -210,7 +223,7 @@ GPUBatch *drw_cache_procedural_triangles_get(void)
GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
GPU_vertbuf_data_alloc(vbo, 1);

SHC.drw_procedural_tris = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
SHC.drw_procedural_tris = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, nullptr, GPU_BATCH_OWNS_VBO);
}
return SHC.drw_procedural_tris;
}
@@ -225,7 +238,7 @@ GPUBatch *drw_cache_procedural_triangle_strips_get(void)
GPU_vertbuf_data_alloc(vbo, 1);

SHC.drw_procedural_tri_strips = GPU_batch_create_ex(
GPU_PRIM_TRI_STRIP, vbo, NULL, GPU_BATCH_OWNS_VBO);
GPU_PRIM_TRI_STRIP, vbo, nullptr, GPU_BATCH_OWNS_VBO);
}
return SHC.drw_procedural_tri_strips;
}
@@ -358,13 +371,13 @@ static GPUVertBuf *sphere_wire_vbo(const float rad, int flag)
cv[1] = p[(i + j) % NSEGMENTS][1];

if (axis == 0) {
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{cv[0], cv[1], 0.0f}, flag});
GPU_vertbuf_vert_set(vbo, v++, Vert{{cv[0], cv[1], 0.0f}, flag});
}
else if (axis == 1) {
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{cv[0], 0.0f, cv[1]}, flag});
GPU_vertbuf_vert_set(vbo, v++, Vert{{cv[0], 0.0f, cv[1]}, flag});
}
else {
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, cv[0], cv[1]}, flag});
GPU_vertbuf_vert_set(vbo, v++, Vert{{0.0f, cv[0], cv[1]}, flag});
}
}
}
@@ -403,7 +416,7 @@ GPUBatch *DRW_cache_fullscreen_quad_get(void)
GPU_vertbuf_attr_set(vbo, attr_id.uvs, i, uvs[i]);
}

SHC.drw_fullscreen_quad = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
SHC.drw_fullscreen_quad = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, nullptr, GPU_BATCH_OWNS_VBO);
}
return SHC.drw_fullscreen_quad;
}
@@ -420,10 +433,10 @@ GPUBatch *DRW_cache_quad_get(void)
int flag = VCLASS_EMPTY_SCALED;
const float p[4][2] = {{-1.0f, 1.0f}, {1.0f, 1.0f}, {-1.0f, -1.0f}, {1.0f, -1.0f}};
for (int a = 0; a < 4; a++) {
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{p[a][0], p[a][1], 0.0f}, flag});
GPU_vertbuf_vert_set(vbo, v++, Vert{{p[a][0], p[a][1], 0.0f}, flag});
}

SHC.drw_quad = GPU_batch_create_ex(GPU_PRIM_TRI_STRIP, vbo, NULL, GPU_BATCH_OWNS_VBO);
SHC.drw_quad = GPU_batch_create_ex(GPU_PRIM_TRI_STRIP, vbo, nullptr, GPU_BATCH_OWNS_VBO);
}
return SHC.drw_quad;
}
@@ -440,10 +453,11 @@ GPUBatch *DRW_cache_quad_wires_get(void)
int flag = VCLASS_EMPTY_SCALED;
const float p[4][2] = {{-1.0f, -1.0f}, {-1.0f, 1.0f}, {1.0f, 1.0f}, {1.0f, -1.0f}};
for (int a = 0; a < 5; a++) {
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{p[a % 4][0], p[a % 4][1], 0.0f}, flag});
GPU_vertbuf_vert_set(vbo, v++, Vert{{p[a % 4][0], p[a % 4][1], 0.0f}, flag});
}

SHC.drw_quad_wires = GPU_batch_create_ex(GPU_PRIM_LINE_STRIP, vbo, NULL, GPU_BATCH_OWNS_VBO);
SHC.drw_quad_wires = GPU_batch_create_ex(
GPU_PRIM_LINE_STRIP, vbo, nullptr, GPU_BATCH_OWNS_VBO);
}
return SHC.drw_quad_wires;
}
@@ -471,10 +485,10 @@ GPUBatch *DRW_cache_grid_get(void)
float pos2[2] = {(float)i / 8.0f, (float)(j + 1) / 8.0f};
float pos3[2] = {(float)(i + 1) / 8.0f, (float)(j + 1) / 8.0f};

madd_v2_v2v2fl(pos0, (float[2]){-1.0f, -1.0f}, pos0, 2.0f);
madd_v2_v2v2fl(pos1, (float[2]){-1.0f, -1.0f}, pos1, 2.0f);
madd_v2_v2v2fl(pos2, (float[2]){-1.0f, -1.0f}, pos2, 2.0f);
madd_v2_v2v2fl(pos3, (float[2]){-1.0f, -1.0f}, pos3, 2.0f);
madd_v2_v2v2fl(pos0, blender::float2{-1.0f, -1.0f}, pos0, 2.0f);
madd_v2_v2v2fl(pos1, blender::float2{-1.0f, -1.0f}, pos1, 2.0f);
madd_v2_v2v2fl(pos2, blender::float2{-1.0f, -1.0f}, pos2, 2.0f);
madd_v2_v2v2fl(pos3, blender::float2{-1.0f, -1.0f}, pos3, 2.0f);

GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, pos0);
GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, pos1);
@@ -486,7 +500,7 @@ GPUBatch *DRW_cache_grid_get(void)
}
}

SHC.drw_grid = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
SHC.drw_grid = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, nullptr, GPU_BATCH_OWNS_VBO);
}
return SHC.drw_grid;
}
@@ -497,7 +511,7 @@ static void sphere_lat_lon_vert(GPUVertBuf *vbo, int *v_ofs, float lat, float lo
float x = sinf(lat) * cosf(lon);
float y = cosf(lat);
float z = sinf(lat) * sinf(lon);
GPU_vertbuf_vert_set(vbo, *v_ofs, &(VertShaded){{x, y, z}, VCLASS_EMPTY_SCALED, {x, y, z}});
GPU_vertbuf_vert_set(vbo, *v_ofs, VertShaded{{x, y, z}, VCLASS_EMPTY_SCALED, {x, y, z}});
(*v_ofs)++;
}

@@ -523,7 +537,7 @@ GPUBatch *DRW_cache_sphere_get(const eDRWLevelOfDetail level_of_detail)
lon_res = DRW_SPHERE_SHAPE_LONGITUDE_HIGH;
break;
default:
return NULL;
return nullptr;
}

GPUVertFormat format = extra_vert_format();
@@ -556,7 +570,7 @@ GPUBatch *DRW_cache_sphere_get(const eDRWLevelOfDetail level_of_detail)
}

SHC.drw_sphere_lod[level_of_detail] = GPU_batch_create_ex(
GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
GPU_PRIM_TRIS, vbo, nullptr, GPU_BATCH_OWNS_VBO);
}
return SHC.drw_sphere_lod[level_of_detail];
}
@@ -577,7 +591,7 @@ static void circle_verts(
float c = cosf(angle) * radius;
int v = *vert_idx;
*vert_idx = v + 1;
GPU_vertbuf_vert_set(vbo, v, &(Vert){{s, c, z}, flag});
GPU_vertbuf_vert_set(vbo, v, Vert{{s, c, z}, flag});
}
}
}
@@ -592,7 +606,7 @@ static void circle_dashed_verts(
float c = cosf(angle) * radius;
int v = *vert_idx;
*vert_idx = v + 1;
GPU_vertbuf_vert_set(vbo, v, &(Vert){{s, c, z}, flag});
GPU_vertbuf_vert_set(vbo, v, Vert{{s, c, z}, flag});
}
}
}
@@ -733,7 +747,7 @@ GPUBatch *DRW_cache_cube_get(void)
float x = bone_box_verts[i][0];
float y = bone_box_verts[i][1] * 2.0f - 1.0f;
float z = bone_box_verts[i][2];
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{x, y, z}, VCLASS_EMPTY_SCALED});
GPU_vertbuf_vert_set(vbo, v++, Vert{{x, y, z}, VCLASS_EMPTY_SCALED});
}

for (int i = 0; i < tri_len; i++) {
@@ -761,10 +775,10 @@ GPUBatch *DRW_cache_circle_get(void)
float x = sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
float z = cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
float y = 0.0f;
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{x, y, z}, VCLASS_EMPTY_SCALED});
GPU_vertbuf_vert_set(vbo, v++, Vert{{x, y, z}, VCLASS_EMPTY_SCALED});
}

SHC.drw_circle = GPU_batch_create_ex(GPU_PRIM_LINE_STRIP, vbo, NULL, GPU_BATCH_OWNS_VBO);
SHC.drw_circle = GPU_batch_create_ex(GPU_PRIM_LINE_STRIP, vbo, nullptr, GPU_BATCH_OWNS_VBO);
}
return SHC.drw_circle;
#undef CIRCLE_RESOL
@@ -781,7 +795,7 @@ GPUBatch *DRW_cache_normal_arrow_get(void)

/* TODO: real arrow. For now, it's a line positioned in the vertex shader. */

SHC.drw_normal_arrow = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
SHC.drw_normal_arrow = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, nullptr, GPU_BATCH_OWNS_VBO);
}
return SHC.drw_normal_arrow;
}
@@ -830,7 +844,7 @@ void DRW_vertbuf_create_wiredata(GPUVertBuf *vbo, const int vert_len)

GPUBatch *DRW_gpencil_dummy_buffer_get(void)
{
if (SHC.drw_gpencil_dummy_quad == NULL) {
if (SHC.drw_gpencil_dummy_quad == nullptr) {
GPUVertFormat format = {0};
/* NOTE: Use GPU_COMP_U32 to satisfy minimum 4-byte vertex stride for Metal backend. */
GPU_vertformat_attr_add(&format, "dummy", GPU_COMP_U32, 1, GPU_FETCH_INT);
@@ -838,7 +852,7 @@ GPUBatch *DRW_gpencil_dummy_buffer_get(void)
GPU_vertbuf_data_alloc(vbo, 4);

SHC.drw_gpencil_dummy_quad = GPU_batch_create_ex(
GPU_PRIM_TRI_FAN, vbo, NULL, GPU_BATCH_OWNS_VBO);
GPU_PRIM_TRI_FAN, vbo, nullptr, GPU_BATCH_OWNS_VBO);
}
return SHC.drw_gpencil_dummy_quad;
}
@@ -860,7 +874,7 @@ GPUBatch *DRW_cache_object_all_edges_get(Object *ob)
return DRW_cache_mesh_all_edges_get(ob);
/* TODO: should match #DRW_cache_object_surface_get. */
default:
return NULL;
return nullptr;
}
}

@@ -870,7 +884,7 @@ GPUBatch *DRW_cache_object_edge_detection_get(Object *ob, bool *r_is_manifold)
case OB_MESH:
return DRW_cache_mesh_edge_detection_get(ob, r_is_manifold);
default:
return NULL;
return nullptr;
}
}

@@ -886,7 +900,7 @@ GPUBatch *DRW_cache_object_face_wireframe_get(Object *ob)
case OB_GPENCIL_LEGACY:
return DRW_cache_gpencil_face_wireframe_get(ob);
default:
return NULL;
return nullptr;
}
}

@@ -896,7 +910,7 @@ GPUBatch *DRW_cache_object_loose_edges_get(Object *ob)
case OB_MESH:
return DRW_cache_mesh_loose_edges_get(ob);
default:
return NULL;
return nullptr;
}
}

@@ -906,20 +920,21 @@ GPUBatch *DRW_cache_object_surface_get(Object *ob)
case OB_MESH:
return DRW_cache_mesh_surface_get(ob);
default:
return NULL;
return nullptr;
}
}

GPUVertBuf *DRW_cache_object_pos_vertbuf_get(Object *ob)
{
Mesh *me = BKE_object_get_evaluated_mesh_no_subsurf(ob);
short type = (me != NULL) ? OB_MESH : ob->type;
short type = (me != nullptr) ? OB_MESH : ob->type;

switch (type) {
case OB_MESH:
return DRW_mesh_batch_cache_pos_vertbuf_get((me != NULL) ? me : ob->data);
return DRW_mesh_batch_cache_pos_vertbuf_get(
static_cast<Mesh *>((me != nullptr) ? me : ob->data));
default:
return NULL;
return nullptr;
}
}

@@ -928,7 +943,7 @@ int DRW_cache_object_material_count_get(Object *ob)
short type = ob->type;

Mesh *me = BKE_object_get_evaluated_mesh_no_subsurf(ob);
if (me != NULL && type != OB_POINTCLOUD) {
if (me != nullptr && type != OB_POINTCLOUD) {
/* Some object types can have one data type in ob->data, but will be rendered as mesh.
* For point clouds this never happens. Ideally this check would happen at another level
* and we would just have to care about ob->data here. */
@@ -937,19 +952,20 @@ int DRW_cache_object_material_count_get(Object *ob)

switch (type) {
case OB_MESH:
return DRW_mesh_material_count_get(ob, (me != NULL) ? me : ob->data);
return DRW_mesh_material_count_get(
ob, static_cast<const Mesh *>((me != nullptr) ? me : ob->data));
case OB_CURVES_LEGACY:
case OB_SURF:
case OB_FONT:
return DRW_curve_material_count_get(ob->data);
return DRW_curve_material_count_get(static_cast<Curve *>(ob->data));
case OB_CURVES:
return DRW_curves_material_count_get(ob->data);
return DRW_curves_material_count_get(static_cast<Curves *>(ob->data));
case OB_POINTCLOUD:
return DRW_pointcloud_material_count_get(ob->data);
return DRW_pointcloud_material_count_get(static_cast<PointCloud *>(ob->data));
case OB_VOLUME:
return DRW_volume_material_count_get(ob->data);
return DRW_volume_material_count_get(static_cast<Volume *>(ob->data));
case OB_GPENCIL_LEGACY:
return DRW_gpencil_material_count_get(ob->data);
return DRW_gpencil_material_count_get(static_cast<bGPdata *>(ob->data));
default:
BLI_assert(0);
return 0;
@@ -964,7 +980,7 @@ GPUBatch **DRW_cache_object_surface_material_get(Object *ob,
case OB_MESH:
return DRW_cache_mesh_surface_shaded_get(ob, gpumat_array, gpumat_array_len);
default:
return NULL;
return nullptr;
}
}

@@ -984,14 +1000,14 @@ GPUBatch *DRW_cache_plain_axes_get(void)

int v = 0;
int flag = VCLASS_EMPTY_SCALED;
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, -1.0f, 0.0f}, flag});
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, 1.0f, 0.0f}, flag});
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{-1.0f, 0.0f, 0.0f}, flag});
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{1.0f, 0.0f, 0.0f}, flag});
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, 0.0f, -1.0f}, flag});
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, 0.0f, 1.0f}, flag});
GPU_vertbuf_vert_set(vbo, v++, Vert{{0.0f, -1.0f, 0.0f}, flag});
GPU_vertbuf_vert_set(vbo, v++, Vert{{0.0f, 1.0f, 0.0f}, flag});
GPU_vertbuf_vert_set(vbo, v++, Vert{{-1.0f, 0.0f, 0.0f}, flag});
GPU_vertbuf_vert_set(vbo, v++, Vert{{1.0f, 0.0f, 0.0f}, flag});
GPU_vertbuf_vert_set(vbo, v++, Vert{{0.0f, 0.0f, -1.0f}, flag});
GPU_vertbuf_vert_set(vbo, v++, Vert{{0.0f, 0.0f, 1.0f}, flag});

SHC.drw_plain_axes = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
SHC.drw_plain_axes = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, nullptr, GPU_BATCH_OWNS_VBO);
}
return SHC.drw_plain_axes;
}
@@ -1008,10 +1024,10 @@ GPUBatch *DRW_cache_empty_cube_get(void)
float x = bone_box_verts[bone_box_wire[i]][0];
float y = bone_box_verts[bone_box_wire[i]][1] * 2.0 - 1.0f;
float z = bone_box_verts[bone_box_wire[i]][2];
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{x, y, z}, VCLASS_EMPTY_SCALED});
GPU_vertbuf_vert_set(vbo, v++, Vert{{x, y, z}, VCLASS_EMPTY_SCALED});
}

SHC.drw_empty_cube = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
SHC.drw_empty_cube = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, nullptr, GPU_BATCH_OWNS_VBO);
}
return SHC.drw_empty_cube;
}
@@ -1042,14 +1058,14 @@ GPUBatch *DRW_cache_single_arrow_get(void)
p[2][0] = -p[2][0];
}
for (int i = 0, a = 1; i < 2; i++, a++) {
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{p[i][0], p[i][1], p[i][2]}, flag});
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{p[a][0], p[a][1], p[a][2]}, flag});
GPU_vertbuf_vert_set(vbo, v++, Vert{{p[i][0], p[i][1], p[i][2]}, flag});
GPU_vertbuf_vert_set(vbo, v++, Vert{{p[a][0], p[a][1], p[a][2]}, flag});
}
}
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, 0.0f, 0.0}, flag});
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, 0.0f, 0.75f}, flag});
GPU_vertbuf_vert_set(vbo, v++, Vert{{0.0f, 0.0f, 0.0}, flag});
GPU_vertbuf_vert_set(vbo, v++, Vert{{0.0f, 0.0f, 0.75f}, flag});

SHC.drw_single_arrow = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
SHC.drw_single_arrow = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, nullptr, GPU_BATCH_OWNS_VBO);
}
return SHC.drw_single_arrow;
}
@@ -1058,7 +1074,7 @@ GPUBatch *DRW_cache_empty_sphere_get(void)
{
if (!SHC.drw_empty_sphere) {
GPUVertBuf *vbo = sphere_wire_vbo(1.0f, VCLASS_EMPTY_SCALED);
SHC.drw_empty_sphere = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
SHC.drw_empty_sphere = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, nullptr, GPU_BATCH_OWNS_VBO);
}
return SHC.drw_empty_sphere;
}
@@ -1086,17 +1102,17 @@ GPUBatch *DRW_cache_empty_cone_get(void)
cv[1] = p[(i) % NSEGMENTS][1];

/* cone sides */
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{cv[0], 0.0f, cv[1]}, flag});
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, 2.0f, 0.0f}, flag});
GPU_vertbuf_vert_set(vbo, v++, Vert{{cv[0], 0.0f, cv[1]}, flag});
GPU_vertbuf_vert_set(vbo, v++, Vert{{0.0f, 2.0f, 0.0f}, flag});

/* end ring */
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{cv[0], 0.0f, cv[1]}, flag});
GPU_vertbuf_vert_set(vbo, v++, Vert{{cv[0], 0.0f, cv[1]}, flag});
cv[0] = p[(i + 1) % NSEGMENTS][0];
cv[1] = p[(i + 1) % NSEGMENTS][1];
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{cv[0], 0.0f, cv[1]}, flag});
GPU_vertbuf_vert_set(vbo, v++, Vert{{cv[0], 0.0f, cv[1]}, flag});
}

SHC.drw_empty_cone = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
SHC.drw_empty_cone = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, nullptr, GPU_BATCH_OWNS_VBO);
}
return SHC.drw_empty_cone;
#undef NSEGMENTS
@@ -1127,17 +1143,17 @@ GPUBatch *DRW_cache_empty_cylinder_get(void)
pv[1] = p[(i + 1) % NSEGMENTS][1];

/* cylinder sides */
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{cv[0], cv[1], -1.0f}, flag});
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{cv[0], cv[1], 1.0f}, flag});
GPU_vertbuf_vert_set(vbo, v++, Vert{{cv[0], cv[1], -1.0f}, flag});
GPU_vertbuf_vert_set(vbo, v++, Vert{{cv[0], cv[1], 1.0f}, flag});
/* top ring */
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{cv[0], cv[1], 1.0f}, flag});
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{pv[0], pv[1], 1.0f}, flag});
GPU_vertbuf_vert_set(vbo, v++, Vert{{cv[0], cv[1], 1.0f}, flag});
GPU_vertbuf_vert_set(vbo, v++, Vert{{pv[0], pv[1], 1.0f}, flag});
/* bottom ring */
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{cv[0], cv[1], -1.0f}, flag});
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{pv[0], pv[1], -1.0f}, flag});
GPU_vertbuf_vert_set(vbo, v++, Vert{{cv[0], cv[1], -1.0f}, flag});
GPU_vertbuf_vert_set(vbo, v++, Vert{{pv[0], pv[1], -1.0f}, flag});
}

SHC.drw_empty_cylinder = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
SHC.drw_empty_cylinder = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, nullptr, GPU_BATCH_OWNS_VBO);
}
return SHC.drw_empty_cylinder;
#undef NSEGMENTS
@@ -1171,7 +1187,7 @@ GPUBatch *DRW_cache_empty_capsule_body_get(void)
GPU_vertbuf_attr_fill(vbo, attr_id.pos, pos);

SHC.drw_empty_capsule_body = GPU_batch_create_ex(
GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
GPU_PRIM_LINES, vbo, nullptr, GPU_BATCH_OWNS_VBO);
}
return SHC.drw_empty_capsule_body;
}
@@ -1226,7 +1242,8 @@ GPUBatch *DRW_cache_empty_capsule_cap_get(void)
GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
}

SHC.drw_empty_capsule_cap = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
SHC.drw_empty_capsule_cap = GPU_batch_create_ex(
GPU_PRIM_LINES, vbo, nullptr, GPU_BATCH_OWNS_VBO);
}
return SHC.drw_empty_capsule_cap;
#undef NSEGMENTS
@@ -1249,7 +1266,7 @@ GPUBatch *DRW_cache_field_wind_get(void)
circle_verts(vbo, &v, CIRCLE_RESOL, 1.0f, z, flag);
}

SHC.drw_field_wind = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
SHC.drw_field_wind = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, nullptr, GPU_BATCH_OWNS_VBO);
}
return SHC.drw_field_wind;
#undef CIRCLE_RESOL
@@ -1272,7 +1289,7 @@ GPUBatch *DRW_cache_field_force_get(void)
circle_verts(vbo, &v, CIRCLE_RESOL, radius, 0.0f, flag);
}

SHC.drw_field_force = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
SHC.drw_field_force = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, nullptr, GPU_BATCH_OWNS_VBO);
}
return SHC.drw_field_force;
#undef CIRCLE_RESOL
@@ -1293,15 +1310,16 @@ GPUBatch *DRW_cache_field_vortex_get(void)
for (int a = SPIRAL_RESOL; a > -1; a--) {
float r = a / (float)SPIRAL_RESOL;
float angle = (2.0f * M_PI * a) / SPIRAL_RESOL;
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{sinf(angle) * r, cosf(angle) * r, 0.0f}, flag});
GPU_vertbuf_vert_set(vbo, v++, Vert{{sinf(angle) * r, cosf(angle) * r, 0.0f}, flag});
}
for (int a = 1; a <= SPIRAL_RESOL; a++) {
float r = a / (float)SPIRAL_RESOL;
float angle = (2.0f * M_PI * a) / SPIRAL_RESOL;
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{sinf(angle) * -r, cosf(angle) * -r, 0.0f}, flag});
GPU_vertbuf_vert_set(vbo, v++, Vert{{sinf(angle) * -r, cosf(angle) * -r, 0.0f}, flag});
}

SHC.drw_field_vortex = GPU_batch_create_ex(GPU_PRIM_LINE_STRIP, vbo, NULL, GPU_BATCH_OWNS_VBO);
SHC.drw_field_vortex = GPU_batch_create_ex(
GPU_PRIM_LINE_STRIP, vbo, nullptr, GPU_BATCH_OWNS_VBO);
}
return SHC.drw_field_vortex;
#undef SPIRAL_RESOL
@@ -1321,7 +1339,7 @@ GPUBatch *DRW_cache_field_curve_get(void)
|
||||
int flag = VCLASS_EMPTY_SIZE | VCLASS_SCREENALIGNED;
|
||||
circle_verts(vbo, &v, CIRCLE_RESOL, 1.0f, 0.0f, flag);
|
||||
|
||||
SHC.drw_field_curve = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
|
||||
SHC.drw_field_curve = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, nullptr, GPU_BATCH_OWNS_VBO);
|
||||
}
|
||||
return SHC.drw_field_curve;
|
||||
#undef CIRCLE_RESOL
|
||||
@@ -1350,11 +1368,12 @@ GPUBatch *DRW_cache_field_tube_limit_get(void)
|
||||
float angle = (2.0f * M_PI * a) / 4.0f;
|
||||
for (int i = 0; i < SIDE_STIPPLE; i++) {
|
||||
float z = (i / (float)SIDE_STIPPLE) * 2.0f - 1.0f;
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{sinf(angle), cosf(angle), z}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{sinf(angle), cosf(angle), z}, flag});
|
||||
}
|
||||
}
|
||||
|
||||
SHC.drw_field_tube_limit = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
|
||||
SHC.drw_field_tube_limit = GPU_batch_create_ex(
|
||||
GPU_PRIM_LINES, vbo, nullptr, GPU_BATCH_OWNS_VBO);
|
||||
}
|
||||
return SHC.drw_field_tube_limit;
|
||||
#undef SIDE_STIPPLE
|
||||
@@ -1384,11 +1403,12 @@ GPUBatch *DRW_cache_field_cone_limit_get(void)
|
||||
float angle = (2.0f * M_PI * a) / 4.0f;
|
||||
for (int i = 0; i < SIDE_STIPPLE; i++) {
|
||||
float z = (i / (float)SIDE_STIPPLE) * 2.0f - 1.0f;
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{sinf(angle) * z, cosf(angle) * z, z}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{sinf(angle) * z, cosf(angle) * z, z}, flag});
|
||||
}
|
||||
}
|
||||
|
||||
SHC.drw_field_cone_limit = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
|
||||
SHC.drw_field_cone_limit = GPU_batch_create_ex(
|
||||
GPU_PRIM_LINES, vbo, nullptr, GPU_BATCH_OWNS_VBO);
|
||||
}
|
||||
return SHC.drw_field_cone_limit;
|
||||
#undef SIDE_STIPPLE
|
||||
@@ -1410,7 +1430,7 @@ GPUBatch *DRW_cache_field_sphere_limit_get(void)
|
||||
circle_dashed_verts(vbo, &v, CIRCLE_RESOL, 1.0f, 0.0f, flag);
|
||||
|
||||
SHC.drw_field_sphere_limit = GPU_batch_create_ex(
|
||||
GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
|
||||
GPU_PRIM_LINES, vbo, nullptr, GPU_BATCH_OWNS_VBO);
|
||||
}
|
||||
return SHC.drw_field_sphere_limit;
|
||||
#undef CIRCLE_RESOL
|
||||
@@ -1459,10 +1479,10 @@ GPUBatch *DRW_cache_groundline_get(void)
|
||||
/* Ground Point */
|
||||
circle_verts(vbo, &v, DIAMOND_NSEGMENTS, 1.35f, 0.0f, 0);
|
||||
/* Ground Line */
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0, 0.0, 1.0}, 0});
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0, 0.0, 0.0}, 0});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{0.0, 0.0, 1.0}, 0});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{0.0, 0.0, 0.0}, 0});
|
||||
|
||||
SHC.drw_ground_line = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
|
||||
SHC.drw_ground_line = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, nullptr, GPU_BATCH_OWNS_VBO);
|
||||
}
|
||||
return SHC.drw_ground_line;
|
||||
}
@@ -1483,7 +1503,7 @@ GPUBatch *DRW_cache_light_icon_inner_lines_get(void)
circle_dashed_verts(vbo, &v, INNER_NSEGMENTS, r * 1.0f, 0.0f, VCLASS_SCREENSPACE);

SHC.drw_light_icon_inner_lines = GPU_batch_create_ex(
GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
GPU_PRIM_LINES, vbo, nullptr, GPU_BATCH_OWNS_VBO);
}
return SHC.drw_light_icon_inner_lines;
}
@@ -1503,7 +1523,7 @@ GPUBatch *DRW_cache_light_icon_outer_lines_get(void)
circle_dashed_verts(vbo, &v, OUTER_NSEGMENTS, r * 1.33f, 0.0f, VCLASS_SCREENSPACE);

SHC.drw_light_icon_outer_lines = GPU_batch_create_ex(
GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
GPU_PRIM_LINES, vbo, nullptr, GPU_BATCH_OWNS_VBO);
}
return SHC.drw_light_icon_outer_lines;
}
@@ -1528,14 +1548,14 @@ GPUBatch *DRW_cache_light_icon_sun_rays_get(void)
float angle = (2.0f * M_PI * a) / (float)num_rays;
float s = sinf(angle) * r;
float c = cosf(angle) * r;
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{s * 1.6f, c * 1.6f, 0.0f}, VCLASS_SCREENSPACE});
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{s * 1.9f, c * 1.9f, 0.0f}, VCLASS_SCREENSPACE});
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{s * 2.2f, c * 2.2f, 0.0f}, VCLASS_SCREENSPACE});
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{s * 2.5f, c * 2.5f, 0.0f}, VCLASS_SCREENSPACE});
GPU_vertbuf_vert_set(vbo, v++, Vert{{s * 1.6f, c * 1.6f, 0.0f}, VCLASS_SCREENSPACE});
GPU_vertbuf_vert_set(vbo, v++, Vert{{s * 1.9f, c * 1.9f, 0.0f}, VCLASS_SCREENSPACE});
GPU_vertbuf_vert_set(vbo, v++, Vert{{s * 2.2f, c * 2.2f, 0.0f}, VCLASS_SCREENSPACE});
GPU_vertbuf_vert_set(vbo, v++, Vert{{s * 2.5f, c * 2.5f, 0.0f}, VCLASS_SCREENSPACE});
}

SHC.drw_light_icon_sun_rays = GPU_batch_create_ex(
GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
GPU_PRIM_LINES, vbo, nullptr, GPU_BATCH_OWNS_VBO);
}
return SHC.drw_light_icon_sun_rays;
}
@@ -1555,7 +1575,8 @@ GPUBatch *DRW_cache_light_point_lines_get(void)
int flag = VCLASS_SCREENALIGNED | VCLASS_LIGHT_AREA_SHAPE;
circle_verts(vbo, &v, CIRCLE_NSEGMENTS, 1.0f, 0.0f, flag);

SHC.drw_light_point_lines = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
SHC.drw_light_point_lines = GPU_batch_create_ex(
GPU_PRIM_LINES, vbo, nullptr, GPU_BATCH_OWNS_VBO);
}
return SHC.drw_light_point_lines;
}
@@ -1572,10 +1593,11 @@ GPUBatch *DRW_cache_light_sun_lines_get(void)
int v = 0;

/* Direction Line */
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0, 0.0, 0.0}, 0});
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0, 0.0, -20.0}, 0}); /* Good default. */
GPU_vertbuf_vert_set(vbo, v++, Vert{{0.0, 0.0, 0.0}, 0});
GPU_vertbuf_vert_set(vbo, v++, Vert{{0.0, 0.0, -20.0}, 0}); /* Good default. */

SHC.drw_light_sun_lines = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
SHC.drw_light_sun_lines = GPU_batch_create_ex(
GPU_PRIM_LINES, vbo, nullptr, GPU_BATCH_OWNS_VBO);
}
return SHC.drw_light_sun_lines;
}
@@ -1605,18 +1627,19 @@ GPUBatch *DRW_cache_light_spot_lines_get(void)
float angle = (2.0f * M_PI * a) / CIRCLE_NSEGMENTS;
float s = sinf(angle);
float c = cosf(angle);
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, 0.0f, 0.0f}, 0});
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{s, c, -1.0f}, flag});
GPU_vertbuf_vert_set(vbo, v++, Vert{{0.0f, 0.0f, 0.0f}, 0});
GPU_vertbuf_vert_set(vbo, v++, Vert{{s, c, -1.0f}, flag});
}
/* Direction Line */
float zsta = light_distance_z_get('z', true);
float zend = light_distance_z_get('z', false);
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0, 0.0, zsta}, VCLASS_LIGHT_DIST});
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0, 0.0, zend}, VCLASS_LIGHT_DIST});
GPU_vertbuf_vert_set(vbo, v++, Vert{{0.0, 0.0, zsta}, VCLASS_LIGHT_DIST});
GPU_vertbuf_vert_set(vbo, v++, Vert{{0.0, 0.0, zend}, VCLASS_LIGHT_DIST});
circle_verts(vbo, &v, DIAMOND_NSEGMENTS, 1.2f, zsta, VCLASS_LIGHT_DIST | VCLASS_SCREENSPACE);
circle_verts(vbo, &v, DIAMOND_NSEGMENTS, 1.2f, zend, VCLASS_LIGHT_DIST | VCLASS_SCREENSPACE);

SHC.drw_light_spot_lines = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
SHC.drw_light_spot_lines = GPU_batch_create_ex(
GPU_PRIM_LINES, vbo, nullptr, GPU_BATCH_OWNS_VBO);
}
return SHC.drw_light_spot_lines;
}
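All of these getters keep the same lazily built cache shape through the conversion: build the batch the first time it is requested, then return the cached pointer. A rough standalone sketch of that shape, with a hypothetical `Batch` type and `build_spot_lines()` helper standing in for the GPU module:

struct Batch;               /* opaque handle, playing the role of GPUBatch */
Batch *build_spot_lines();  /* assumed to allocate and fill the batch */

static Batch *g_drw_light_spot_lines = nullptr;

Batch *cache_light_spot_lines_get()
{
  if (g_drw_light_spot_lines == nullptr) {
    /* First call: create the batch once; every later call reuses it. */
    g_drw_light_spot_lines = build_spot_lines();
  }
  return g_drw_light_spot_lines;
}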
|
||||
@@ -1632,18 +1655,18 @@ GPUBatch *DRW_cache_light_spot_volume_get(void)
|
||||
|
||||
int v = 0;
|
||||
/* Cone apex */
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, 0.0f, 0.0f}, 0});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{0.0f, 0.0f, 0.0f}, 0});
|
||||
/* Cone silhouette */
|
||||
int flag = VCLASS_LIGHT_SPOT_SHAPE;
|
||||
for (int a = 0; a < CIRCLE_NSEGMENTS + 1; a++) {
|
||||
float angle = (2.0f * M_PI * a) / CIRCLE_NSEGMENTS;
|
||||
float s = sinf(-angle);
|
||||
float c = cosf(-angle);
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{s, c, -1.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{s, c, -1.0f}, flag});
|
||||
}
|
||||
|
||||
SHC.drw_light_spot_volume = GPU_batch_create_ex(
|
||||
GPU_PRIM_TRI_FAN, vbo, NULL, GPU_BATCH_OWNS_VBO);
|
||||
GPU_PRIM_TRI_FAN, vbo, nullptr, GPU_BATCH_OWNS_VBO);
|
||||
}
|
||||
return SHC.drw_light_spot_volume;
|
||||
}
|
||||
@@ -1664,13 +1687,13 @@ GPUBatch *DRW_cache_light_area_disk_lines_get(void)
|
||||
/* Direction Line */
|
||||
float zsta = light_distance_z_get('z', true);
|
||||
float zend = light_distance_z_get('z', false);
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0, 0.0, zsta}, VCLASS_LIGHT_DIST});
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0, 0.0, zend}, VCLASS_LIGHT_DIST});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{0.0, 0.0, zsta}, VCLASS_LIGHT_DIST});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{0.0, 0.0, zend}, VCLASS_LIGHT_DIST});
|
||||
circle_verts(vbo, &v, DIAMOND_NSEGMENTS, 1.2f, zsta, VCLASS_LIGHT_DIST | VCLASS_SCREENSPACE);
|
||||
circle_verts(vbo, &v, DIAMOND_NSEGMENTS, 1.2f, zend, VCLASS_LIGHT_DIST | VCLASS_SCREENSPACE);
|
||||
|
||||
SHC.drw_light_area_disk_lines = GPU_batch_create_ex(
|
||||
GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
|
||||
GPU_PRIM_LINES, vbo, nullptr, GPU_BATCH_OWNS_VBO);
|
||||
}
|
||||
return SHC.drw_light_area_disk_lines;
|
||||
}
|
||||
@@ -1693,19 +1716,19 @@ GPUBatch *DRW_cache_light_area_square_lines_get(void)
|
||||
const float p[4][2] = {{-1.0f, -1.0f}, {-1.0f, 1.0f}, {1.0f, 1.0f}, {1.0f, -1.0f}};
|
||||
float x = p[(a + b) % 4][0];
|
||||
float y = p[(a + b) % 4][1];
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{x * 0.5f, y * 0.5f, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{x * 0.5f, y * 0.5f, 0.0f}, flag});
|
||||
}
|
||||
}
|
||||
/* Direction Line */
|
||||
float zsta = light_distance_z_get('z', true);
|
||||
float zend = light_distance_z_get('z', false);
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0, 0.0, zsta}, VCLASS_LIGHT_DIST});
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0, 0.0, zend}, VCLASS_LIGHT_DIST});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{0.0, 0.0, zsta}, VCLASS_LIGHT_DIST});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{0.0, 0.0, zend}, VCLASS_LIGHT_DIST});
|
||||
circle_verts(vbo, &v, DIAMOND_NSEGMENTS, 1.2f, zsta, VCLASS_LIGHT_DIST | VCLASS_SCREENSPACE);
|
||||
circle_verts(vbo, &v, DIAMOND_NSEGMENTS, 1.2f, zend, VCLASS_LIGHT_DIST | VCLASS_SCREENSPACE);
|
||||
|
||||
SHC.drw_light_area_square_lines = GPU_batch_create_ex(
|
||||
GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
|
||||
GPU_PRIM_LINES, vbo, nullptr, GPU_BATCH_OWNS_VBO);
|
||||
}
|
||||
return SHC.drw_light_area_square_lines;
|
||||
}
|
||||
@@ -1774,7 +1797,7 @@ GPUBatch *DRW_cache_speaker_get(void)
|
||||
}
|
||||
}
|
||||
|
||||
SHC.drw_speaker = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
|
||||
SHC.drw_speaker = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, nullptr, GPU_BATCH_OWNS_VBO);
|
||||
}
|
||||
return SHC.drw_speaker;
|
||||
}
|
||||
@@ -1813,28 +1836,29 @@ GPUBatch *DRW_cache_lightprobe_cube_get(void)
|
||||
float t1[2], t2[2];
|
||||
copy_v2_v2(t1, p[i]);
|
||||
copy_v2_v2(t2, p[(i + 1) % 6]);
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{t1[0] * r, t1[1] * r, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{t2[0] * r, t2[1] * r, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{t1[0] * r, t1[1] * r, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{t2[0] * r, t2[1] * r, 0.0f}, flag});
|
||||
}
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{p[1][0] * r, p[1][1] * r, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{p[6][0] * r, p[6][1] * r, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{p[5][0] * r, p[5][1] * r, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{p[6][0] * r, p[6][1] * r, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{p[3][0] * r, p[3][1] * r, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{p[6][0] * r, p[6][1] * r, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{p[1][0] * r, p[1][1] * r, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{p[6][0] * r, p[6][1] * r, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{p[5][0] * r, p[5][1] * r, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{p[6][0] * r, p[6][1] * r, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{p[3][0] * r, p[3][1] * r, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{p[6][0] * r, p[6][1] * r, 0.0f}, flag});
|
||||
/* Direction Lines */
|
||||
flag = VCLASS_LIGHT_DIST | VCLASS_SCREENSPACE;
|
||||
for (int i = 0; i < 6; i++) {
|
||||
char axes[] = "zZyYxX";
|
||||
float zsta = light_distance_z_get(axes[i], true);
|
||||
float zend = light_distance_z_get(axes[i], false);
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, 0.0f, zsta}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, 0.0f, zend}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{0.0f, 0.0f, zsta}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{0.0f, 0.0f, zend}, flag});
|
||||
circle_verts(vbo, &v, DIAMOND_NSEGMENTS, 1.2f, zsta, flag);
|
||||
circle_verts(vbo, &v, DIAMOND_NSEGMENTS, 1.2f, zend, flag);
|
||||
}
|
||||
|
||||
SHC.drw_lightprobe_cube = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
|
||||
SHC.drw_lightprobe_cube = GPU_batch_create_ex(
|
||||
GPU_PRIM_LINES, vbo, nullptr, GPU_BATCH_OWNS_VBO);
|
||||
}
|
||||
return SHC.drw_lightprobe_cube;
|
||||
}
|
||||
@@ -1867,36 +1891,37 @@ GPUBatch *DRW_cache_lightprobe_grid_get(void)
|
||||
float t1[2], t2[2], tr[2];
|
||||
copy_v2_v2(t1, p[i]);
|
||||
copy_v2_v2(t2, p[(i + 1) % 6]);
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{t1[0] * r, t1[1] * r, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{t2[0] * r, t2[1] * r, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{t1[0] * r, t1[1] * r, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{t2[0] * r, t2[1] * r, 0.0f}, flag});
|
||||
/* Internal wires. */
|
||||
for (int j = 1; j < 2; j++) {
|
||||
mul_v2_v2fl(tr, p[(i / 2) * 2 + 1], -0.5f * j);
|
||||
add_v2_v2v2(t1, p[i], tr);
|
||||
add_v2_v2v2(t2, p[(i + 1) % 6], tr);
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{t1[0] * r, t1[1] * r, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{t2[0] * r, t2[1] * r, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{t1[0] * r, t1[1] * r, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{t2[0] * r, t2[1] * r, 0.0f}, flag});
|
||||
}
|
||||
}
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{p[1][0] * r, p[1][1] * r, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{p[6][0] * r, p[6][1] * r, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{p[5][0] * r, p[5][1] * r, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{p[6][0] * r, p[6][1] * r, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{p[3][0] * r, p[3][1] * r, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{p[6][0] * r, p[6][1] * r, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{p[1][0] * r, p[1][1] * r, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{p[6][0] * r, p[6][1] * r, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{p[5][0] * r, p[5][1] * r, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{p[6][0] * r, p[6][1] * r, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{p[3][0] * r, p[3][1] * r, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{p[6][0] * r, p[6][1] * r, 0.0f}, flag});
|
||||
/* Direction Lines */
|
||||
flag = VCLASS_LIGHT_DIST | VCLASS_SCREENSPACE;
|
||||
for (int i = 0; i < 6; i++) {
|
||||
char axes[] = "zZyYxX";
|
||||
float zsta = light_distance_z_get(axes[i], true);
|
||||
float zend = light_distance_z_get(axes[i], false);
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, 0.0f, zsta}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, 0.0f, zend}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{0.0f, 0.0f, zsta}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{0.0f, 0.0f, zend}, flag});
|
||||
circle_verts(vbo, &v, DIAMOND_NSEGMENTS, 1.2f, zsta, flag);
|
||||
circle_verts(vbo, &v, DIAMOND_NSEGMENTS, 1.2f, zend, flag);
|
||||
}
|
||||
|
||||
SHC.drw_lightprobe_grid = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
|
||||
SHC.drw_lightprobe_grid = GPU_batch_create_ex(
|
||||
GPU_PRIM_LINES, vbo, nullptr, GPU_BATCH_OWNS_VBO);
|
||||
}
|
||||
return SHC.drw_lightprobe_grid;
|
||||
}
|
||||
@@ -1924,11 +1949,12 @@ GPUBatch *DRW_cache_lightprobe_planar_get(void)
|
||||
for (int a = 0; a < 2; a++) {
|
||||
float x = p[(i + a) % 4][0] * r;
|
||||
float y = p[(i + a) % 4][1] * r;
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{x, y, 0.0}, VCLASS_SCREENSPACE});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{x, y, 0.0}, VCLASS_SCREENSPACE});
|
||||
}
|
||||
}
|
||||
|
||||
SHC.drw_lightprobe_planar = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
|
||||
SHC.drw_lightprobe_planar = GPU_batch_create_ex(
|
||||
GPU_PRIM_LINES, vbo, nullptr, GPU_BATCH_OWNS_VBO);
|
||||
}
|
||||
return SHC.drw_lightprobe_planar;
|
||||
}
|
||||
@@ -2072,7 +2098,7 @@ GPUBatch *DRW_cache_bone_octahedral_get(void)
|
||||
}
|
||||
}
|
||||
|
||||
SHC.drw_bone_octahedral = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
|
||||
SHC.drw_bone_octahedral = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, nullptr, GPU_BATCH_OWNS_VBO);
|
||||
}
|
||||
return SHC.drw_bone_octahedral;
|
||||
}
|
||||
@@ -2130,7 +2156,7 @@ GPUBatch *DRW_cache_bone_box_get(void)
|
||||
}
|
||||
}
|
||||
|
||||
SHC.drw_bone_box = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
|
||||
SHC.drw_bone_box = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, nullptr, GPU_BATCH_OWNS_VBO);
|
||||
}
|
||||
return SHC.drw_bone_box;
|
||||
}
|
||||
@@ -2214,7 +2240,8 @@ GPUBatch *DRW_cache_bone_envelope_solid_get(void)
|
||||
GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, co2);
|
||||
}
|
||||
|
||||
SHC.drw_bone_envelope = GPU_batch_create_ex(GPU_PRIM_TRI_STRIP, vbo, NULL, GPU_BATCH_OWNS_VBO);
|
||||
SHC.drw_bone_envelope = GPU_batch_create_ex(
|
||||
GPU_PRIM_TRI_STRIP, vbo, nullptr, GPU_BATCH_OWNS_VBO);
|
||||
}
|
||||
return SHC.drw_bone_envelope;
|
||||
}
|
||||
@@ -2258,7 +2285,7 @@ GPUBatch *DRW_cache_bone_envelope_outline_get(void)
|
||||
}
|
||||
|
||||
SHC.drw_bone_envelope_outline = GPU_batch_create_ex(
|
||||
GPU_PRIM_LINE_STRIP, vbo, NULL, GPU_BATCH_OWNS_VBO);
|
||||
GPU_PRIM_LINE_STRIP, vbo, nullptr, GPU_BATCH_OWNS_VBO);
|
||||
#undef CIRCLE_RESOL
|
||||
}
|
||||
return SHC.drw_bone_envelope_outline;
|
||||
@@ -2308,7 +2335,7 @@ GPUBatch *DRW_cache_bone_point_get(void)
|
||||
}
|
||||
}
|
||||
|
||||
SHC.drw_bone_point = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
|
||||
SHC.drw_bone_point = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, nullptr, GPU_BATCH_OWNS_VBO);
|
||||
#else
|
||||
# define CIRCLE_RESOL 64
|
||||
float v[2];
|
||||
@@ -2332,7 +2359,7 @@ GPUBatch *DRW_cache_bone_point_get(void)
|
||||
GPU_vertbuf_attr_set(vbo, attr_id.pos, a, v);
|
||||
}
|
||||
|
||||
SHC.drw_bone_point = GPU_batch_create_ex(GPU_PRIM_TRI_FAN, vbo, NULL, GPU_BATCH_OWNS_VBO);
|
||||
SHC.drw_bone_point = GPU_batch_create_ex(GPU_PRIM_TRI_FAN, vbo, nullptr, GPU_BATCH_OWNS_VBO);
|
||||
# undef CIRCLE_RESOL
|
||||
#endif
|
||||
}
|
||||
@@ -2344,7 +2371,8 @@ GPUBatch *DRW_cache_bone_point_wire_outline_get(void)
|
||||
if (!SHC.drw_bone_point_wire) {
|
||||
#if 0 /* old style geometry sphere */
|
||||
GPUVertBuf *vbo = sphere_wire_vbo(0.05f);
|
||||
SHC.drw_bone_point_wire = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
|
||||
SHC.drw_bone_point_wire = GPU_batch_create_ex(
|
||||
GPU_PRIM_LINES, vbo, nullptr, GPU_BATCH_OWNS_VBO);
|
||||
#else
|
||||
# define CIRCLE_RESOL 64
|
||||
const float radius = 0.05f;
|
||||
@@ -2370,7 +2398,7 @@ GPUBatch *DRW_cache_bone_point_wire_outline_get(void)
|
||||
}
|
||||
|
||||
SHC.drw_bone_point_wire = GPU_batch_create_ex(
|
||||
GPU_PRIM_LINE_STRIP, vbo, NULL, GPU_BATCH_OWNS_VBO);
|
||||
GPU_PRIM_LINE_STRIP, vbo, nullptr, GPU_BATCH_OWNS_VBO);
|
||||
# undef CIRCLE_RESOL
|
||||
#endif
|
||||
}
|
||||
@@ -2543,15 +2571,15 @@ GPUBatch *DRW_cache_bone_arrows_get(void)
int flag = VCLASS_EMPTY_AXES | VCLASS_SCREENALIGNED;
/* Vertex layout is XY screen position and axis in Z.
* Fractional part of Z is a positive offset at axis unit position. */
float p[3] = {0.0f, 0.0f, axis};
float p[3] = {0.0f, 0.0f, float(axis)};
/* center to axis line */
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, 0.0f, 0.0f}, 0});
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{p[0], p[1], p[2]}, flag});
GPU_vertbuf_vert_set(vbo, v++, Vert{{0.0f, 0.0f, 0.0f}, 0});
GPU_vertbuf_vert_set(vbo, v++, Vert{{p[0], p[1], p[2]}, flag});
/* Axis end marker */
for (int j = 1; j < MARKER_FILL_LAYER + 1; j++) {
for (int i = 0; i < MARKER_LEN; i++) {
mul_v2_v2fl(p, axis_marker[i], 4.0f * j / (float)MARKER_FILL_LAYER);
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{p[0], p[1], p[2]}, flag});
GPU_vertbuf_vert_set(vbo, v++, Vert{{p[0], p[1], p[2]}, flag});
}
}
/* Axis name */
@@ -2561,11 +2589,11 @@ GPUBatch *DRW_cache_bone_arrows_get(void)
p[2] = axis + 0.25f;
for (int i = 0; i < axis_v_len[axis]; i++) {
mul_v2_v2fl(p, axis_v[i], 4.0f);
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{p[0], p[1], p[2]}, flag});
GPU_vertbuf_vert_set(vbo, v++, Vert{{p[0], p[1], p[2]}, flag});
}
}

SHC.drw_bone_arrows = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
SHC.drw_bone_arrows = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, nullptr, GPU_BATCH_OWNS_VBO);
}
return SHC.drw_bone_arrows;
}
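The `float(axis)` cast in this hunk fixes a different C/C++ gap than the compound literals: inside a braced initializer list, C++ treats the implicit conversion from an integer (`axis` is presumably the int loop counter here) to float as a narrowing error, while C compiles it. A standalone sketch of the rule:

void narrowing_example(int axis)
{
  /* Ill-formed in C++: `axis` narrows from int to float inside a braced
   * initializer list. C would accept the same line.
   *   float p[3] = {0.0f, 0.0f, axis};
   */
  float p[3] = {0.0f, 0.0f, float(axis)}; /* explicit conversion, OK */
  (void)p;
}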
|
||||
@@ -2644,7 +2672,7 @@ GPUBatch *DRW_cache_bone_dof_sphere_get(void)
|
||||
/* TODO: allocate right count from the beginning. */
|
||||
GPU_vertbuf_data_resize(vbo, v);
|
||||
|
||||
SHC.drw_bone_dof_sphere = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
|
||||
SHC.drw_bone_dof_sphere = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, nullptr, GPU_BATCH_OWNS_VBO);
|
||||
}
|
||||
return SHC.drw_bone_dof_sphere;
|
||||
}
|
||||
@@ -2676,7 +2704,7 @@ GPUBatch *DRW_cache_bone_dof_lines_get(void)
|
||||
}
|
||||
|
||||
SHC.drw_bone_dof_lines = GPU_batch_create_ex(
|
||||
GPU_PRIM_LINE_LOOP, vbo, NULL, GPU_BATCH_OWNS_VBO);
|
||||
GPU_PRIM_LINE_LOOP, vbo, nullptr, GPU_BATCH_OWNS_VBO);
|
||||
}
|
||||
return SHC.drw_bone_dof_lines;
|
||||
}
|
||||
@@ -2705,18 +2733,18 @@ GPUBatch *DRW_cache_camera_frame_get(void)
|
||||
for (int b = 0; b < 2; b++) {
|
||||
float x = p[(a + b) % 4][0];
|
||||
float y = p[(a + b) % 4][1];
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{x, y, 1.0f}, VCLASS_CAMERA_FRAME});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{x, y, 1.0f}, VCLASS_CAMERA_FRAME});
|
||||
}
|
||||
}
|
||||
/* Wires to origin. */
|
||||
for (int a = 0; a < 4; a++) {
|
||||
float x = p[a][0];
|
||||
float y = p[a][1];
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{x, y, 1.0f}, VCLASS_CAMERA_FRAME});
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{x, y, 0.0f}, VCLASS_CAMERA_FRAME});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{x, y, 1.0f}, VCLASS_CAMERA_FRAME});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{x, y, 0.0f}, VCLASS_CAMERA_FRAME});
|
||||
}
|
||||
|
||||
SHC.drw_camera_frame = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
|
||||
SHC.drw_camera_frame = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, nullptr, GPU_BATCH_OWNS_VBO);
|
||||
}
|
||||
return SHC.drw_camera_frame;
|
||||
}
|
||||
@@ -2737,11 +2765,11 @@ GPUBatch *DRW_cache_camera_volume_get(void)
|
||||
float x = bone_box_verts[bone_box_solid_tris[i][a]][2];
|
||||
float y = bone_box_verts[bone_box_solid_tris[i][a]][0];
|
||||
float z = bone_box_verts[bone_box_solid_tris[i][a]][1];
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{x, y, z}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{x, y, z}, flag});
|
||||
}
|
||||
}
|
||||
|
||||
SHC.drw_camera_volume = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
|
||||
SHC.drw_camera_volume = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, nullptr, GPU_BATCH_OWNS_VBO);
|
||||
}
|
||||
return SHC.drw_camera_volume;
|
||||
}
|
||||
@@ -2761,11 +2789,11 @@ GPUBatch *DRW_cache_camera_volume_wire_get(void)
|
||||
float x = bone_box_verts[bone_box_wire[i]][2];
|
||||
float y = bone_box_verts[bone_box_wire[i]][0];
|
||||
float z = bone_box_verts[bone_box_wire[i]][1];
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{x, y, z}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{x, y, z}, flag});
|
||||
}
|
||||
|
||||
SHC.drw_camera_volume_wire = GPU_batch_create_ex(
|
||||
GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
|
||||
GPU_PRIM_LINES, vbo, nullptr, GPU_BATCH_OWNS_VBO);
|
||||
}
|
||||
return SHC.drw_camera_volume_wire;
|
||||
}
|
||||
@@ -2785,11 +2813,12 @@ GPUBatch *DRW_cache_camera_tria_wire_get(void)
|
||||
for (int b = 0; b < 2; b++) {
|
||||
float x = p[(a + b) % 3][0];
|
||||
float y = p[(a + b) % 3][1];
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{x, y, 1.0f}, VCLASS_CAMERA_FRAME});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{x, y, 1.0f}, VCLASS_CAMERA_FRAME});
|
||||
}
|
||||
}
|
||||
|
||||
SHC.drw_camera_tria_wire = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
|
||||
SHC.drw_camera_tria_wire = GPU_batch_create_ex(
|
||||
GPU_PRIM_LINES, vbo, nullptr, GPU_BATCH_OWNS_VBO);
|
||||
}
|
||||
return SHC.drw_camera_tria_wire;
|
||||
}
|
||||
@@ -2805,11 +2834,11 @@ GPUBatch *DRW_cache_camera_tria_get(void)
|
||||
|
||||
int v = 0;
|
||||
/* Use camera frame position */
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{-1.0f, 1.0f, 1.0f}, VCLASS_CAMERA_FRAME});
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{1.0f, 1.0f, 1.0f}, VCLASS_CAMERA_FRAME});
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, 0.0f, 1.0f}, VCLASS_CAMERA_FRAME});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{-1.0f, 1.0f, 1.0f}, VCLASS_CAMERA_FRAME});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{1.0f, 1.0f, 1.0f}, VCLASS_CAMERA_FRAME});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{0.0f, 0.0f, 1.0f}, VCLASS_CAMERA_FRAME});
|
||||
|
||||
SHC.drw_camera_tria = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
|
||||
SHC.drw_camera_tria = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, nullptr, GPU_BATCH_OWNS_VBO);
|
||||
}
|
||||
return SHC.drw_camera_tria;
|
||||
}
|
||||
@@ -2825,17 +2854,18 @@ GPUBatch *DRW_cache_camera_distances_get(void)
|
||||
|
||||
int v = 0;
|
||||
/* Direction Line */
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0, 0.0, 0.0}, VCLASS_CAMERA_DIST});
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0, 0.0, 1.0}, VCLASS_CAMERA_DIST});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{0.0, 0.0, 0.0}, VCLASS_CAMERA_DIST});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{0.0, 0.0, 1.0}, VCLASS_CAMERA_DIST});
|
||||
circle_verts(vbo, &v, DIAMOND_NSEGMENTS, 1.5f, 0.0f, VCLASS_CAMERA_DIST | VCLASS_SCREENSPACE);
|
||||
circle_verts(vbo, &v, DIAMOND_NSEGMENTS, 1.5f, 1.0f, VCLASS_CAMERA_DIST | VCLASS_SCREENSPACE);
|
||||
/* Focus cross */
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{1.0, 0.0, 2.0}, VCLASS_CAMERA_DIST});
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{-1.0, 0.0, 2.0}, VCLASS_CAMERA_DIST});
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0, 1.0, 2.0}, VCLASS_CAMERA_DIST});
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0, -1.0, 2.0}, VCLASS_CAMERA_DIST});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{1.0, 0.0, 2.0}, VCLASS_CAMERA_DIST});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{-1.0, 0.0, 2.0}, VCLASS_CAMERA_DIST});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{0.0, 1.0, 2.0}, VCLASS_CAMERA_DIST});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{0.0, -1.0, 2.0}, VCLASS_CAMERA_DIST});
|
||||
|
||||
SHC.drw_camera_distances = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
|
||||
SHC.drw_camera_distances = GPU_batch_create_ex(
|
||||
GPU_PRIM_LINES, vbo, nullptr, GPU_BATCH_OWNS_VBO);
|
||||
}
|
||||
return SHC.drw_camera_distances;
|
||||
}
|
||||
@@ -2849,37 +2879,37 @@ GPUBatch *DRW_cache_camera_distances_get(void)
GPUBatch *DRW_cache_mesh_all_verts_get(Object *ob)
{
BLI_assert(ob->type == OB_MESH);
return DRW_mesh_batch_cache_get_all_verts(ob->data);
return DRW_mesh_batch_cache_get_all_verts(static_cast<Mesh *>(ob->data));
}

GPUBatch *DRW_cache_mesh_all_edges_get(Object *ob)
{
BLI_assert(ob->type == OB_MESH);
return DRW_mesh_batch_cache_get_all_edges(ob->data);
return DRW_mesh_batch_cache_get_all_edges(static_cast<Mesh *>(ob->data));
}

GPUBatch *DRW_cache_mesh_loose_edges_get(Object *ob)
{
BLI_assert(ob->type == OB_MESH);
return DRW_mesh_batch_cache_get_loose_edges(ob->data);
return DRW_mesh_batch_cache_get_loose_edges(static_cast<Mesh *>(ob->data));
}

GPUBatch *DRW_cache_mesh_edge_detection_get(Object *ob, bool *r_is_manifold)
{
BLI_assert(ob->type == OB_MESH);
return DRW_mesh_batch_cache_get_edge_detection(ob->data, r_is_manifold);
return DRW_mesh_batch_cache_get_edge_detection(static_cast<Mesh *>(ob->data), r_is_manifold);
}

GPUBatch *DRW_cache_mesh_surface_get(Object *ob)
{
BLI_assert(ob->type == OB_MESH);
return DRW_mesh_batch_cache_get_surface(ob->data);
return DRW_mesh_batch_cache_get_surface(static_cast<Mesh *>(ob->data));
}

GPUBatch *DRW_cache_mesh_surface_edges_get(Object *ob)
{
BLI_assert(ob->type == OB_MESH);
return DRW_mesh_batch_cache_get_surface_edges(ob, ob->data);
return DRW_mesh_batch_cache_get_surface_edges(ob, static_cast<Mesh *>(ob->data));
}
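The `static_cast<Mesh *>(ob->data)` insertions above come from another language difference: C converts `void *` to any object pointer implicitly, C++ does not. A small sketch with hypothetical `Object`/`Mesh` stand-ins (in the real DNA structs, `Object::data` is a `void *` pointing at the object's data-block):

struct Mesh {
  int verts_num;
};

struct Object {
  int type;
  void *data; /* Mesh, Curve, Lattice, ... depending on `type` */
};

int mesh_verts_num(const Object *ob)
{
  /* C would accept `const Mesh *mesh = ob->data;`; C++ requires the cast. */
  const Mesh *mesh = static_cast<const Mesh *>(ob->data);
  return mesh->verts_num;
}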
|
||||
|
||||
GPUBatch **DRW_cache_mesh_surface_shaded_get(Object *ob,
|
||||
@@ -2887,55 +2917,56 @@ GPUBatch **DRW_cache_mesh_surface_shaded_get(Object *ob,
|
||||
uint gpumat_array_len)
|
||||
{
|
||||
BLI_assert(ob->type == OB_MESH);
|
||||
return DRW_mesh_batch_cache_get_surface_shaded(ob, ob->data, gpumat_array, gpumat_array_len);
|
||||
return DRW_mesh_batch_cache_get_surface_shaded(
|
||||
ob, static_cast<Mesh *>(ob->data), gpumat_array, gpumat_array_len);
|
||||
}
|
||||
|
||||
GPUBatch **DRW_cache_mesh_surface_texpaint_get(Object *ob)
|
||||
{
|
||||
BLI_assert(ob->type == OB_MESH);
|
||||
return DRW_mesh_batch_cache_get_surface_texpaint(ob, ob->data);
|
||||
return DRW_mesh_batch_cache_get_surface_texpaint(ob, static_cast<Mesh *>(ob->data));
|
||||
}
|
||||
|
||||
GPUBatch *DRW_cache_mesh_surface_texpaint_single_get(Object *ob)
|
||||
{
|
||||
BLI_assert(ob->type == OB_MESH);
|
||||
return DRW_mesh_batch_cache_get_surface_texpaint_single(ob, ob->data);
|
||||
return DRW_mesh_batch_cache_get_surface_texpaint_single(ob, static_cast<Mesh *>(ob->data));
|
||||
}
|
||||
|
||||
GPUBatch *DRW_cache_mesh_surface_vertpaint_get(Object *ob)
|
||||
{
|
||||
BLI_assert(ob->type == OB_MESH);
|
||||
return DRW_mesh_batch_cache_get_surface_vertpaint(ob, ob->data);
|
||||
return DRW_mesh_batch_cache_get_surface_vertpaint(ob, static_cast<Mesh *>(ob->data));
|
||||
}
|
||||
|
||||
GPUBatch *DRW_cache_mesh_surface_sculptcolors_get(Object *ob)
|
||||
{
|
||||
BLI_assert(ob->type == OB_MESH);
|
||||
return DRW_mesh_batch_cache_get_surface_sculpt(ob, ob->data);
|
||||
return DRW_mesh_batch_cache_get_surface_sculpt(ob, static_cast<Mesh *>(ob->data));
|
||||
}
|
||||
|
||||
GPUBatch *DRW_cache_mesh_surface_weights_get(Object *ob)
|
||||
{
|
||||
BLI_assert(ob->type == OB_MESH);
|
||||
return DRW_mesh_batch_cache_get_surface_weights(ob->data);
|
||||
return DRW_mesh_batch_cache_get_surface_weights(static_cast<Mesh *>(ob->data));
|
||||
}
|
||||
|
||||
GPUBatch *DRW_cache_mesh_face_wireframe_get(Object *ob)
|
||||
{
|
||||
BLI_assert(ob->type == OB_MESH);
|
||||
return DRW_mesh_batch_cache_get_wireframes_face(ob->data);
|
||||
return DRW_mesh_batch_cache_get_wireframes_face(static_cast<Mesh *>(ob->data));
|
||||
}
|
||||
|
||||
GPUBatch *DRW_cache_mesh_surface_mesh_analysis_get(Object *ob)
|
||||
{
|
||||
BLI_assert(ob->type == OB_MESH);
|
||||
return DRW_mesh_batch_cache_get_edit_mesh_analysis(ob->data);
|
||||
return DRW_mesh_batch_cache_get_edit_mesh_analysis(static_cast<Mesh *>(ob->data));
|
||||
}
|
||||
|
||||
GPUBatch *DRW_cache_mesh_surface_viewer_attribute_get(Object *ob)
|
||||
{
|
||||
BLI_assert(ob->type == OB_MESH);
|
||||
return DRW_mesh_batch_cache_get_surface_viewer_attribute(ob->data);
|
||||
return DRW_mesh_batch_cache_get_surface_viewer_attribute(static_cast<Mesh *>(ob->data));
|
||||
}
|
||||
|
||||
/** \} */
|
||||
@@ -2947,21 +2978,21 @@ GPUBatch *DRW_cache_mesh_surface_viewer_attribute_get(Object *ob)
|
||||
GPUBatch *DRW_cache_curve_edge_wire_get(Object *ob)
|
||||
{
|
||||
BLI_assert(ob->type == OB_CURVES_LEGACY);
|
||||
Curve *cu = ob->data;
|
||||
Curve *cu = static_cast<Curve *>(ob->data);
|
||||
return DRW_curve_batch_cache_get_wire_edge(cu);
|
||||
}
|
||||
|
||||
GPUBatch *DRW_cache_curve_edge_wire_viewer_attribute_get(Object *ob)
|
||||
{
|
||||
BLI_assert(ob->type == OB_CURVES_LEGACY);
|
||||
Curve *cu = ob->data;
|
||||
Curve *cu = static_cast<Curve *>(ob->data);
|
||||
return DRW_curve_batch_cache_get_wire_edge_viewer_attribute(cu);
|
||||
}
|
||||
|
||||
GPUBatch *DRW_cache_curve_edge_normal_get(Object *ob)
|
||||
{
|
||||
BLI_assert(ob->type == OB_CURVES_LEGACY);
|
||||
Curve *cu = ob->data;
|
||||
Curve *cu = static_cast<Curve *>(ob->data);
|
||||
return DRW_curve_batch_cache_get_normal_edge(cu);
|
||||
}
|
||||
|
||||
@@ -2969,7 +3000,7 @@ GPUBatch *DRW_cache_curve_edge_overlay_get(Object *ob)
|
||||
{
|
||||
BLI_assert(ELEM(ob->type, OB_CURVES_LEGACY, OB_SURF));
|
||||
|
||||
Curve *cu = ob->data;
|
||||
Curve *cu = static_cast<Curve *>(ob->data);
|
||||
return DRW_curve_batch_cache_get_edit_edges(cu);
|
||||
}
|
||||
|
||||
@@ -2977,7 +3008,7 @@ GPUBatch *DRW_cache_curve_vert_overlay_get(Object *ob)
|
||||
{
|
||||
BLI_assert(ELEM(ob->type, OB_CURVES_LEGACY, OB_SURF));
|
||||
|
||||
Curve *cu = ob->data;
|
||||
Curve *cu = static_cast<Curve *>(ob->data);
|
||||
return DRW_curve_batch_cache_get_edit_verts(cu);
|
||||
}
|
||||
|
||||
@@ -2990,7 +3021,7 @@ GPUBatch *DRW_cache_curve_vert_overlay_get(Object *ob)
|
||||
GPUBatch *DRW_cache_text_edge_wire_get(Object *ob)
|
||||
{
|
||||
BLI_assert(ob->type == OB_FONT);
|
||||
Curve *cu = ob->data;
|
||||
Curve *cu = static_cast<Curve *>(ob->data);
|
||||
return DRW_curve_batch_cache_get_wire_edge(cu);
|
||||
}
|
||||
|
||||
@@ -3003,7 +3034,7 @@ GPUBatch *DRW_cache_text_edge_wire_get(Object *ob)
|
||||
GPUBatch *DRW_cache_surf_edge_wire_get(Object *ob)
|
||||
{
|
||||
BLI_assert(ob->type == OB_SURF);
|
||||
Curve *cu = ob->data;
|
||||
Curve *cu = static_cast<Curve *>(ob->data);
|
||||
return DRW_curve_batch_cache_get_wire_edge(cu);
|
||||
}
|
||||
|
||||
@@ -3017,7 +3048,7 @@ GPUBatch *DRW_cache_lattice_verts_get(Object *ob)
{
BLI_assert(ob->type == OB_LATTICE);

Lattice *lt = ob->data;
Lattice *lt = static_cast<Lattice *>(ob->data);
return DRW_lattice_batch_cache_get_all_verts(lt);
}

@@ -3025,7 +3056,7 @@ GPUBatch *DRW_cache_lattice_wire_get(Object *ob, bool use_weight)
{
BLI_assert(ob->type == OB_LATTICE);

Lattice *lt = ob->data;
Lattice *lt = static_cast<Lattice *>(ob->data);
int actdef = -1;

if (use_weight && !BLI_listbase_is_empty(&lt->vertex_group_names) && lt->editlatt->latt->dvert) {
@@ -3039,7 +3070,7 @@ GPUBatch *DRW_cache_lattice_vert_overlay_get(Object *ob)
{
BLI_assert(ob->type == OB_LATTICE);

Lattice *lt = ob->data;
Lattice *lt = static_cast<Lattice *>(ob->data);
return DRW_lattice_batch_cache_get_edit_verts(lt);
}
|
||||
@@ -3058,13 +3089,13 @@ GPUBatch *DRW_cache_lattice_vert_overlay_get(Object *ob)
|
||||
GPUBatch *DRW_cache_volume_face_wireframe_get(Object *ob)
|
||||
{
|
||||
BLI_assert(ob->type == OB_VOLUME);
|
||||
return DRW_volume_batch_cache_get_wireframes_face(ob->data);
|
||||
return DRW_volume_batch_cache_get_wireframes_face(static_cast<Volume *>(ob->data));
|
||||
}
|
||||
|
||||
GPUBatch *DRW_cache_volume_selection_surface_get(Object *ob)
|
||||
{
|
||||
BLI_assert(ob->type == OB_VOLUME);
|
||||
return DRW_volume_batch_cache_get_selection_surface(ob->data);
|
||||
return DRW_volume_batch_cache_get_selection_surface(static_cast<Volume *>(ob->data));
|
||||
}
|
||||
|
||||
/** \} */
|
||||
@@ -3116,15 +3147,15 @@ GPUBatch *DRW_cache_particles_get_prim(int type)
|
||||
|
||||
int v = 0;
|
||||
int flag = 0;
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, -1.0f, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, 1.0f, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{-1.0f, 0.0f, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{1.0f, 0.0f, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, 0.0f, -1.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, 0.0f, 1.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{0.0f, -1.0f, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{0.0f, 1.0f, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{-1.0f, 0.0f, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{1.0f, 0.0f, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{0.0f, 0.0f, -1.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{0.0f, 0.0f, 1.0f}, flag});
|
||||
|
||||
SHC.drw_particle_cross = GPU_batch_create_ex(
|
||||
GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
|
||||
GPU_PRIM_LINES, vbo, nullptr, GPU_BATCH_OWNS_VBO);
|
||||
}
|
||||
|
||||
return SHC.drw_particle_cross;
|
||||
@@ -3137,14 +3168,15 @@ GPUBatch *DRW_cache_particles_get_prim(int type)
|
||||
int v = 0;
|
||||
int flag = VCLASS_EMPTY_AXES;
|
||||
/* Set minimum to 0.001f so we can easily normalize to get the color. */
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, 0.0001f, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, 2.0f, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0001f, 0.0f, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{2.0f, 0.0f, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, 0.0f, 0.0001f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, 0.0f, 2.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{0.0f, 0.0001f, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{0.0f, 2.0f, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{0.0001f, 0.0f, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{2.0f, 0.0f, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{0.0f, 0.0f, 0.0001f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{0.0f, 0.0f, 2.0f}, flag});
|
||||
|
||||
SHC.drw_particle_axis = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
|
||||
SHC.drw_particle_axis = GPU_batch_create_ex(
|
||||
GPU_PRIM_LINES, vbo, nullptr, GPU_BATCH_OWNS_VBO);
|
||||
}
|
||||
|
||||
return SHC.drw_particle_axis;
|
||||
@@ -3161,11 +3193,11 @@ GPUBatch *DRW_cache_particles_get_prim(int type)
|
||||
float angle = (2.0f * M_PI * a) / CIRCLE_RESOL;
|
||||
float x = sinf(angle);
|
||||
float y = cosf(angle);
|
||||
GPU_vertbuf_vert_set(vbo, v++, &(Vert){{x, y, 0.0f}, flag});
|
||||
GPU_vertbuf_vert_set(vbo, v++, Vert{{x, y, 0.0f}, flag});
|
||||
}
|
||||
|
||||
SHC.drw_particle_circle = GPU_batch_create_ex(
|
||||
GPU_PRIM_LINE_STRIP, vbo, NULL, GPU_BATCH_OWNS_VBO);
|
||||
GPU_PRIM_LINE_STRIP, vbo, nullptr, GPU_BATCH_OWNS_VBO);
|
||||
}
|
||||
|
||||
return SHC.drw_particle_circle;
|
||||
@@ -3175,14 +3207,14 @@ GPUBatch *DRW_cache_particles_get_prim(int type)
|
||||
break;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
GPUBatch *DRW_cache_cursor_get(bool crosshair_lines)
|
||||
{
|
||||
GPUBatch **drw_cursor = crosshair_lines ? &SHC.drw_cursor : &SHC.drw_cursor_only_circle;
|
||||
|
||||
if (*drw_cursor == NULL) {
|
||||
if (*drw_cursor == nullptr) {
|
||||
const float f5 = 0.25f;
|
||||
const float f10 = 0.5f;
|
||||
const float f20 = 1.0f;
|
||||
@@ -3218,7 +3250,7 @@ GPUBatch *DRW_cache_cursor_get(bool crosshair_lines)
|
||||
|
||||
GPU_vertbuf_attr_set(vbo, attr_id.color, v, (i % 2 == 0) ? red : white);
|
||||
|
||||
GPU_vertbuf_attr_set(vbo, attr_id.pos, v, (const float[2]){x, y});
|
||||
GPU_vertbuf_attr_set(vbo, attr_id.pos, v, blender::float2{x, y});
|
||||
GPU_indexbuf_add_generic_vert(&elb, v++);
|
||||
}
|
||||
GPU_indexbuf_add_generic_vert(&elb, 0);
|
||||
@@ -3229,37 +3261,37 @@ GPUBatch *DRW_cache_cursor_get(bool crosshair_lines)
|
||||
|
||||
GPU_indexbuf_add_primitive_restart(&elb);
|
||||
|
||||
GPU_vertbuf_attr_set(vbo, attr_id.pos, v, (const float[2]){-f20, 0});
|
||||
GPU_vertbuf_attr_set(vbo, attr_id.pos, v, blender::float2{-f20, 0});
|
||||
GPU_vertbuf_attr_set(vbo, attr_id.color, v, crosshair_color);
|
||||
GPU_indexbuf_add_generic_vert(&elb, v++);
|
||||
GPU_vertbuf_attr_set(vbo, attr_id.pos, v, (const float[2]){-f5, 0});
|
||||
GPU_vertbuf_attr_set(vbo, attr_id.pos, v, blender::float2{-f5, 0});
|
||||
GPU_vertbuf_attr_set(vbo, attr_id.color, v, crosshair_color);
|
||||
GPU_indexbuf_add_generic_vert(&elb, v++);
|
||||
|
||||
GPU_indexbuf_add_primitive_restart(&elb);
|
||||
|
||||
GPU_vertbuf_attr_set(vbo, attr_id.pos, v, (const float[2]){+f5, 0});
|
||||
GPU_vertbuf_attr_set(vbo, attr_id.pos, v, blender::float2{+f5, 0});
|
||||
GPU_vertbuf_attr_set(vbo, attr_id.color, v, crosshair_color);
|
||||
GPU_indexbuf_add_generic_vert(&elb, v++);
|
||||
GPU_vertbuf_attr_set(vbo, attr_id.pos, v, (const float[2]){+f20, 0});
|
||||
GPU_vertbuf_attr_set(vbo, attr_id.pos, v, blender::float2{+f20, 0});
|
||||
GPU_vertbuf_attr_set(vbo, attr_id.color, v, crosshair_color);
|
||||
GPU_indexbuf_add_generic_vert(&elb, v++);
|
||||
|
||||
GPU_indexbuf_add_primitive_restart(&elb);
|
||||
|
||||
GPU_vertbuf_attr_set(vbo, attr_id.pos, v, (const float[2]){0, -f20});
|
||||
GPU_vertbuf_attr_set(vbo, attr_id.pos, v, blender::float2{0, -f20});
|
||||
GPU_vertbuf_attr_set(vbo, attr_id.color, v, crosshair_color);
|
||||
GPU_indexbuf_add_generic_vert(&elb, v++);
|
||||
GPU_vertbuf_attr_set(vbo, attr_id.pos, v, (const float[2]){0, -f5});
|
||||
GPU_vertbuf_attr_set(vbo, attr_id.pos, v, blender::float2{0, -f5});
|
||||
GPU_vertbuf_attr_set(vbo, attr_id.color, v, crosshair_color);
|
||||
GPU_indexbuf_add_generic_vert(&elb, v++);
|
||||
|
||||
GPU_indexbuf_add_primitive_restart(&elb);
|
||||
|
||||
GPU_vertbuf_attr_set(vbo, attr_id.pos, v, (const float[2]){0, +f5});
|
||||
GPU_vertbuf_attr_set(vbo, attr_id.pos, v, blender::float2{0, +f5});
|
||||
GPU_vertbuf_attr_set(vbo, attr_id.color, v, crosshair_color);
|
||||
GPU_indexbuf_add_generic_vert(&elb, v++);
|
||||
GPU_vertbuf_attr_set(vbo, attr_id.pos, v, (const float[2]){0, +f20});
|
||||
GPU_vertbuf_attr_set(vbo, attr_id.pos, v, blender::float2{0, +f20});
|
||||
GPU_vertbuf_attr_set(vbo, attr_id.color, v, crosshair_color);
|
||||
GPU_indexbuf_add_generic_vert(&elb, v++);
|
||||
}
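In the cursor hunk above, the `(const float[2]){x, y}` array compound literals become `blender::float2{x, y}`: standard C++ has no compound literals, and an unnamed array temporary in particular has no direct replacement, so a small vector type carries the two floats instead. A sketch with a local stand-in type (the real code uses `blender::float2` from the BLI math headers):

struct vec2 {
  float x, y;
};

static void set_pos_attr(float *dst, const vec2 &value)
{
  dst[0] = value.x;
  dst[1] = value.y;
}

void cursor_example(float *dst, float f20)
{
  /* The .c file wrote `(const float[2]){-f20, 0}` at call sites like this. */
  set_pos_attr(dst, vec2{-f20, 0.0f});
}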
@@ -3365,7 +3397,7 @@ void drw_batch_cache_generate_requested_evaluated_mesh_or_curve(Object *ob)
* If the curves are surfaces or have certain modifiers applied to them, they will have mesh data
* of the final result.
*/
if (mesh != NULL) {
if (mesh != nullptr) {
DRW_mesh_batch_cache_create_requested(
DST.task_graph, ob, mesh, scene, is_paint_mode, use_hide);
}
@@ -3400,7 +3432,7 @@ void DRW_batch_cache_free_old(Object *ob, int ctime)

void DRW_cdlayer_attr_aliases_add(GPUVertFormat *format,
const char *base_name,
const CustomData *UNUSED(data),
const CustomData * /*data*/,
const CustomDataLayer *cl,
bool is_active_render,
bool is_active_layer)
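The `UNUSED(data)` to `/*data*/` change in the signature above follows the usual C++ convention for intentionally unused parameters: leave the parameter unnamed (optionally keeping the name in a comment) and the compiler raises no unused-parameter warning, so no macro is needed. A minimal sketch with a made-up function:

/* The name survives only as a comment; the parameter is deliberately unused. */
static int count_aliases(const char *base_name, const void * /*data*/)
{
  return base_name != nullptr ? 1 : 0;
}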
@@ -88,7 +88,7 @@ static int lattice_render_edges_len_get(Lattice *lt)
/* ---------------------------------------------------------------------- */
/* Lattice Interface, indirect, partially cached access to complex data. */

typedef struct LatticeRenderData {
struct LatticeRenderData {
int types;

int vert_len;
@@ -105,7 +105,7 @@ typedef struct LatticeRenderData {
int actbp;

const MDeformVert *dvert;
} LatticeRenderData;
};
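Dropping the `typedef struct ... {} ...;` wrappers works because a C++ struct declaration already introduces a usable type name; the typedef only saved the `struct` keyword in C. A tiny illustration with a made-up struct, not the real render data:

/* C spelling:   typedef struct ExampleRenderData { int types; } ExampleRenderData;
 * C++ spelling: the plain declaration below is enough. */
struct ExampleRenderData {
  int types;
};

static ExampleRenderData make_render_data()
{
  return {0}; /* the name works as a type on its own, no `struct` keyword */
}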

enum {
LR_DATATYPE_VERT = 1 << 0,
@@ -115,7 +115,8 @@

static LatticeRenderData *lattice_render_data_create(Lattice *lt, const int types)
{
LatticeRenderData *rdata = MEM_callocN(sizeof(*rdata), __func__);
LatticeRenderData *rdata = static_cast<LatticeRenderData *>(
MEM_callocN(sizeof(*rdata), __func__));
rdata->types = types;

if (lt->editlatt) {
@@ -137,7 +138,7 @@ static LatticeRenderData *lattice_render_data_create(Lattice *lt, const int type
}
}
else {
rdata->dvert = NULL;
rdata->dvert = nullptr;
|
||||
|
||||
if (types & (LR_DATATYPE_VERT)) {
|
||||
rdata->vert_len = lattice_render_verts_len_get(lt);
|
||||
@@ -192,7 +193,7 @@ static const BPoint *lattice_render_data_vert_bpoint(const LatticeRenderData *rd
|
||||
/* ---------------------------------------------------------------------- */
|
||||
/* Lattice GPUBatch Cache */
|
||||
|
||||
typedef struct LatticeBatchCache {
|
||||
struct LatticeBatchCache {
|
||||
GPUVertBuf *pos;
|
||||
GPUIndexBuf *edges;
|
||||
|
||||
@@ -210,19 +211,19 @@ typedef struct LatticeBatchCache {
|
||||
bool show_only_outside;
|
||||
|
||||
bool is_editmode;
|
||||
} LatticeBatchCache;
|
||||
};
|
||||
|
||||
/* GPUBatch cache management. */
|
||||
|
||||
static bool lattice_batch_cache_valid(Lattice *lt)
|
||||
{
|
||||
LatticeBatchCache *cache = lt->batch_cache;
|
||||
LatticeBatchCache *cache = static_cast<LatticeBatchCache *>(lt->batch_cache);
|
||||
|
||||
if (cache == NULL) {
|
||||
if (cache == nullptr) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (cache->is_editmode != (lt->editlatt != NULL)) {
|
||||
if (cache->is_editmode != (lt->editlatt != nullptr)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -242,10 +243,11 @@ static bool lattice_batch_cache_valid(Lattice *lt)
|
||||
|
||||
static void lattice_batch_cache_init(Lattice *lt)
|
||||
{
|
||||
LatticeBatchCache *cache = lt->batch_cache;
|
||||
LatticeBatchCache *cache = static_cast<LatticeBatchCache *>(lt->batch_cache);
|
||||
|
||||
if (!cache) {
|
||||
cache = lt->batch_cache = MEM_callocN(sizeof(*cache), __func__);
|
||||
cache = static_cast<LatticeBatchCache *>(
|
||||
lt->batch_cache = MEM_callocN(sizeof(*cache), __func__));
|
||||
}
|
||||
else {
|
||||
memset(cache, 0, sizeof(*cache));
|
||||
@@ -256,7 +258,7 @@ static void lattice_batch_cache_init(Lattice *lt)
|
||||
cache->dims.w_len = lt->pntsw;
|
||||
cache->show_only_outside = (lt->flag & LT_OUTSIDE) != 0;
|
||||
|
||||
cache->is_editmode = lt->editlatt != NULL;
|
||||
cache->is_editmode = lt->editlatt != nullptr;
|
||||
|
||||
cache->is_dirty = false;
|
||||
}
|
||||
@@ -271,13 +273,13 @@ void DRW_lattice_batch_cache_validate(Lattice *lt)
|
||||
|
||||
static LatticeBatchCache *lattice_batch_cache_get(Lattice *lt)
|
||||
{
|
||||
return lt->batch_cache;
|
||||
return static_cast<LatticeBatchCache *>(lt->batch_cache);
|
||||
}
|
||||
|
||||
void DRW_lattice_batch_cache_dirty_tag(Lattice *lt, int mode)
|
||||
{
|
||||
LatticeBatchCache *cache = lt->batch_cache;
|
||||
if (cache == NULL) {
|
||||
LatticeBatchCache *cache = static_cast<LatticeBatchCache *>(lt->batch_cache);
|
||||
if (cache == nullptr) {
|
||||
return;
|
||||
}
|
||||
switch (mode) {
|
||||
@@ -295,7 +297,7 @@ void DRW_lattice_batch_cache_dirty_tag(Lattice *lt, int mode)
|
||||
|
||||
static void lattice_batch_cache_clear(Lattice *lt)
|
||||
{
|
||||
LatticeBatchCache *cache = lt->batch_cache;
|
||||
LatticeBatchCache *cache = static_cast<LatticeBatchCache *>(lt->batch_cache);
|
||||
if (!cache) {
|
||||
return;
|
||||
}
|
||||
@@ -322,7 +324,7 @@ static GPUVertBuf *lattice_batch_cache_get_pos(LatticeRenderData *rdata,
|
||||
{
|
||||
BLI_assert(rdata->types & LR_DATATYPE_VERT);
|
||||
|
||||
if (cache->pos == NULL) {
|
||||
if (cache->pos == nullptr) {
|
||||
GPUVertFormat format = {0};
|
||||
struct {
|
||||
uint pos, col;
|
||||
@@ -358,7 +360,7 @@ static GPUIndexBuf *lattice_batch_cache_get_edges(LatticeRenderData *rdata,
|
||||
{
|
||||
BLI_assert(rdata->types & (LR_DATATYPE_VERT | LR_DATATYPE_EDGE));
|
||||
|
||||
if (cache->edges == NULL) {
|
||||
if (cache->edges == nullptr) {
|
||||
const int vert_len = lattice_render_data_verts_len_get(rdata);
|
||||
const int edge_len = lattice_render_data_edges_len_get(rdata);
|
||||
int edge_len_real = 0;
|
||||
@@ -418,7 +420,7 @@ static void lattice_batch_cache_create_overlay_batches(Lattice *lt)
|
||||
LatticeBatchCache *cache = lattice_batch_cache_get(lt);
|
||||
LatticeRenderData *rdata = lattice_render_data_create(lt, options);
|
||||
|
||||
if (cache->overlay_verts == NULL) {
|
||||
if (cache->overlay_verts == nullptr) {
|
||||
static GPUVertFormat format = {0};
|
||||
static struct {
|
||||
uint pos, data;
|
||||
@@ -450,7 +452,7 @@ static void lattice_batch_cache_create_overlay_batches(Lattice *lt)
|
||||
GPU_vertbuf_attr_set(vbo, attr_id.data, i, &vflag);
|
||||
}
|
||||
|
||||
cache->overlay_verts = GPU_batch_create_ex(GPU_PRIM_POINTS, vbo, NULL, GPU_BATCH_OWNS_VBO);
|
||||
cache->overlay_verts = GPU_batch_create_ex(GPU_PRIM_POINTS, vbo, nullptr, GPU_BATCH_OWNS_VBO);
|
||||
}
|
||||
|
||||
lattice_render_data_free(rdata);
|
||||
@@ -460,7 +462,7 @@ GPUBatch *DRW_lattice_batch_cache_get_all_edges(Lattice *lt, bool use_weight, co
|
||||
{
|
||||
LatticeBatchCache *cache = lattice_batch_cache_get(lt);
|
||||
|
||||
if (cache->all_edges == NULL) {
|
||||
if (cache->all_edges == nullptr) {
|
||||
/* create batch from Lattice */
|
||||
LatticeRenderData *rdata = lattice_render_data_create(lt, LR_DATATYPE_VERT | LR_DATATYPE_EDGE);
|
||||
|
||||
@@ -479,11 +481,11 @@ GPUBatch *DRW_lattice_batch_cache_get_all_verts(Lattice *lt)
|
||||
{
|
||||
LatticeBatchCache *cache = lattice_batch_cache_get(lt);
|
||||
|
||||
if (cache->all_verts == NULL) {
|
||||
if (cache->all_verts == nullptr) {
|
||||
LatticeRenderData *rdata = lattice_render_data_create(lt, LR_DATATYPE_VERT);
|
||||
|
||||
cache->all_verts = GPU_batch_create(
|
||||
GPU_PRIM_POINTS, lattice_batch_cache_get_pos(rdata, cache, false, -1), NULL);
|
||||
GPU_PRIM_POINTS, lattice_batch_cache_get_pos(rdata, cache, false, -1), nullptr);
|
||||
|
||||
lattice_render_data_free(rdata);
|
||||
}
|
||||
@@ -495,7 +497,7 @@ GPUBatch *DRW_lattice_batch_cache_get_edit_verts(Lattice *lt)
|
||||
{
|
||||
LatticeBatchCache *cache = lattice_batch_cache_get(lt);
|
||||
|
||||
if (cache->overlay_verts == NULL) {
|
||||
if (cache->overlay_verts == nullptr) {
|
||||
lattice_batch_cache_create_overlay_batches(lt);
|
||||
}
|
||||
|
||||
@@ -47,14 +47,14 @@ static void particle_batch_cache_clear(ParticleSystem *psys);
|
||||
/* ---------------------------------------------------------------------- */
|
||||
/* Particle GPUBatch Cache */
|
||||
|
||||
typedef struct ParticlePointCache {
|
||||
struct ParticlePointCache {
|
||||
GPUVertBuf *pos;
|
||||
GPUBatch *points;
|
||||
int elems_len;
|
||||
int point_len;
|
||||
} ParticlePointCache;
|
||||
};
|
||||
|
||||
typedef struct ParticleBatchCache {
|
||||
struct ParticleBatchCache {
|
||||
/* Object mode strands for hair and points for particle,
|
||||
* strands for paths when in edit mode.
|
||||
*/
|
||||
@@ -78,20 +78,20 @@ typedef struct ParticleBatchCache {
|
||||
/* Settings to determine if cache is invalid. */
|
||||
bool is_dirty;
|
||||
bool edit_is_weight;
|
||||
} ParticleBatchCache;
|
||||
};
|
||||
|
||||
/* GPUBatch cache management. */
|
||||
|
||||
typedef struct HairAttributeID {
|
||||
struct HairAttributeID {
|
||||
uint pos;
|
||||
uint tan;
|
||||
uint ind;
|
||||
} HairAttributeID;
|
||||
};
|
||||
|
||||
typedef struct EditStrandData {
|
||||
struct EditStrandData {
|
||||
float pos[3];
|
||||
float selection;
|
||||
} EditStrandData;
|
||||
};
|
||||
|
||||
static GPUVertFormat *edit_points_vert_format_get(uint *r_pos_id, uint *r_selection_id)
|
||||
{
|
||||
@@ -110,9 +110,9 @@ static GPUVertFormat *edit_points_vert_format_get(uint *r_pos_id, uint *r_select
|
||||
|
||||
static bool particle_batch_cache_valid(ParticleSystem *psys)
|
||||
{
|
||||
ParticleBatchCache *cache = psys->batch_cache;
|
||||
ParticleBatchCache *cache = static_cast<ParticleBatchCache *>(psys->batch_cache);
|
||||
|
||||
if (cache == NULL) {
|
||||
if (cache == nullptr) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -127,10 +127,11 @@ static bool particle_batch_cache_valid(ParticleSystem *psys)
|
||||
|
||||
static void particle_batch_cache_init(ParticleSystem *psys)
|
||||
{
|
||||
ParticleBatchCache *cache = psys->batch_cache;
|
||||
ParticleBatchCache *cache = static_cast<ParticleBatchCache *>(psys->batch_cache);
|
||||
|
||||
if (!cache) {
|
||||
cache = psys->batch_cache = MEM_callocN(sizeof(*cache), __func__);
|
||||
cache = static_cast<ParticleBatchCache *>(
|
||||
psys->batch_cache = MEM_callocN(sizeof(*cache), __func__));
|
||||
}
|
||||
else {
|
||||
memset(cache, 0, sizeof(*cache));
|
||||
@@ -145,13 +146,13 @@ static ParticleBatchCache *particle_batch_cache_get(ParticleSystem *psys)
|
||||
particle_batch_cache_clear(psys);
|
||||
particle_batch_cache_init(psys);
|
||||
}
|
||||
return psys->batch_cache;
|
||||
return static_cast<ParticleBatchCache *>(psys->batch_cache);
|
||||
}
|
||||
|
||||
void DRW_particle_batch_cache_dirty_tag(ParticleSystem *psys, int mode)
|
||||
{
|
||||
ParticleBatchCache *cache = psys->batch_cache;
|
||||
if (cache == NULL) {
|
||||
ParticleBatchCache *cache = static_cast<ParticleBatchCache *>(psys->batch_cache);
|
||||
if (cache == nullptr) {
|
||||
return;
|
||||
}
|
||||
switch (mode) {
|
||||
@@ -206,7 +207,7 @@ static void particle_batch_cache_clear_hair(ParticleHairCache *hair_cache)
|
||||
|
||||
static void particle_batch_cache_clear(ParticleSystem *psys)
|
||||
{
|
||||
ParticleBatchCache *cache = psys->batch_cache;
|
||||
ParticleBatchCache *cache = static_cast<ParticleBatchCache *>(psys->batch_cache);
|
||||
if (!cache) {
|
||||
return;
|
||||
}
|
||||
@@ -248,8 +249,8 @@ static void ensure_seg_pt_count(PTCacheEdit *edit,
|
||||
ParticleSystem *psys,
|
||||
ParticleHairCache *hair_cache)
|
||||
{
|
||||
if ((hair_cache->pos != NULL && hair_cache->indices != NULL) ||
|
||||
(hair_cache->proc_point_buf != NULL))
|
||||
if ((hair_cache->pos != nullptr && hair_cache->indices != nullptr) ||
|
||||
(hair_cache->proc_point_buf != nullptr))
|
||||
{
|
||||
return;
|
||||
}
|
||||
@@ -258,7 +259,7 @@ static void ensure_seg_pt_count(PTCacheEdit *edit,
|
||||
hair_cache->elems_len = 0;
|
||||
hair_cache->point_len = 0;
|
||||
|
||||
if (edit != NULL && edit->pathcache != NULL) {
|
||||
if (edit != nullptr && edit->pathcache != nullptr) {
|
||||
count_cache_segment_keys(edit->pathcache, edit->totcached, hair_cache);
|
||||
}
|
||||
else {
|
||||
@@ -288,7 +289,7 @@ static void particle_calculate_parent_uvs(ParticleSystem *psys,
|
||||
const MTFace **mtfaces,
|
||||
float (*r_uv)[2])
|
||||
{
|
||||
if (psmd == NULL) {
|
||||
if (psmd == nullptr) {
|
||||
return;
|
||||
}
|
||||
const int emit_from = psmd->psys->part->from;
|
||||
@@ -303,8 +304,9 @@ static void particle_calculate_parent_uvs(ParticleSystem *psys,
|
||||
}
|
||||
}
|
||||
if (!ELEM(num, DMCACHE_NOTFOUND, DMCACHE_ISCHILD)) {
|
||||
const MFace *mfaces = CustomData_get_layer(&psmd->mesh_final->fdata_legacy, CD_MFACE);
|
||||
if (UNLIKELY(mfaces == NULL)) {
|
||||
const MFace *mfaces = static_cast<const MFace *>(
|
||||
CustomData_get_layer(&psmd->mesh_final->fdata_legacy, CD_MFACE));
|
||||
if (UNLIKELY(mfaces == nullptr)) {
|
||||
BLI_assert_msg(psmd->mesh_final->faces_num == 0,
|
||||
"A mesh with polygons should always have a generated 'CD_MFACE' layer!");
|
||||
return;
|
||||
@@ -323,7 +325,7 @@ static void particle_calculate_parent_mcol(ParticleSystem *psys,
|
||||
const MCol **mcols,
|
||||
MCol *r_mcol)
|
||||
{
|
||||
if (psmd == NULL) {
|
||||
if (psmd == nullptr) {
|
||||
return;
|
||||
}
|
||||
const int emit_from = psmd->psys->part->from;
|
||||
@@ -338,8 +340,9 @@ static void particle_calculate_parent_mcol(ParticleSystem *psys,
|
||||
}
|
||||
}
|
||||
if (!ELEM(num, DMCACHE_NOTFOUND, DMCACHE_ISCHILD)) {
|
||||
const MFace *mfaces = CustomData_get_layer(&psmd->mesh_final->fdata_legacy, CD_MFACE);
|
||||
if (UNLIKELY(mfaces == NULL)) {
|
||||
const MFace *mfaces = static_cast<const MFace *>(
|
||||
CustomData_get_layer(&psmd->mesh_final->fdata_legacy, CD_MFACE));
|
||||
if (UNLIKELY(mfaces == nullptr)) {
|
||||
BLI_assert_msg(psmd->mesh_final->faces_num == 0,
|
||||
"A mesh with polygons should always have a generated 'CD_MFACE' layer!");
|
||||
return;
|
||||
@@ -360,7 +363,7 @@ static void particle_interpolate_children_uvs(ParticleSystem *psys,
|
||||
const MTFace **mtfaces,
|
||||
float (*r_uv)[2])
|
||||
{
|
||||
if (psmd == NULL) {
|
||||
if (psmd == nullptr) {
|
||||
return;
|
||||
}
|
||||
const int emit_from = psmd->psys->part->from;
|
||||
@@ -370,7 +373,8 @@ static void particle_interpolate_children_uvs(ParticleSystem *psys,
|
||||
ChildParticle *particle = &psys->child[child_index];
|
||||
int num = particle->num;
|
||||
if (num != DMCACHE_NOTFOUND) {
|
||||
const MFace *mfaces = CustomData_get_layer(&psmd->mesh_final->fdata_legacy, CD_MFACE);
|
||||
const MFace *mfaces = static_cast<const MFace *>(
|
||||
CustomData_get_layer(&psmd->mesh_final->fdata_legacy, CD_MFACE));
|
||||
const MFace *mface = &mfaces[num];
|
||||
for (int j = 0; j < num_uv_layers; j++) {
|
||||
psys_interpolate_uvs(mtfaces[j] + num, mface->v4, particle->fuv, r_uv[j]);
|
||||
@@ -385,7 +389,7 @@ static void particle_interpolate_children_mcol(ParticleSystem *psys,
|
||||
const MCol **mcols,
|
||||
MCol *r_mcol)
|
||||
{
|
||||
if (psmd == NULL) {
|
||||
if (psmd == nullptr) {
|
||||
return;
|
||||
}
|
||||
const int emit_from = psmd->psys->part->from;
|
||||
@@ -395,7 +399,8 @@ static void particle_interpolate_children_mcol(ParticleSystem *psys,
|
||||
ChildParticle *particle = &psys->child[child_index];
|
||||
int num = particle->num;
|
||||
if (num != DMCACHE_NOTFOUND) {
|
||||
const MFace *mfaces = CustomData_get_layer(&psmd->mesh_final->fdata_legacy, CD_MFACE);
|
||||
const MFace *mfaces = static_cast<const MFace *>(
|
||||
CustomData_get_layer(&psmd->mesh_final->fdata_legacy, CD_MFACE));
|
||||
const MFace *mface = &mfaces[num];
|
||||
for (int j = 0; j < num_col_layers; j++) {
|
||||
/* CustomDataLayer CD_MCOL has 4 structs per face. */
|
||||
@@ -414,19 +419,20 @@ static void particle_calculate_uvs(ParticleSystem *psys,
|
||||
float (**r_parent_uvs)[2],
|
||||
float (**r_uv)[2])
|
||||
{
|
||||
if (psmd == NULL) {
|
||||
if (psmd == nullptr) {
|
||||
return;
|
||||
}
|
||||
if (is_simple) {
|
||||
if (r_parent_uvs[parent_index] != NULL) {
|
||||
if (r_parent_uvs[parent_index] != nullptr) {
|
||||
*r_uv = r_parent_uvs[parent_index];
|
||||
}
|
||||
else {
|
||||
*r_uv = MEM_callocN(sizeof(**r_uv) * num_uv_layers, "Particle UVs");
|
||||
*r_uv = static_cast<float(*)[2]>(
|
||||
MEM_callocN(sizeof(**r_uv) * num_uv_layers, "Particle UVs"));
|
||||
}
|
||||
}
|
||||
else {
|
||||
*r_uv = MEM_callocN(sizeof(**r_uv) * num_uv_layers, "Particle UVs");
|
||||
*r_uv = static_cast<float(*)[2]>(MEM_callocN(sizeof(**r_uv) * num_uv_layers, "Particle UVs"));
|
||||
}
|
||||
if (child_index == -1) {
|
||||
/* Calculate UVs for parent particles. */
|
||||
@@ -457,19 +463,20 @@ static void particle_calculate_mcol(ParticleSystem *psys,
|
||||
MCol **r_parent_mcol,
|
||||
MCol **r_mcol)
|
||||
{
|
||||
if (psmd == NULL) {
|
||||
if (psmd == nullptr) {
|
||||
return;
|
||||
}
|
||||
if (is_simple) {
|
||||
if (r_parent_mcol[parent_index] != NULL) {
|
||||
if (r_parent_mcol[parent_index] != nullptr) {
|
||||
*r_mcol = r_parent_mcol[parent_index];
|
||||
}
|
||||
else {
|
||||
*r_mcol = MEM_callocN(sizeof(**r_mcol) * num_col_layers, "Particle MCol");
|
||||
*r_mcol = static_cast<MCol *>(
|
||||
MEM_callocN(sizeof(**r_mcol) * num_col_layers, "Particle MCol"));
|
||||
}
|
||||
}
|
||||
else {
|
||||
*r_mcol = MEM_callocN(sizeof(**r_mcol) * num_col_layers, "Particle MCol");
|
||||
*r_mcol = static_cast<MCol *>(MEM_callocN(sizeof(**r_mcol) * num_col_layers, "Particle MCol"));
|
||||
}
|
||||
if (child_index == -1) {
|
||||
/* Calculate MCols for parent particles. */
|
||||
@@ -491,10 +498,10 @@ static void particle_calculate_mcol(ParticleSystem *psys,
|
||||
}
|
||||
|
||||
/* Will return last filled index. */
|
||||
typedef enum ParticleSource {
|
||||
enum ParticleSource {
|
||||
PARTICLE_SOURCE_PARENT,
|
||||
PARTICLE_SOURCE_CHILDREN,
|
||||
} ParticleSource;
|
||||
};
|
||||
static int particle_batch_cache_fill_segments(ParticleSystem *psys,
|
||||
ParticleSystemModifierData *psmd,
|
||||
ParticleCacheKey **path_cache,
|
||||
@@ -516,12 +523,14 @@ static int particle_batch_cache_fill_segments(ParticleSystem *psys,
|
||||
{
|
||||
const bool is_simple = (psys->part->childtype == PART_CHILD_PARTICLES);
|
||||
const bool is_child = (particle_source == PARTICLE_SOURCE_CHILDREN);
|
||||
if (is_simple && *r_parent_uvs == NULL) {
|
||||
if (is_simple && *r_parent_uvs == nullptr) {
|
||||
/* TODO(sergey): For edit mode it should be edit->totcached. */
|
||||
*r_parent_uvs = MEM_callocN(sizeof(*r_parent_uvs) * psys->totpart, "Parent particle UVs");
|
||||
*r_parent_uvs = static_cast<float(**)[2]>(
|
||||
MEM_callocN(sizeof(*r_parent_uvs) * psys->totpart, "Parent particle UVs"));
|
||||
}
|
||||
if (is_simple && *r_parent_mcol == NULL) {
|
||||
*r_parent_mcol = MEM_callocN(sizeof(*r_parent_mcol) * psys->totpart, "Parent particle MCol");
|
||||
if (is_simple && *r_parent_mcol == nullptr) {
|
||||
*r_parent_mcol = static_cast<MCol **>(
|
||||
MEM_callocN(sizeof(*r_parent_mcol) * psys->totpart, "Parent particle MCol"));
|
||||
}
|
||||
int curr_point = start_index;
|
||||
for (int i = 0; i < num_path_keys; i++) {
|
||||
@@ -530,8 +539,8 @@ static int particle_batch_cache_fill_segments(ParticleSystem *psys,
|
||||
continue;
|
||||
}
|
||||
float tangent[3];
|
||||
float(*uv)[2] = NULL;
|
||||
MCol *mcol = NULL;
|
||||
float(*uv)[2] = nullptr;
|
||||
MCol *mcol = nullptr;
|
||||
particle_calculate_mcol(psys,
|
||||
psmd,
|
||||
is_simple,
|
||||
@@ -560,7 +569,7 @@ static int particle_batch_cache_fill_segments(ParticleSystem *psys,
|
||||
GPU_vertbuf_attr_set(hair_cache->pos, attr_id->pos, curr_point, path[j].co);
|
||||
GPU_vertbuf_attr_set(hair_cache->pos, attr_id->tan, curr_point, tangent);
|
||||
GPU_vertbuf_attr_set(hair_cache->pos, attr_id->ind, curr_point, &i);
|
||||
if (psmd != NULL) {
|
||||
if (psmd != nullptr) {
|
||||
for (int k = 0; k < num_uv_layers; k++) {
|
||||
GPU_vertbuf_attr_set(
|
||||
hair_cache->pos,
|
||||
@@ -587,7 +596,7 @@ static int particle_batch_cache_fill_segments(ParticleSystem *psys,
|
||||
GPU_vertbuf_attr_set(hair_cache->pos, attr_id->tan, curr_point, tangent);
|
||||
GPU_vertbuf_attr_set(hair_cache->pos, attr_id->ind, curr_point, &global_index);
|
||||
|
||||
if (psmd != NULL) {
|
||||
if (psmd != nullptr) {
|
||||
for (int k = 0; k < num_uv_layers; k++) {
|
||||
GPU_vertbuf_attr_set(hair_cache->pos,
|
||||
uv_id[k],
|
||||
@@ -627,7 +636,7 @@ static void particle_batch_cache_fill_segments_proc_pos(ParticleCacheKey **path_
|
||||
continue;
|
||||
}
|
||||
float total_len = 0.0f;
|
||||
float *co_prev = NULL, *seg_data_first;
|
||||
float *co_prev = nullptr, *seg_data_first;
|
||||
for (int j = 0; j <= path->segments; j++) {
|
||||
float *seg_data = (float *)GPU_vertbuf_raw_step(attr_step);
|
||||
copy_v3_v3(seg_data, path[j].co);
|
||||
@@ -669,8 +678,8 @@ static float particle_key_weight(const ParticleData *particle, int strand, float
|
||||
}
|
||||
|
||||
static int particle_batch_cache_fill_segments_edit(
|
||||
const PTCacheEdit *UNUSED(edit), /* NULL for weight data */
|
||||
const ParticleData *particle, /* NULL for select data */
|
||||
const PTCacheEdit * /*edit*/, /* nullptr for weight data */
|
||||
const ParticleData *particle, /* nullptr for select data */
|
||||
ParticleCacheKey **path_cache,
|
||||
const int start_index,
|
||||
const int num_path_keys,
|
||||
@@ -744,12 +753,14 @@ static int particle_batch_cache_fill_strands_data(ParticleSystem *psys,
|
||||
{
|
||||
const bool is_simple = (psys->part->childtype == PART_CHILD_PARTICLES);
|
||||
const bool is_child = (particle_source == PARTICLE_SOURCE_CHILDREN);
|
||||
if (is_simple && *r_parent_uvs == NULL) {
|
||||
if (is_simple && *r_parent_uvs == nullptr) {
|
||||
/* TODO(sergey): For edit mode it should be edit->totcached. */
|
||||
*r_parent_uvs = MEM_callocN(sizeof(*r_parent_uvs) * psys->totpart, "Parent particle UVs");
|
||||
*r_parent_uvs = static_cast<float(**)[2]>(
|
||||
MEM_callocN(sizeof(*r_parent_uvs) * psys->totpart, "Parent particle UVs"));
|
||||
}
|
||||
if (is_simple && *r_parent_mcol == NULL) {
|
||||
*r_parent_mcol = MEM_callocN(sizeof(*r_parent_mcol) * psys->totpart, "Parent particle MCol");
|
||||
if (is_simple && *r_parent_mcol == nullptr) {
|
||||
*r_parent_mcol = static_cast<MCol **>(
|
||||
MEM_callocN(sizeof(*r_parent_mcol) * psys->totpart, "Parent particle MCol"));
|
||||
}
|
||||
int curr_point = start_index;
|
||||
for (int i = 0; i < num_path_keys; i++) {
|
||||
@@ -762,9 +773,9 @@ static int particle_batch_cache_fill_strands_data(ParticleSystem *psys,
|
||||
*(ushort *)GPU_vertbuf_raw_step(seg_step) = path->segments;
|
||||
curr_point += path->segments + 1;
|
||||
|
||||
if (psmd != NULL) {
|
||||
float(*uv)[2] = NULL;
|
||||
MCol *mcol = NULL;
|
||||
if (psmd != nullptr) {
|
||||
float(*uv)[2] = nullptr;
|
||||
MCol *mcol = nullptr;
|
||||
|
||||
particle_calculate_uvs(psys,
|
||||
psmd,
|
||||
@@ -835,7 +846,7 @@ static void particle_batch_cache_ensure_procedural_strand_data(PTCacheEdit *edit
|
||||
|
||||
ParticleSystemModifierData *psmd = (ParticleSystemModifierData *)md;
|
||||
|
||||
if (psmd != NULL && psmd->mesh_final != NULL) {
|
||||
if (psmd != nullptr && psmd->mesh_final != nullptr) {
|
||||
if (CustomData_has_layer(&psmd->mesh_final->loop_data, CD_PROP_FLOAT2)) {
|
||||
cache->num_uv_layers = CustomData_number_of_layers(&psmd->mesh_final->loop_data,
|
||||
CD_PROP_FLOAT2);
|
||||
@@ -845,12 +856,12 @@ static void particle_batch_cache_ensure_procedural_strand_data(PTCacheEdit *edit
|
||||
if (CustomData_has_layer(&psmd->mesh_final->loop_data, CD_PROP_BYTE_COLOR)) {
|
||||
cache->num_col_layers = CustomData_number_of_layers(&psmd->mesh_final->loop_data,
|
||||
CD_PROP_BYTE_COLOR);
|
||||
if (psmd->mesh_final->active_color_attribute != NULL) {
|
||||
if (psmd->mesh_final->active_color_attribute != nullptr) {
|
||||
active_col = CustomData_get_named_layer(&psmd->mesh_final->loop_data,
|
||||
CD_PROP_BYTE_COLOR,
|
||||
psmd->mesh_final->active_color_attribute);
|
||||
}
|
||||
if (psmd->mesh_final->default_color_attribute != NULL) {
|
||||
if (psmd->mesh_final->default_color_attribute != nullptr) {
|
||||
render_col = CustomData_get_named_layer(&psmd->mesh_final->loop_data,
|
||||
CD_PROP_BYTE_COLOR,
|
||||
psmd->mesh_final->default_color_attribute);
|
||||
@@ -862,10 +873,10 @@ static void particle_batch_cache_ensure_procedural_strand_data(PTCacheEdit *edit
|
||||
GPUVertBufRaw uv_step[MAX_MTFACE];
|
||||
GPUVertBufRaw *col_step = BLI_array_alloca(col_step, cache->num_col_layers);
|
||||
|
||||
const MTFace *mtfaces[MAX_MTFACE] = {NULL};
|
||||
const MTFace *mtfaces[MAX_MTFACE] = {nullptr};
|
||||
const MCol **mcols = BLI_array_alloca(mcols, cache->num_col_layers);
|
||||
float(**parent_uvs)[2] = NULL;
|
||||
MCol **parent_mcol = NULL;
|
||||
float(**parent_uvs)[2] = nullptr;
|
||||
MCol **parent_mcol = nullptr;
|
||||
|
||||
GPUVertFormat format_data = {0};
|
||||
uint data_id = GPU_vertformat_attr_add(&format_data, "data", GPU_COMP_U32, 1, GPU_FETCH_INT);
|
||||
@@ -922,10 +933,12 @@ static void particle_batch_cache_ensure_procedural_strand_data(PTCacheEdit *edit
|
||||
MEM_SAFE_FREE(cache->col_tex);
|
||||
MEM_SAFE_FREE(cache->col_layer_names);
|
||||
|
||||
cache->proc_col_buf = MEM_calloc_arrayN(cache->num_col_layers, sizeof(void *), "proc_col_buf");
|
||||
cache->col_tex = MEM_calloc_arrayN(cache->num_col_layers, sizeof(void *), "col_tex");
|
||||
cache->col_layer_names = MEM_calloc_arrayN(
|
||||
cache->num_col_layers, sizeof(*cache->col_layer_names), "col_layer_names");
|
||||
cache->proc_col_buf = static_cast<GPUVertBuf **>(
|
||||
MEM_calloc_arrayN(cache->num_col_layers, sizeof(void *), "proc_col_buf"));
|
||||
cache->col_tex = static_cast<GPUTexture **>(
|
||||
MEM_calloc_arrayN(cache->num_col_layers, sizeof(void *), "col_tex"));
|
||||
cache->col_layer_names = static_cast<char(*)[4][14]>(MEM_calloc_arrayN(
|
||||
cache->num_col_layers, sizeof(*cache->col_layer_names), "col_layer_names"));
|
||||
|
||||
/* Vertex colors */
|
||||
for (int i = 0; i < cache->num_col_layers; i++) {
|
||||
@@ -969,7 +982,7 @@ static void particle_batch_cache_ensure_procedural_strand_data(PTCacheEdit *edit
|
||||
}
|
||||
}
|
||||
|
||||
if (edit != NULL && edit->pathcache != NULL) {
|
||||
if (edit != nullptr && edit->pathcache != nullptr) {
|
||||
particle_batch_cache_fill_strands_data(psys,
|
||||
psmd,
|
||||
edit->pathcache,
|
||||
@@ -989,8 +1002,8 @@ static void particle_batch_cache_ensure_procedural_strand_data(PTCacheEdit *edit
|
||||
}
|
||||
else {
|
||||
int curr_point = 0;
|
||||
if ((psys->pathcache != NULL) && (!psys->childcache || (psys->part->draw & PART_DRAW_PARENT)))
|
||||
{
|
||||
if ((psys->pathcache != nullptr) &&
|
||||
(!psys->childcache || (psys->part->draw & PART_DRAW_PARENT))) {
|
||||
curr_point = particle_batch_cache_fill_strands_data(psys,
|
||||
psmd,
|
||||
psys->pathcache,
|
||||
@@ -1029,14 +1042,14 @@ static void particle_batch_cache_ensure_procedural_strand_data(PTCacheEdit *edit
|
||||
}
|
||||
}
|
||||
/* Cleanup. */
|
||||
if (parent_uvs != NULL) {
|
||||
if (parent_uvs != nullptr) {
|
||||
/* TODO(sergey): For edit mode it should be edit->totcached. */
|
||||
for (int i = 0; i < psys->totpart; i++) {
|
||||
MEM_SAFE_FREE(parent_uvs[i]);
|
||||
}
|
||||
MEM_freeN(parent_uvs);
|
||||
}
|
||||
if (parent_mcol != NULL) {
|
||||
if (parent_mcol != nullptr) {
|
||||
for (int i = 0; i < psys->totpart; i++) {
|
||||
MEM_SAFE_FREE(parent_mcol[i]);
|
||||
}
|
||||
@@ -1061,7 +1074,7 @@ static void particle_batch_cache_ensure_procedural_indices(PTCacheEdit *edit,
|
||||
{
|
||||
BLI_assert(thickness_res <= MAX_THICKRES); /* Cylinder strip not currently supported. */
|
||||
|
||||
if (cache->final[subdiv].proc_hairs[thickness_res - 1] != NULL) {
|
||||
if (cache->final[subdiv].proc_hairs[thickness_res - 1] != nullptr) {
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -1083,14 +1096,14 @@ static void particle_batch_cache_ensure_procedural_indices(PTCacheEdit *edit,
|
||||
GPUIndexBufBuilder elb;
|
||||
GPU_indexbuf_init_ex(&elb, prim_type, element_count, element_count);
|
||||
|
||||
if (edit != NULL && edit->pathcache != NULL) {
|
||||
if (edit != nullptr && edit->pathcache != nullptr) {
|
||||
particle_batch_cache_fill_segments_indices(
|
||||
edit->pathcache, 0, edit->totcached, verts_per_hair, &elb);
|
||||
}
|
||||
else {
|
||||
int curr_point = 0;
|
||||
if ((psys->pathcache != NULL) && (!psys->childcache || (psys->part->draw & PART_DRAW_PARENT)))
|
||||
{
|
||||
if ((psys->pathcache != nullptr) &&
|
||||
(!psys->childcache || (psys->part->draw & PART_DRAW_PARENT))) {
|
||||
curr_point = particle_batch_cache_fill_segments_indices(
|
||||
psys->pathcache, 0, psys->totpart, verts_per_hair, &elb);
|
||||
}
|
||||
@@ -1108,9 +1121,9 @@ static void particle_batch_cache_ensure_procedural_indices(PTCacheEdit *edit,
|
||||
static void particle_batch_cache_ensure_procedural_pos(PTCacheEdit *edit,
|
||||
ParticleSystem *psys,
|
||||
ParticleHairCache *cache,
|
||||
GPUMaterial *UNUSED(gpu_material))
|
||||
GPUMaterial * /*gpu_material*/)
|
||||
{
|
||||
if (cache->proc_point_buf == NULL) {
|
||||
if (cache->proc_point_buf == nullptr) {
|
||||
/* initialize vertex format */
|
||||
GPUVertFormat pos_format = {0};
|
||||
uint pos_id = GPU_vertformat_attr_add(
|
||||
@@ -1134,12 +1147,12 @@ static void particle_batch_cache_ensure_procedural_pos(PTCacheEdit *edit,
|
||||
GPUVertBufRaw length_step;
|
||||
GPU_vertbuf_attr_get_raw_data(cache->proc_length_buf, length_id, &length_step);
|
||||
|
||||
if (edit != NULL && edit->pathcache != NULL) {
|
||||
if (edit != nullptr && edit->pathcache != nullptr) {
|
||||
particle_batch_cache_fill_segments_proc_pos(
|
||||
edit->pathcache, edit->totcached, &pos_step, &length_step);
|
||||
}
|
||||
else {
|
||||
if ((psys->pathcache != NULL) &&
|
||||
if ((psys->pathcache != nullptr) &&
|
||||
(!psys->childcache || (psys->part->draw & PART_DRAW_PARENT))) {
|
||||
particle_batch_cache_fill_segments_proc_pos(
|
||||
psys->pathcache, psys->totpart, &pos_step, &length_step);
|
||||
@@ -1158,7 +1171,7 @@ static void particle_batch_cache_ensure_pos_and_seg(PTCacheEdit *edit,
|
||||
ModifierData *md,
|
||||
ParticleHairCache *hair_cache)
|
||||
{
|
||||
if (hair_cache->pos != NULL && hair_cache->indices != NULL) {
|
||||
if (hair_cache->pos != nullptr && hair_cache->indices != nullptr) {
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -1170,18 +1183,18 @@ static void particle_batch_cache_ensure_pos_and_seg(PTCacheEdit *edit,
|
||||
|
||||
static GPUVertFormat format = {0};
|
||||
HairAttributeID attr_id;
|
||||
uint *uv_id = NULL;
|
||||
uint *col_id = NULL;
|
||||
uint *uv_id = nullptr;
|
||||
uint *col_id = nullptr;
|
||||
int num_uv_layers = 0;
|
||||
int num_col_layers = 0;
|
||||
int active_uv = 0;
|
||||
int active_col = 0;
|
||||
const MTFace **mtfaces = NULL;
|
||||
const MCol **mcols = NULL;
|
||||
float(**parent_uvs)[2] = NULL;
|
||||
MCol **parent_mcol = NULL;
|
||||
const MTFace **mtfaces = nullptr;
|
||||
const MCol **mcols = nullptr;
|
||||
float(**parent_uvs)[2] = nullptr;
|
||||
MCol **parent_mcol = nullptr;
|
||||
|
||||
if (psmd != NULL) {
|
||||
if (psmd != nullptr) {
|
||||
if (CustomData_has_layer(&psmd->mesh_final->loop_data, CD_PROP_FLOAT2)) {
|
||||
num_uv_layers = CustomData_number_of_layers(&psmd->mesh_final->loop_data, CD_PROP_FLOAT2);
|
||||
active_uv = CustomData_get_active_layer(&psmd->mesh_final->loop_data, CD_PROP_FLOAT2);
|
||||
@@ -1189,7 +1202,7 @@ static void particle_batch_cache_ensure_pos_and_seg(PTCacheEdit *edit,
|
||||
if (CustomData_has_layer(&psmd->mesh_final->loop_data, CD_PROP_BYTE_COLOR)) {
|
||||
num_col_layers = CustomData_number_of_layers(&psmd->mesh_final->loop_data,
|
||||
CD_PROP_BYTE_COLOR);
|
||||
if (psmd->mesh_final->active_color_attribute != NULL) {
|
||||
if (psmd->mesh_final->active_color_attribute != nullptr) {
|
||||
active_col = CustomData_get_named_layer(&psmd->mesh_final->loop_data,
|
||||
CD_PROP_BYTE_COLOR,
|
||||
psmd->mesh_final->active_color_attribute);
|
||||
@@ -1205,8 +1218,8 @@ static void particle_batch_cache_ensure_pos_and_seg(PTCacheEdit *edit,
|
||||
attr_id.ind = GPU_vertformat_attr_add(&format, "ind", GPU_COMP_I32, 1, GPU_FETCH_INT);
|
||||
|
||||
if (psmd) {
|
||||
uv_id = MEM_mallocN(sizeof(*uv_id) * num_uv_layers, "UV attr format");
|
||||
col_id = MEM_mallocN(sizeof(*col_id) * num_col_layers, "Col attr format");
|
||||
uv_id = static_cast<uint *>(MEM_mallocN(sizeof(*uv_id) * num_uv_layers, "UV attr format"));
|
||||
col_id = static_cast<uint *>(MEM_mallocN(sizeof(*col_id) * num_col_layers, "Col attr format"));
|
||||
|
||||
for (int i = 0; i < num_uv_layers; i++) {
|
||||
|
||||
@@ -1247,14 +1260,16 @@ static void particle_batch_cache_ensure_pos_and_seg(PTCacheEdit *edit,
|
||||
if (num_uv_layers || num_col_layers) {
|
||||
BKE_mesh_tessface_ensure(psmd->mesh_final);
|
||||
if (num_uv_layers) {
|
||||
mtfaces = MEM_mallocN(sizeof(*mtfaces) * num_uv_layers, "Faces UV layers");
|
||||
mtfaces = static_cast<const MTFace **>(
|
||||
MEM_mallocN(sizeof(*mtfaces) * num_uv_layers, "Faces UV layers"));
|
||||
for (int i = 0; i < num_uv_layers; i++) {
|
||||
mtfaces[i] = (const MTFace *)CustomData_get_layer_n(
|
||||
&psmd->mesh_final->fdata_legacy, CD_MTFACE, i);
|
||||
}
|
||||
}
|
||||
if (num_col_layers) {
|
||||
mcols = MEM_mallocN(sizeof(*mcols) * num_col_layers, "Color layers");
|
||||
mcols = static_cast<const MCol **>(
|
||||
MEM_mallocN(sizeof(*mcols) * num_col_layers, "Color layers"));
|
||||
for (int i = 0; i < num_col_layers; i++) {
|
||||
mcols[i] = (const MCol *)CustomData_get_layer_n(
|
||||
&psmd->mesh_final->fdata_legacy, CD_MCOL, i);
|
||||
@@ -1262,7 +1277,7 @@ static void particle_batch_cache_ensure_pos_and_seg(PTCacheEdit *edit,
|
||||
}
|
||||
}
|
||||
|
||||
if (edit != NULL && edit->pathcache != NULL) {
|
||||
if (edit != nullptr && edit->pathcache != nullptr) {
|
||||
curr_point = particle_batch_cache_fill_segments(psys,
|
||||
psmd,
|
||||
edit->pathcache,
|
||||
@@ -1283,8 +1298,8 @@ static void particle_batch_cache_ensure_pos_and_seg(PTCacheEdit *edit,
|
||||
hair_cache);
|
||||
}
|
||||
else {
|
||||
if ((psys->pathcache != NULL) && (!psys->childcache || (psys->part->draw & PART_DRAW_PARENT)))
|
||||
{
|
||||
if ((psys->pathcache != nullptr) &&
|
||||
(!psys->childcache || (psys->part->draw & PART_DRAW_PARENT))) {
|
||||
curr_point = particle_batch_cache_fill_segments(psys,
|
||||
psmd,
|
||||
psys->pathcache,
|
||||
@@ -1304,7 +1319,7 @@ static void particle_batch_cache_ensure_pos_and_seg(PTCacheEdit *edit,
|
||||
&attr_id,
|
||||
hair_cache);
|
||||
}
|
||||
if (psys->childcache != NULL) {
|
||||
if (psys->childcache != nullptr) {
|
||||
const int child_count = psys->totchild * psys->part->disp / 100;
|
||||
curr_point = particle_batch_cache_fill_segments(psys,
|
||||
psmd,
|
||||
@@ -1327,14 +1342,14 @@ static void particle_batch_cache_ensure_pos_and_seg(PTCacheEdit *edit,
|
||||
}
|
||||
}
|
||||
/* Cleanup. */
|
||||
if (parent_uvs != NULL) {
|
||||
if (parent_uvs != nullptr) {
|
||||
/* TODO(sergey): For edit mode it should be edit->totcached. */
|
||||
for (int i = 0; i < psys->totpart; i++) {
|
||||
MEM_SAFE_FREE(parent_uvs[i]);
|
||||
}
|
||||
MEM_freeN(parent_uvs);
|
||||
}
|
||||
if (parent_mcol != NULL) {
|
||||
if (parent_mcol != nullptr) {
|
||||
for (int i = 0; i < psys->totpart; i++) {
|
||||
MEM_SAFE_FREE(parent_mcol[i]);
|
||||
}
|
||||
@@ -1346,7 +1361,7 @@ static void particle_batch_cache_ensure_pos_and_seg(PTCacheEdit *edit,
|
||||
if (num_col_layers) {
|
||||
MEM_freeN((void *)mcols);
|
||||
}
|
||||
if (psmd != NULL) {
|
||||
if (psmd != nullptr) {
|
||||
MEM_freeN(uv_id);
|
||||
}
|
||||
hair_cache->indices = GPU_indexbuf_build(&elb);
|
||||
@@ -1356,7 +1371,7 @@ static void particle_batch_cache_ensure_pos(Object *object,
|
||||
ParticleSystem *psys,
|
||||
ParticlePointCache *point_cache)
|
||||
{
|
||||
if (point_cache->pos != NULL) {
|
||||
if (point_cache->pos != nullptr) {
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -1365,7 +1380,7 @@ static void particle_batch_cache_ensure_pos(Object *object,
|
||||
int i, curr_point;
|
||||
ParticleData *pa;
|
||||
ParticleKey state;
|
||||
ParticleSimulationData sim = {NULL};
|
||||
ParticleSimulationData sim = {nullptr};
|
||||
const DRWContextState *draw_ctx = DRW_context_state_get();
|
||||
|
||||
sim.depsgraph = draw_ctx->depsgraph;
|
||||
@@ -1427,7 +1442,7 @@ static void drw_particle_update_ptcache_edit(Object *object_eval,
|
||||
ParticleSystem *psys,
|
||||
PTCacheEdit *edit)
|
||||
{
|
||||
if (edit->psys == NULL) {
|
||||
if (edit->psys == nullptr) {
|
||||
return;
|
||||
}
|
||||
/* NOTE: Get flag from particle system coming from drawing object.
|
||||
@@ -1440,7 +1455,7 @@ static void drw_particle_update_ptcache_edit(Object *object_eval,
|
||||
PE_update_object(draw_ctx->depsgraph, scene_orig, object_orig, 0);
|
||||
psys->flag &= ~PSYS_HAIR_UPDATED;
|
||||
}
|
||||
if (edit->pathcache == NULL) {
|
||||
if (edit->pathcache == nullptr) {
|
||||
Depsgraph *depsgraph = draw_ctx->depsgraph;
|
||||
psys_cache_edit_paths(depsgraph,
|
||||
scene_orig,
|
||||
@@ -1460,17 +1475,17 @@ static void drw_particle_update_ptcache(Object *object_eval, ParticleSystem *psy
|
||||
Scene *scene_orig = (Scene *)DEG_get_original_id(&draw_ctx->scene->id);
|
||||
Object *object_orig = DEG_get_original_object(object_eval);
|
||||
PTCacheEdit *edit = PE_create_current(draw_ctx->depsgraph, scene_orig, object_orig);
|
||||
if (edit != NULL) {
|
||||
if (edit != nullptr) {
|
||||
drw_particle_update_ptcache_edit(object_eval, psys, edit);
|
||||
}
|
||||
}
|
||||
|
||||
typedef struct ParticleDrawSource {
|
||||
struct ParticleDrawSource {
|
||||
Object *object;
|
||||
ParticleSystem *psys;
|
||||
ModifierData *md;
|
||||
PTCacheEdit *edit;
|
||||
} ParticleDrawSource;
|
||||
};
|
||||
|
||||
static void drw_particle_get_hair_source(Object *object,
|
||||
ParticleSystem *psys,
|
||||
@@ -1494,10 +1509,10 @@ GPUBatch *DRW_particles_batch_cache_get_hair(Object *object,
|
||||
ModifierData *md)
|
||||
{
|
||||
ParticleBatchCache *cache = particle_batch_cache_get(psys);
|
||||
if (cache->hair.hairs == NULL) {
|
||||
if (cache->hair.hairs == nullptr) {
|
||||
drw_particle_update_ptcache(object, psys);
|
||||
ParticleDrawSource source;
|
||||
drw_particle_get_hair_source(object, psys, md, NULL, &source);
|
||||
drw_particle_get_hair_source(object, psys, md, nullptr, &source);
|
||||
ensure_seg_pt_count(source.edit, source.psys, &cache->hair);
|
||||
particle_batch_cache_ensure_pos_and_seg(source.edit, source.psys, source.md, &cache->hair);
|
||||
cache->hair.hairs = GPU_batch_create(
|
||||
@@ -1510,9 +1525,9 @@ GPUBatch *DRW_particles_batch_cache_get_dots(Object *object, ParticleSystem *psy
|
||||
{
|
||||
ParticleBatchCache *cache = particle_batch_cache_get(psys);
|
||||
|
||||
if (cache->point.points == NULL) {
|
||||
if (cache->point.points == nullptr) {
|
||||
particle_batch_cache_ensure_pos(object, psys, &cache->point);
|
||||
cache->point.points = GPU_batch_create(GPU_PRIM_POINTS, cache->point.pos, NULL);
|
||||
cache->point.points = GPU_batch_create(GPU_PRIM_POINTS, cache->point.pos, nullptr);
|
||||
}
|
||||
|
||||
return cache->point.points;
|
||||
@@ -1520,15 +1535,15 @@ GPUBatch *DRW_particles_batch_cache_get_dots(Object *object, ParticleSystem *psy
|
||||
|
||||
static void particle_batch_cache_ensure_edit_pos_and_seg(PTCacheEdit *edit,
|
||||
ParticleSystem *psys,
|
||||
ModifierData *UNUSED(md),
|
||||
ModifierData * /*md*/,
|
||||
ParticleHairCache *hair_cache,
|
||||
bool use_weight)
|
||||
{
|
||||
if (hair_cache->pos != NULL && hair_cache->indices != NULL) {
|
||||
if (hair_cache->pos != nullptr && hair_cache->indices != nullptr) {
|
||||
return;
|
||||
}
|
||||
|
||||
ParticleData *particle = (use_weight) ? psys->particles : NULL;
|
||||
ParticleData *particle = (use_weight) ? psys->particles : nullptr;
|
||||
|
||||
GPU_VERTBUF_DISCARD_SAFE(hair_cache->pos);
|
||||
GPU_INDEXBUF_DISCARD_SAFE(hair_cache->indices);
|
||||
@@ -1544,7 +1559,7 @@ static void particle_batch_cache_ensure_edit_pos_and_seg(PTCacheEdit *edit,
|
||||
|
||||
GPU_indexbuf_init_ex(&elb, GPU_PRIM_LINE_STRIP, hair_cache->elems_len, hair_cache->point_len);
|
||||
|
||||
if (edit != NULL && edit->pathcache != NULL) {
|
||||
if (edit != nullptr && edit->pathcache != nullptr) {
|
||||
particle_batch_cache_fill_segments_edit(
|
||||
edit, particle, edit->pathcache, 0, edit->totcached, &elb, &data_step);
|
||||
}
|
||||
@@ -1564,12 +1579,12 @@ GPUBatch *DRW_particles_batch_cache_get_edit_strands(Object *object,
|
||||
GPU_VERTBUF_DISCARD_SAFE(cache->edit_hair.pos);
|
||||
GPU_BATCH_DISCARD_SAFE(cache->edit_hair.hairs);
|
||||
}
|
||||
if (cache->edit_hair.hairs != NULL) {
|
||||
if (cache->edit_hair.hairs != nullptr) {
|
||||
return cache->edit_hair.hairs;
|
||||
}
|
||||
drw_particle_update_ptcache_edit(object, psys, edit);
|
||||
ensure_seg_pt_count(edit, psys, &cache->edit_hair);
|
||||
particle_batch_cache_ensure_edit_pos_and_seg(edit, psys, NULL, &cache->edit_hair, use_weight);
|
||||
particle_batch_cache_ensure_edit_pos_and_seg(edit, psys, nullptr, &cache->edit_hair, use_weight);
|
||||
cache->edit_hair.hairs = GPU_batch_create(
|
||||
GPU_PRIM_LINE_STRIP, cache->edit_hair.pos, cache->edit_hair.indices);
|
||||
cache->edit_is_weight = use_weight;
|
||||
@@ -1578,7 +1593,7 @@ GPUBatch *DRW_particles_batch_cache_get_edit_strands(Object *object,
|
||||
|
||||
static void ensure_edit_inner_points_count(const PTCacheEdit *edit, ParticleBatchCache *cache)
|
||||
{
|
||||
if (cache->edit_inner_pos != NULL) {
|
||||
if (cache->edit_inner_pos != nullptr) {
|
||||
return;
|
||||
}
|
||||
cache->edit_inner_point_len = 0;
|
||||
@@ -1595,7 +1610,7 @@ static void ensure_edit_inner_points_count(const PTCacheEdit *edit, ParticleBatc
|
||||
static void particle_batch_cache_ensure_edit_inner_pos(PTCacheEdit *edit,
|
||||
ParticleBatchCache *cache)
|
||||
{
|
||||
if (cache->edit_inner_pos != NULL) {
|
||||
if (cache->edit_inner_pos != nullptr) {
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -1626,19 +1641,19 @@ GPUBatch *DRW_particles_batch_cache_get_edit_inner_points(Object *object,
|
||||
PTCacheEdit *edit)
|
||||
{
|
||||
ParticleBatchCache *cache = particle_batch_cache_get(psys);
|
||||
if (cache->edit_inner_points != NULL) {
|
||||
if (cache->edit_inner_points != nullptr) {
|
||||
return cache->edit_inner_points;
|
||||
}
|
||||
drw_particle_update_ptcache_edit(object, psys, edit);
|
||||
ensure_edit_inner_points_count(edit, cache);
|
||||
particle_batch_cache_ensure_edit_inner_pos(edit, cache);
|
||||
cache->edit_inner_points = GPU_batch_create(GPU_PRIM_POINTS, cache->edit_inner_pos, NULL);
|
||||
cache->edit_inner_points = GPU_batch_create(GPU_PRIM_POINTS, cache->edit_inner_pos, nullptr);
|
||||
return cache->edit_inner_points;
|
||||
}
|
||||
|
||||
static void ensure_edit_tip_points_count(const PTCacheEdit *edit, ParticleBatchCache *cache)
|
||||
{
|
||||
if (cache->edit_tip_pos != NULL) {
|
||||
if (cache->edit_tip_pos != nullptr) {
|
||||
return;
|
||||
}
|
||||
cache->edit_tip_point_len = 0;
|
||||
@@ -1653,7 +1668,7 @@ static void ensure_edit_tip_points_count(const PTCacheEdit *edit, ParticleBatchC
|
||||
|
||||
static void particle_batch_cache_ensure_edit_tip_pos(PTCacheEdit *edit, ParticleBatchCache *cache)
|
||||
{
|
||||
if (cache->edit_tip_pos != NULL) {
|
||||
if (cache->edit_tip_pos != nullptr) {
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -1683,13 +1698,13 @@ GPUBatch *DRW_particles_batch_cache_get_edit_tip_points(Object *object,
|
||||
PTCacheEdit *edit)
|
||||
{
|
||||
ParticleBatchCache *cache = particle_batch_cache_get(psys);
|
||||
if (cache->edit_tip_points != NULL) {
|
||||
if (cache->edit_tip_points != nullptr) {
|
||||
return cache->edit_tip_points;
|
||||
}
|
||||
drw_particle_update_ptcache_edit(object, psys, edit);
|
||||
ensure_edit_tip_points_count(edit, cache);
|
||||
particle_batch_cache_ensure_edit_tip_pos(edit, cache);
|
||||
cache->edit_tip_points = GPU_batch_create(GPU_PRIM_POINTS, cache->edit_tip_pos, NULL);
|
||||
cache->edit_tip_points = GPU_batch_create(GPU_PRIM_POINTS, cache->edit_tip_pos, nullptr);
|
||||
return cache->edit_tip_points;
|
||||
}
|
||||
|
||||
@@ -1706,7 +1721,7 @@ bool particles_ensure_procedural_data(Object *object,
|
||||
drw_particle_update_ptcache(object, psys);
|
||||
|
||||
ParticleDrawSource source;
|
||||
drw_particle_get_hair_source(object, psys, md, NULL, &source);
|
||||
drw_particle_get_hair_source(object, psys, md, nullptr, &source);
|
||||
|
||||
ParticleSettings *part = source.psys->part;
|
||||
ParticleBatchCache *cache = particle_batch_cache_get(source.psys);
|
||||
@@ -1715,8 +1730,8 @@ bool particles_ensure_procedural_data(Object *object,
|
||||
(*r_hair_cache)->final[subdiv].strands_res = 1 << (part->draw_step + subdiv);
|
||||
|
||||
/* Refreshed on combing and simulation. */
|
||||
if ((*r_hair_cache)->proc_point_buf == NULL ||
|
||||
(gpu_material && (*r_hair_cache)->proc_length_buf == NULL))
|
||||
if ((*r_hair_cache)->proc_point_buf == nullptr ||
|
||||
(gpu_material && (*r_hair_cache)->proc_length_buf == nullptr))
|
||||
{
|
||||
ensure_seg_pt_count(source.edit, source.psys, &cache->hair);
|
||||
particle_batch_cache_ensure_procedural_pos(
|
||||
@@ -1725,17 +1740,17 @@ bool particles_ensure_procedural_data(Object *object,
|
||||
}
|
||||
|
||||
/* Refreshed if active layer or custom data changes. */
|
||||
if ((*r_hair_cache)->proc_strand_buf == NULL) {
|
||||
if ((*r_hair_cache)->proc_strand_buf == nullptr) {
|
||||
particle_batch_cache_ensure_procedural_strand_data(
|
||||
source.edit, source.psys, source.md, &cache->hair);
|
||||
}
|
||||
|
||||
/* Refreshed only on subdiv count change. */
|
||||
if ((*r_hair_cache)->final[subdiv].proc_buf == NULL) {
|
||||
if ((*r_hair_cache)->final[subdiv].proc_buf == nullptr) {
|
||||
particle_batch_cache_ensure_procedural_final_points(&cache->hair, subdiv);
|
||||
need_ft_update = true;
|
||||
}
|
||||
if ((*r_hair_cache)->final[subdiv].proc_hairs[thickness_res - 1] == NULL) {
|
||||
if ((*r_hair_cache)->final[subdiv].proc_hairs[thickness_res - 1] == nullptr) {
|
||||
particle_batch_cache_ensure_procedural_indices(
|
||||
source.edit, source.psys, &cache->hair, thickness_res, subdiv);
|
||||
}
|
||||
@@ -11,6 +11,10 @@
|
||||
#include "GPU_batch.h"
|
||||
#include "MEM_guardedalloc.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/* Common */
|
||||
// #define DRW_DEBUG_MESH_CACHE_REQUEST
|
||||
|
||||
@@ -80,3 +84,7 @@ BLI_INLINE bool DRW_vbo_requested(GPUVertBuf *vbo)
|
||||
{
|
||||
return (vbo != NULL && (GPU_vertbuf_get_status(vbo) & GPU_VERTBUF_INIT) == 0);
|
||||
}
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
@@ -30,7 +30,7 @@
|
||||
/**
|
||||
* Colors & Constant.
|
||||
*/
|
||||
struct DRW_Global G_draw = {{{0}}};
|
||||
DRW_Global G_draw{};
|
||||
|
||||
static bool weight_ramp_custom = false;
|
||||
static ColorBand weight_ramp_copy;
|
||||
@@ -88,7 +88,7 @@ void DRW_globals_update(void)
|
||||
interp_v4_v4v4(gb->color_edit_mesh_middle, gb->color_vertex_select, gb->color_wire_edit, 0.35f);
|
||||
copy_v3_fl(gb->color_edit_mesh_middle,
|
||||
dot_v3v3(gb->color_edit_mesh_middle,
|
||||
(float[3]){0.3333f, 0.3333f, 0.3333f})); /* Desaturate */
|
||||
blender::float3{0.3333f, 0.3333f, 0.3333f})); /* Desaturate */
|
||||
|
||||
#ifdef WITH_FREESTYLE
|
||||
UI_GetThemeColor4fv(TH_FREESTYLE_EDGE_MARK, gb->color_edge_freestyle);
|
||||
@@ -196,7 +196,7 @@ void DRW_globals_update(void)
|
||||
} while (color <= gb->UBO_LAST_COLOR);
|
||||
}
|
||||
|
||||
if (G_draw.block_ubo == NULL) {
|
||||
if (G_draw.block_ubo == nullptr) {
|
||||
G_draw.block_ubo = GPU_uniformbuf_create_ex(
|
||||
sizeof(GlobalsUboStorage), gb, "GlobalsUboStorage");
|
||||
}
|
||||
@@ -236,7 +236,7 @@ void DRW_globals_update(void)
|
||||
DRW_TEXTURE_FREE_SAFE(G_draw.weight_ramp);
|
||||
}
|
||||
|
||||
if (G_draw.weight_ramp == NULL) {
|
||||
if (G_draw.weight_ramp == nullptr) {
|
||||
weight_ramp_custom = user_weight_ramp;
|
||||
memcpy(&weight_ramp_copy, &U.coba_weight, sizeof(ColorBand));
|
||||
|
||||
@@ -324,7 +324,7 @@ int DRW_object_wire_theme_get(Object *ob, ViewLayer *view_layer, float **r_color
|
||||
}
|
||||
}
|
||||
|
||||
if (r_color != NULL) {
|
||||
if (r_color != nullptr) {
|
||||
if (UNLIKELY(ob->base_flag & BASE_FROM_SET)) {
|
||||
*r_color = G_draw.block.color_wire;
|
||||
}
|
||||
@@ -441,7 +441,7 @@ bool DRW_object_is_flat(Object *ob, int *r_axis)
|
||||
bool DRW_object_axis_orthogonal_to_view(Object *ob, int axis)
|
||||
{
|
||||
float ob_rot[3][3], invviewmat[4][4];
|
||||
DRW_view_viewmat_get(NULL, invviewmat, true);
|
||||
DRW_view_viewmat_get(nullptr, invviewmat, true);
|
||||
BKE_object_rot_to_mat3(ob, ob_rot, true);
|
||||
float dot = dot_v3v3(ob_rot[axis], invviewmat[2]);
|
||||
if (fabsf(dot) < 1e-3) {
|
||||
@@ -172,11 +172,11 @@ static GPUTexture *create_volume_texture(const int dim[3],
|
||||
eGPUDataFormat data_format,
|
||||
const void *data)
|
||||
{
|
||||
GPUTexture *tex = NULL;
|
||||
GPUTexture *tex = nullptr;
|
||||
int final_dim[3] = {UNPACK3(dim)};
|
||||
|
||||
if (data == NULL) {
|
||||
return NULL;
|
||||
if (data == nullptr) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
while (1) {
|
||||
@@ -185,9 +185,9 @@ static GPUTexture *create_volume_texture(const int dim[3],
|
||||
1,
|
||||
texture_format,
|
||||
GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW,
|
||||
NULL);
|
||||
nullptr);
|
||||
|
||||
if (tex != NULL) {
|
||||
if (tex != nullptr) {
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -200,7 +200,7 @@ static GPUTexture *create_volume_texture(const int dim[3],
|
||||
}
|
||||
}
|
||||
|
||||
if (tex == NULL) {
|
||||
if (tex == nullptr) {
|
||||
printf("Error: Could not create 3D texture.\n");
|
||||
tex = GPU_texture_create_error(3, false);
|
||||
}
|
||||
@@ -215,7 +215,7 @@ static GPUTexture *create_volume_texture(const int dim[3],
|
||||
else {
|
||||
/* We need to resize the input. */
|
||||
int channels = ELEM(texture_format, GPU_R8, GPU_R16F, GPU_R32F) ? 1 : 4;
|
||||
float *rescaled_data = rescale_3d(dim, final_dim, channels, data);
|
||||
float *rescaled_data = rescale_3d(dim, final_dim, channels, static_cast<const float *>(data));
|
||||
if (rescaled_data) {
|
||||
GPU_texture_update_sub(tex, GPU_DATA_FLOAT, rescaled_data, 0, 0, 0, UNPACK3(final_dim));
|
||||
MEM_freeN(rescaled_data);
|
||||
@@ -231,7 +231,7 @@ static GPUTexture *create_volume_texture(const int dim[3],
|
||||
|
||||
static GPUTexture *create_field_texture(FluidDomainSettings *fds, bool single_precision)
|
||||
{
|
||||
void *field = NULL;
|
||||
void *field = nullptr;
|
||||
eGPUDataFormat data_format = GPU_DATA_FLOAT;
|
||||
eGPUTextureFormat texture_format = GPU_R8;
|
||||
|
||||
@@ -308,11 +308,11 @@ static GPUTexture *create_field_texture(FluidDomainSettings *fds, bool single_pr
|
||||
texture_format = GPU_R16F;
|
||||
break;
|
||||
default:
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
if (field == NULL) {
|
||||
return NULL;
|
||||
if (field == nullptr) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
GPUTexture *tex = create_volume_texture(fds->res, texture_format, data_format, field);
|
||||
@@ -332,8 +332,8 @@ static GPUTexture *create_density_texture(FluidDomainSettings *fds, int highres)
|
||||
data = manta_smoke_get_density(fds->fluid);
|
||||
}
|
||||
|
||||
if (data == NULL) {
|
||||
return NULL;
|
||||
if (data == nullptr) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
GPUTexture *tex = create_volume_texture(dim, GPU_R8, GPU_DATA_FLOAT, data);
|
||||
@@ -347,15 +347,15 @@ static GPUTexture *create_color_texture(FluidDomainSettings *fds, int highres)
|
||||
manta_smoke_has_colors(fds->fluid);
|
||||
|
||||
if (!has_color) {
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
int cell_count = (highres) ? manta_noise_get_cells(fds->fluid) : fds->total_cells;
|
||||
int *dim = (highres) ? fds->res_noise : fds->res;
|
||||
float *data = (float *)MEM_callocN(sizeof(float) * cell_count * 4, "smokeColorTexture");
|
||||
|
||||
if (data == NULL) {
|
||||
return NULL;
|
||||
if (data == nullptr) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
if (highres) {
|
||||
@@ -374,13 +374,13 @@ static GPUTexture *create_color_texture(FluidDomainSettings *fds, int highres)
|
||||
|
||||
static GPUTexture *create_flame_texture(FluidDomainSettings *fds, int highres)
|
||||
{
|
||||
float *source = NULL;
|
||||
float *source = nullptr;
|
||||
const bool has_fuel = (highres) ? manta_noise_has_fuel(fds->fluid) :
|
||||
manta_smoke_has_fuel(fds->fluid);
|
||||
int *dim = (highres) ? fds->res_noise : fds->res;
|
||||
|
||||
if (!has_fuel) {
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
if (highres) {
|
||||
@@ -478,7 +478,7 @@ void DRW_smoke_ensure(FluidModifierData *fmd, int highres)
|
||||
BLI_addtail(&DST.vmempool->smoke_textures, BLI_genericNodeN(&fds->tex_flame));
|
||||
}
|
||||
if (!fds->tex_flame_coba && fds->tex_flame) {
|
||||
fds->tex_flame_coba = create_transfer_function(TFUNC_FLAME_SPECTRUM, NULL);
|
||||
fds->tex_flame_coba = create_transfer_function(TFUNC_FLAME_SPECTRUM, nullptr);
|
||||
BLI_addtail(&DST.vmempool->smoke_textures, BLI_genericNodeN(&fds->tex_flame_coba));
|
||||
}
|
||||
if (!fds->tex_shadow) {
|
||||
@@ -497,14 +497,14 @@ void DRW_smoke_ensure_velocity(FluidModifierData *fmd)
|
||||
#else
|
||||
if (fmd->type & MOD_FLUID_TYPE_DOMAIN) {
|
||||
FluidDomainSettings *fds = fmd->domain;
|
||||
float *vel_x = NULL, *vel_y = NULL, *vel_z = NULL;
|
||||
float *vel_x = nullptr, *vel_y = nullptr, *vel_z = nullptr;
|
||||
|
||||
if (!get_smoke_velocity_field(fds, &vel_x, &vel_y, &vel_z)) {
|
||||
fds->vector_field = FLUID_DOMAIN_VECTOR_FIELD_VELOCITY;
|
||||
get_smoke_velocity_field(fds, &vel_x, &vel_y, &vel_z);
|
||||
}
|
||||
|
||||
if (ELEM(NULL, vel_x, vel_y, vel_z)) {
|
||||
if (ELEM(nullptr, vel_x, vel_y, vel_z)) {
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -54,15 +54,15 @@ struct DRWInstanceDataList {
|
||||
BLI_memblock *pool_buffers;
|
||||
};
|
||||
|
||||
typedef struct DRWTempBufferHandle {
|
||||
struct DRWTempBufferHandle {
|
||||
GPUVertBuf *buf;
|
||||
/** Format pointer for reuse. */
|
||||
GPUVertFormat *format;
|
||||
/** Touched vertex length for resize. */
|
||||
int *vert_len;
|
||||
} DRWTempBufferHandle;
|
||||
};
|
||||
|
||||
typedef struct DRWTempInstancingHandle {
|
||||
struct DRWTempInstancingHandle {
|
||||
/** Copy of geom but with the per-instance attributes. */
|
||||
GPUBatch *batch;
|
||||
/** Batch containing instancing attributes. */
|
||||
@@ -71,9 +71,9 @@ typedef struct DRWTempInstancingHandle {
|
||||
GPUVertBuf *buf;
|
||||
/** Original non-instanced batch pointer. */
|
||||
GPUBatch *geom;
|
||||
} DRWTempInstancingHandle;
|
||||
};
|
||||
|
||||
static ListBase g_idatalists = {NULL, NULL};
|
||||
static ListBase g_idatalists = {nullptr, nullptr};
|
||||
|
||||
static void instancing_batch_references_add(GPUBatch *batch)
|
||||
{
|
||||
@@ -103,10 +103,11 @@ GPUVertBuf *DRW_temp_buffer_request(DRWInstanceDataList *idatalist,
|
||||
GPUVertFormat *format,
|
||||
int *vert_len)
|
||||
{
|
||||
BLI_assert(format != NULL);
|
||||
BLI_assert(vert_len != NULL);
|
||||
BLI_assert(format != nullptr);
|
||||
BLI_assert(vert_len != nullptr);
|
||||
|
||||
DRWTempBufferHandle *handle = BLI_memblock_alloc(idatalist->pool_buffers);
|
||||
DRWTempBufferHandle *handle = static_cast<DRWTempBufferHandle *>(
|
||||
BLI_memblock_alloc(idatalist->pool_buffers));
|
||||
|
||||
if (handle->format != format) {
|
||||
handle->format = format;
|
||||
@@ -128,12 +129,13 @@ GPUBatch *DRW_temp_batch_instance_request(DRWInstanceDataList *idatalist,
|
||||
GPUBatch *geom)
|
||||
{
|
||||
/* Do not call this with a batch that is already an instancing batch. */
|
||||
BLI_assert(geom->inst[0] == NULL);
|
||||
BLI_assert(geom->inst[0] == nullptr);
|
||||
/* Only call with one of them. */
|
||||
BLI_assert((instancer != NULL) != (buf != NULL));
|
||||
BLI_assert((instancer != nullptr) != (buf != nullptr));
|
||||
|
||||
DRWTempInstancingHandle *handle = BLI_memblock_alloc(idatalist->pool_instancing);
|
||||
if (handle->batch == NULL) {
|
||||
DRWTempInstancingHandle *handle = static_cast<DRWTempInstancingHandle *>(
|
||||
BLI_memblock_alloc(idatalist->pool_instancing));
|
||||
if (handle->batch == nullptr) {
|
||||
handle->batch = GPU_batch_calloc();
|
||||
}
|
||||
|
||||
@@ -166,8 +168,8 @@ GPUBatch *DRW_temp_batch_request(DRWInstanceDataList *idatalist,
|
||||
GPUVertBuf *buf,
|
||||
GPUPrimType prim_type)
|
||||
{
|
||||
GPUBatch **batch_ptr = BLI_memblock_alloc(idatalist->pool_batching);
|
||||
if (*batch_ptr == NULL) {
|
||||
GPUBatch **batch_ptr = static_cast<GPUBatch **>(BLI_memblock_alloc(idatalist->pool_batching));
|
||||
if (*batch_ptr == nullptr) {
|
||||
*batch_ptr = GPU_batch_calloc();
|
||||
}
|
||||
|
||||
@@ -176,14 +178,14 @@ GPUBatch *DRW_temp_batch_request(DRWInstanceDataList *idatalist,
|
||||
(GPU_vertbuf_get_status(buf) & GPU_VERTBUF_DATA_UPLOADED);
|
||||
if (!is_compatible) {
|
||||
GPU_batch_clear(batch);
|
||||
GPU_batch_init(batch, prim_type, buf, NULL);
|
||||
GPU_batch_init(batch, prim_type, buf, nullptr);
|
||||
}
|
||||
return batch;
|
||||
}
|
||||
|
||||
static void temp_buffer_handle_free(DRWTempBufferHandle *handle)
|
||||
{
|
||||
handle->format = NULL;
|
||||
handle->format = nullptr;
|
||||
GPU_VERTBUF_DISCARD_SAFE(handle->buf);
|
||||
}
|
||||
|
||||
@@ -204,8 +206,8 @@ void DRW_instance_buffer_finish(DRWInstanceDataList *idatalist)
|
||||
BLI_memblock_iter iter;
|
||||
DRWTempBufferHandle *handle;
|
||||
BLI_memblock_iternew(idatalist->pool_buffers, &iter);
|
||||
while ((handle = BLI_memblock_iterstep(&iter))) {
|
||||
if (handle->vert_len != NULL) {
|
||||
while ((handle = static_cast<DRWTempBufferHandle *>(BLI_memblock_iterstep(&iter)))) {
|
||||
if (handle->vert_len != nullptr) {
|
||||
uint vert_len = *(handle->vert_len);
|
||||
uint target_buf_size = ((vert_len / DRW_BUFFER_VERTS_CHUNK) + 1) * DRW_BUFFER_VERTS_CHUNK;
|
||||
if (target_buf_size < GPU_vertbuf_get_vertex_alloc(handle->buf)) {
|
||||
@@ -218,14 +220,14 @@ void DRW_instance_buffer_finish(DRWInstanceDataList *idatalist)
|
||||
/* Finish pending instancing batches. */
|
||||
DRWTempInstancingHandle *handle_inst;
|
||||
BLI_memblock_iternew(idatalist->pool_instancing, &iter);
|
||||
while ((handle_inst = BLI_memblock_iterstep(&iter))) {
|
||||
while ((handle_inst = static_cast<DRWTempInstancingHandle *>(BLI_memblock_iterstep(&iter)))) {
|
||||
GPUBatch *batch = handle_inst->batch;
|
||||
if (batch && batch->flag == GPU_BATCH_BUILDING) {
|
||||
GPUVertBuf *inst_buf = handle_inst->buf;
|
||||
GPUBatch *inst_batch = handle_inst->instancer;
|
||||
GPUBatch *geom = handle_inst->geom;
|
||||
GPU_batch_copy(batch, geom);
|
||||
if (inst_batch != NULL) {
|
||||
if (inst_batch != nullptr) {
|
||||
for (int i = 0; i < GPU_BATCH_INST_VBO_MAX_LEN && inst_batch->verts[i]; i++) {
|
||||
GPU_batch_instbuf_add(batch, inst_batch->verts[i], false);
|
||||
}
|
||||
@@ -252,8 +254,9 @@ void DRW_instance_buffer_finish(DRWInstanceDataList *idatalist)
|
||||
|
||||
static DRWInstanceData *drw_instance_data_create(DRWInstanceDataList *idatalist, uint attr_size)
|
||||
{
|
||||
DRWInstanceData *idata = MEM_callocN(sizeof(DRWInstanceData), "DRWInstanceData");
|
||||
idata->next = NULL;
|
||||
DRWInstanceData *idata = static_cast<DRWInstanceData *>(
|
||||
MEM_callocN(sizeof(DRWInstanceData), "DRWInstanceData"));
|
||||
idata->next = nullptr;
|
||||
idata->used = true;
|
||||
idata->data_size = attr_size;
|
||||
idata->mempool = BLI_mempool_create(sizeof(float) * idata->data_size, 0, 16, 0);
|
||||
@@ -261,7 +264,7 @@ static DRWInstanceData *drw_instance_data_create(DRWInstanceDataList *idatalist,
|
||||
BLI_assert(attr_size > 0);
|
||||
|
||||
/* Push to linked list. */
|
||||
if (idatalist->idata_head[attr_size - 1] == NULL) {
|
||||
if (idatalist->idata_head[attr_size - 1] == nullptr) {
|
||||
idatalist->idata_head[attr_size - 1] = idata;
|
||||
}
|
||||
else {
|
||||
@@ -307,7 +310,8 @@ DRWInstanceData *DRW_instance_data_request(DRWInstanceDataList *idatalist, uint
|
||||
|
||||
DRWInstanceDataList *DRW_instance_data_list_create(void)
|
||||
{
|
||||
DRWInstanceDataList *idatalist = MEM_callocN(sizeof(DRWInstanceDataList), "DRWInstanceDataList");
|
||||
DRWInstanceDataList *idatalist = static_cast<DRWInstanceDataList *>(
|
||||
MEM_callocN(sizeof(DRWInstanceDataList), "DRWInstanceDataList"));
|
||||
|
||||
idatalist->pool_batching = BLI_memblock_create(sizeof(GPUBatch *));
|
||||
idatalist->pool_instancing = BLI_memblock_create(sizeof(DRWTempInstancingHandle));
|
||||
@@ -328,8 +332,8 @@ void DRW_instance_data_list_free(DRWInstanceDataList *idatalist)
|
||||
DRW_instance_data_free(idata);
|
||||
MEM_freeN(idata);
|
||||
}
|
||||
idatalist->idata_head[i] = NULL;
|
||||
idatalist->idata_tail[i] = NULL;
|
||||
idatalist->idata_head[i] = nullptr;
|
||||
idatalist->idata_tail[i] = nullptr;
|
||||
}
|
||||
|
||||
BLI_memblock_destroy(idatalist->pool_buffers, (MemblockValFreeFP)temp_buffer_handle_free);
|
||||
@@ -358,7 +362,7 @@ void DRW_instance_data_list_free_unused(DRWInstanceDataList *idatalist)
|
||||
|
||||
/* Remove unused data blocks and sanitize each list. */
|
||||
for (int i = 0; i < MAX_INSTANCE_DATA_SIZE; i++) {
|
||||
idatalist->idata_tail[i] = NULL;
|
||||
idatalist->idata_tail[i] = nullptr;
|
||||
for (idata = idatalist->idata_head[i]; idata; idata = next_idata) {
|
||||
next_idata = idata->next;
|
||||
if (idata->used == false) {
|
||||
@@ -373,7 +377,7 @@ void DRW_instance_data_list_free_unused(DRWInstanceDataList *idatalist)
|
||||
MEM_freeN(idata);
|
||||
}
|
||||
else {
|
||||
if (idatalist->idata_tail[i] != NULL) {
|
||||
if (idatalist->idata_tail[i] != nullptr) {
|
||||
idatalist->idata_tail[i]->next = idata;
|
||||
}
|
||||
idatalist->idata_tail[i] = idata;
|
||||
@@ -402,7 +406,7 @@ void DRW_instance_data_list_resize(DRWInstanceDataList *idatalist)
|
||||
#define CHUNK_LIST_STEP (1 << 4)
|
||||
|
||||
/** A chunked UBO manager that doesn't actually allocate unneeded chunks. */
|
||||
typedef struct DRWSparseUniformBuf {
|
||||
struct DRWSparseUniformBuf {
|
||||
/* Memory buffers used to stage chunk data before transfer to UBOs. */
|
||||
char **chunk_buffers;
|
||||
/* Uniform buffer objects with flushed data. */
|
||||
@@ -412,15 +416,15 @@ typedef struct DRWSparseUniformBuf {
|
||||
|
||||
int num_chunks;
|
||||
uint item_size, chunk_size, chunk_bytes;
|
||||
} DRWSparseUniformBuf;
|
||||
};
|
||||
|
||||
static void drw_sparse_uniform_buffer_init(DRWSparseUniformBuf *buffer,
|
||||
uint item_size,
|
||||
uint chunk_size)
|
||||
{
|
||||
buffer->chunk_buffers = NULL;
|
||||
buffer->chunk_used = NULL;
|
||||
buffer->chunk_ubos = NULL;
|
||||
buffer->chunk_buffers = nullptr;
|
||||
buffer->chunk_used = nullptr;
|
||||
buffer->chunk_ubos = nullptr;
|
||||
buffer->num_chunks = 0;
|
||||
buffer->item_size = item_size;
|
||||
buffer->chunk_size = chunk_size;
|
||||
@@ -429,7 +433,8 @@ static void drw_sparse_uniform_buffer_init(DRWSparseUniformBuf *buffer,
|
||||
|
||||
DRWSparseUniformBuf *DRW_sparse_uniform_buffer_new(uint item_size, uint chunk_size)
|
||||
{
|
||||
DRWSparseUniformBuf *buf = MEM_mallocN(sizeof(DRWSparseUniformBuf), __func__);
|
||||
DRWSparseUniformBuf *buf = static_cast<DRWSparseUniformBuf *>(
|
||||
MEM_mallocN(sizeof(DRWSparseUniformBuf), __func__));
|
||||
drw_sparse_uniform_buffer_init(buf, item_size, chunk_size);
|
||||
return buf;
|
||||
}
|
||||
@@ -438,7 +443,7 @@ void DRW_sparse_uniform_buffer_flush(DRWSparseUniformBuf *buffer)
|
||||
{
|
||||
for (int i = 0; i < buffer->num_chunks; i++) {
|
||||
if (BLI_BITMAP_TEST(buffer->chunk_used, i)) {
|
||||
if (buffer->chunk_ubos[i] == NULL) {
|
||||
if (buffer->chunk_ubos[i] == nullptr) {
|
||||
buffer->chunk_ubos[i] = GPU_uniformbuf_create(buffer->chunk_bytes);
|
||||
}
|
||||
GPU_uniformbuf_update(buffer->chunk_ubos[i], buffer->chunk_buffers[i]);
|
||||
@@ -457,7 +462,7 @@ void DRW_sparse_uniform_buffer_clear(DRWSparseUniformBuf *buffer, bool free_all)
|
||||
|
||||
if (buffer->chunk_ubos[i]) {
|
||||
GPU_uniformbuf_free(buffer->chunk_ubos[i]);
|
||||
buffer->chunk_ubos[i] = NULL;
|
||||
buffer->chunk_ubos[i] = nullptr;
|
||||
}
|
||||
}
|
||||
else {
|
||||
@@ -479,9 +484,10 @@ void DRW_sparse_uniform_buffer_clear(DRWSparseUniformBuf *buffer, bool free_all)
|
||||
}
|
||||
|
||||
if (buffer->num_chunks != old_num_chunks) {
|
||||
buffer->chunk_buffers = MEM_recallocN(buffer->chunk_buffers,
|
||||
buffer->num_chunks * sizeof(void *));
|
||||
buffer->chunk_ubos = MEM_recallocN(buffer->chunk_ubos, buffer->num_chunks * sizeof(void *));
|
||||
buffer->chunk_buffers = static_cast<char **>(
|
||||
MEM_recallocN(buffer->chunk_buffers, buffer->num_chunks * sizeof(void *)));
|
||||
buffer->chunk_ubos = static_cast<GPUUniformBuf **>(
|
||||
MEM_recallocN(buffer->chunk_ubos, buffer->num_chunks * sizeof(void *)));
|
||||
BLI_BITMAP_RESIZE(buffer->chunk_used, buffer->num_chunks);
|
||||
}
|
||||
|
||||
@@ -504,7 +510,7 @@ static GPUUniformBuf *drw_sparse_uniform_buffer_get_ubo(DRWSparseUniformBuf *buf
|
||||
if (buffer && chunk < buffer->num_chunks && BLI_BITMAP_TEST(buffer->chunk_used, chunk)) {
|
||||
return buffer->chunk_ubos[chunk];
|
||||
}
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
void DRW_sparse_uniform_buffer_bind(DRWSparseUniformBuf *buffer, int chunk, int location)
|
||||
@@ -527,16 +533,18 @@ void *DRW_sparse_uniform_buffer_ensure_item(DRWSparseUniformBuf *buffer, int chu
|
||||
{
|
||||
if (chunk >= buffer->num_chunks) {
|
||||
buffer->num_chunks = (chunk + CHUNK_LIST_STEP) & ~(CHUNK_LIST_STEP - 1);
|
||||
buffer->chunk_buffers = MEM_recallocN(buffer->chunk_buffers,
|
||||
buffer->num_chunks * sizeof(void *));
|
||||
buffer->chunk_ubos = MEM_recallocN(buffer->chunk_ubos, buffer->num_chunks * sizeof(void *));
|
||||
buffer->chunk_buffers = static_cast<char **>(
|
||||
MEM_recallocN(buffer->chunk_buffers, buffer->num_chunks * sizeof(void *)));
|
||||
buffer->chunk_ubos = static_cast<GPUUniformBuf **>(
|
||||
MEM_recallocN(buffer->chunk_ubos, buffer->num_chunks * sizeof(void *)));
|
||||
BLI_BITMAP_RESIZE(buffer->chunk_used, buffer->num_chunks);
|
||||
}
|
||||
|
||||
char *chunk_buffer = buffer->chunk_buffers[chunk];
|
||||
|
||||
if (chunk_buffer == NULL) {
|
||||
buffer->chunk_buffers[chunk] = chunk_buffer = MEM_callocN(buffer->chunk_bytes, __func__);
|
||||
if (chunk_buffer == nullptr) {
|
||||
buffer->chunk_buffers[chunk] = chunk_buffer = static_cast<char *>(
|
||||
MEM_callocN(buffer->chunk_bytes, __func__));
|
||||
}
|
||||
else if (!BLI_BITMAP_TEST(buffer->chunk_used, chunk)) {
|
||||
memset(chunk_buffer, 0, buffer->chunk_bytes);
|
||||
@@ -554,7 +562,7 @@ void *DRW_sparse_uniform_buffer_ensure_item(DRWSparseUniformBuf *buffer, int chu
|
||||
* \{ */
|
||||
|
||||
/** Sparse UBO buffer for a specific uniform attribute list. */
|
||||
typedef struct DRWUniformAttrBuf {
|
||||
struct DRWUniformAttrBuf {
|
||||
/* Attribute list (also used as hash table key) handled by this buffer. */
|
||||
GPUUniformAttrList key;
|
||||
/* Sparse UBO buffer containing the attribute values. */
|
||||
@@ -563,7 +571,7 @@ typedef struct DRWUniformAttrBuf {
|
||||
DRWResourceHandle last_handle;
|
||||
/* Linked list pointer used for freeing the empty unneeded buffers. */
|
||||
struct DRWUniformAttrBuf *next_empty;
|
||||
} DRWUniformAttrBuf;
|
||||
};
|
||||
|
||||
static DRWUniformAttrBuf *drw_uniform_attrs_pool_ensure(GHash *table,
|
||||
const GPUUniformAttrList *key)
|
||||
@@ -571,7 +579,8 @@ static DRWUniformAttrBuf *drw_uniform_attrs_pool_ensure(GHash *table,
|
||||
void **pkey, **pval;
|
||||
|
||||
if (!BLI_ghash_ensure_p_ex(table, key, &pkey, &pval)) {
|
||||
DRWUniformAttrBuf *buffer = MEM_callocN(sizeof(*buffer), __func__);
|
||||
DRWUniformAttrBuf *buffer = static_cast<DRWUniformAttrBuf *>(
|
||||
MEM_callocN(sizeof(*buffer), __func__));
|
||||
|
||||
*pkey = &buffer->key;
|
||||
*pval = buffer;
|
||||
@@ -597,7 +606,7 @@ static void drw_uniform_attribute_lookup(GPUUniformAttr *attr,
|
||||
BKE_object_dupli_find_rgba_attribute(ob, dupli_source, dupli_parent, attr->name, r_data);
|
||||
}
|
||||
else {
|
||||
BKE_object_dupli_find_rgba_attribute(ob, NULL, NULL, attr->name, r_data);
|
||||
BKE_object_dupli_find_rgba_attribute(ob, nullptr, nullptr, attr->name, r_data);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -615,7 +624,8 @@ void drw_uniform_attrs_pool_update(GHash *table,
|
||||
|
||||
int chunk = DRW_handle_chunk_get(handle);
|
||||
int item = DRW_handle_id_get(handle);
|
||||
float(*values)[4] = DRW_sparse_uniform_buffer_ensure_item(&buffer->ubos, chunk, item);
|
||||
float(*values)[4] = static_cast<float(*)[4]>(
|
||||
DRW_sparse_uniform_buffer_ensure_item(&buffer->ubos, chunk, item));
|
||||
|
||||
LISTBASE_FOREACH (GPUUniformAttr *, attr, &buffer->key.list) {
|
||||
drw_uniform_attribute_lookup(attr, ob, dupli_parent, dupli_source, *values++);
|
||||
@@ -627,16 +637,16 @@ GPUUniformBuf *drw_ensure_layer_attribute_buffer(void)
|
||||
{
|
||||
DRWData *data = DST.vmempool;
|
||||
|
||||
if (data->vlattrs_ubo_ready && data->vlattrs_ubo != NULL) {
|
||||
if (data->vlattrs_ubo_ready && data->vlattrs_ubo != nullptr) {
|
||||
return data->vlattrs_ubo;
|
||||
}
|
||||
|
||||
/* Allocate the buffer data. */
|
||||
const int buf_size = DRW_RESOURCE_CHUNK_LEN;
|
||||
|
||||
if (data->vlattrs_buf == NULL) {
|
||||
data->vlattrs_buf = MEM_calloc_arrayN(
|
||||
buf_size, sizeof(LayerAttribute), "View Layer Attr Data");
|
||||
if (data->vlattrs_buf == nullptr) {
|
||||
data->vlattrs_buf = static_cast<LayerAttribute *>(
|
||||
MEM_calloc_arrayN(buf_size, sizeof(LayerAttribute), "View Layer Attr Data"));
|
||||
}
|
||||
|
||||
/* Look up attributes.
|
||||
@@ -667,7 +677,7 @@ GPUUniformBuf *drw_ensure_layer_attribute_buffer(void)
|
||||
buffer[0].buffer_length = count;
|
||||
|
||||
/* Update or create the UBO object. */
|
||||
if (data->vlattrs_ubo != NULL) {
|
||||
if (data->vlattrs_ubo != nullptr) {
|
||||
GPU_uniformbuf_update(data->vlattrs_ubo, buffer);
|
||||
}
|
||||
else {
|
||||
@@ -682,8 +692,8 @@ GPUUniformBuf *drw_ensure_layer_attribute_buffer(void)
|
||||
|
||||
DRWSparseUniformBuf *DRW_uniform_attrs_pool_find_ubo(GHash *table, const GPUUniformAttrList *key)
|
||||
{
|
||||
DRWUniformAttrBuf *buffer = BLI_ghash_lookup(table, key);
|
||||
return buffer ? &buffer->ubos : NULL;
|
||||
DRWUniformAttrBuf *buffer = static_cast<DRWUniformAttrBuf *>(BLI_ghash_lookup(table, key));
|
||||
return buffer ? &buffer->ubos : nullptr;
|
||||
}
|
||||
|
||||
GHash *DRW_uniform_attrs_pool_new(void)
|
||||
@@ -701,7 +711,7 @@ void DRW_uniform_attrs_pool_flush_all(GHash *table)
|
||||
|
||||
static void drw_uniform_attrs_pool_free_cb(void *ptr)
|
||||
{
|
||||
DRWUniformAttrBuf *buffer = ptr;
|
||||
DRWUniformAttrBuf *buffer = static_cast<DRWUniformAttrBuf *>(ptr);
|
||||
|
||||
GPU_uniform_attr_list_free(&buffer->key);
|
||||
DRW_sparse_uniform_buffer_clear(&buffer->ubos, true);
|
||||
@@ -710,7 +720,7 @@ static void drw_uniform_attrs_pool_free_cb(void *ptr)
|
||||
|
||||
void DRW_uniform_attrs_pool_clear_all(GHash *table)
|
||||
{
|
||||
DRWUniformAttrBuf *remove_list = NULL;
|
||||
DRWUniformAttrBuf *remove_list = nullptr;
|
||||
|
||||
GHASH_FOREACH_BEGIN (DRWUniformAttrBuf *, buffer, table) {
|
||||
buffer->last_handle = (DRWResourceHandle)-1;
|
||||
@@ -726,13 +736,13 @@ void DRW_uniform_attrs_pool_clear_all(GHash *table)
|
||||
while (remove_list) {
|
||||
DRWUniformAttrBuf *buffer = remove_list;
|
||||
remove_list = buffer->next_empty;
|
||||
BLI_ghash_remove(table, &buffer->key, NULL, drw_uniform_attrs_pool_free_cb);
|
||||
BLI_ghash_remove(table, &buffer->key, nullptr, drw_uniform_attrs_pool_free_cb);
|
||||
}
|
||||
}
|
||||
|
||||
void DRW_uniform_attrs_pool_free(GHash *table)
|
||||
{
|
||||
BLI_ghash_free(table, NULL, drw_uniform_attrs_pool_free_cb);
|
||||
BLI_ghash_free(table, nullptr, drw_uniform_attrs_pool_free_cb);
|
||||
}
|
||||
|
||||
/** \} */
|
||||
@@ -106,12 +106,12 @@
|
||||
#include "DRW_select_buffer.h"
|
||||
|
||||
/** Render State: No persistent data between draw calls. */
|
||||
DRWManager DST = {NULL};
|
||||
DRWManager DST = {nullptr};
|
||||
|
||||
static struct {
|
||||
ListBase /*DRWRegisteredDrawEngine*/ engines;
|
||||
int len;
|
||||
} g_registered_engines = {{NULL}};
|
||||
} g_registered_engines = {{nullptr}};
|
||||
|
||||
static void drw_state_prepare_clean_for_draw(DRWManager *dst)
|
||||
{
|
||||
@@ -131,7 +131,7 @@ static void drw_state_ensure_not_reused(DRWManager *dst)
|
||||
|
||||
static bool drw_draw_show_annotation(void)
|
||||
{
|
||||
if (DST.draw_ctx.space_data == NULL) {
|
||||
if (DST.draw_ctx.space_data == nullptr) {
|
||||
View3D *v3d = DST.draw_ctx.v3d;
|
||||
return (v3d && ((v3d->flag2 & V3D_SHOW_ANNOTATION) != 0) &&
|
||||
((v3d->flag2 & V3D_HIDE_OVERLAYS) == 0));
|
||||
@@ -158,7 +158,7 @@ static bool drw_draw_show_annotation(void)
|
||||
|
||||
static void drw_task_graph_init(void)
|
||||
{
|
||||
BLI_assert(DST.task_graph == NULL);
|
||||
BLI_assert(DST.task_graph == nullptr);
|
||||
DST.task_graph = BLI_task_graph_create();
|
||||
DST.delayed_extraction = BLI_gset_ptr_new(__func__);
|
||||
}
|
||||
@@ -169,11 +169,11 @@ static void drw_task_graph_deinit(void)
|
||||
|
||||
BLI_gset_free(DST.delayed_extraction,
|
||||
(void (*)(void *key))drw_batch_cache_generate_requested_evaluated_mesh_or_curve);
|
||||
DST.delayed_extraction = NULL;
|
||||
DST.delayed_extraction = nullptr;
|
||||
BLI_task_graph_work_and_wait(DST.task_graph);
|
||||
|
||||
BLI_task_graph_free(DST.task_graph);
|
||||
DST.task_graph = NULL;
|
||||
DST.task_graph = nullptr;
|
||||
}
|
||||
|
||||
/** \} */
|
||||
@@ -263,12 +263,12 @@ bool DRW_object_is_visible_psys_in_active_context(const Object *object, const Pa
|
||||
return true;
|
||||
}
|
||||
|
||||
Object *DRW_object_get_dupli_parent(const Object *UNUSED(ob))
|
||||
Object *DRW_object_get_dupli_parent(const Object * /*ob*/)
|
||||
{
|
||||
return DST.dupli_parent;
|
||||
}
|
||||
|
||||
DupliObject *DRW_object_get_dupli(const Object *UNUSED(ob))
|
||||
DupliObject *DRW_object_get_dupli(const Object * /*ob*/)
|
||||
{
|
||||
return DST.dupli_source;
|
||||
}
|
||||
@@ -306,7 +306,7 @@ const float *DRW_viewport_pixelsize_get(void)
|
||||
static void drw_context_state_init(void)
|
||||
{
|
||||
if (DST.draw_ctx.obact) {
|
||||
DST.draw_ctx.object_mode = DST.draw_ctx.obact->mode;
|
||||
DST.draw_ctx.object_mode = eObjectMode(DST.draw_ctx.obact->mode);
|
||||
}
|
||||
else {
|
||||
DST.draw_ctx.object_mode = OB_MODE_OBJECT;
|
||||
@@ -317,7 +317,7 @@ static void drw_context_state_init(void)
|
||||
DST.draw_ctx.object_edit = DST.draw_ctx.obact;
|
||||
}
|
||||
else {
|
||||
DST.draw_ctx.object_edit = NULL;
|
||||
DST.draw_ctx.object_edit = nullptr;
|
||||
}
|
||||
|
||||
/* Pose object. */
|
||||
@@ -328,7 +328,7 @@ static void drw_context_state_init(void)
|
||||
DST.draw_ctx.object_pose = BKE_object_pose_armature_get(DST.draw_ctx.obact);
|
||||
}
|
||||
else {
|
||||
DST.draw_ctx.object_pose = NULL;
|
||||
DST.draw_ctx.object_pose = nullptr;
|
||||
}
|
||||
|
||||
DST.draw_ctx.sh_cfg = GPU_SHADER_CFG_DEFAULT;
|
||||
@@ -339,9 +339,10 @@ static void drw_context_state_init(void)

static void draw_unit_state_create(void)
{
DRWObjectInfos *infos = BLI_memblock_alloc(DST.vmempool->obinfos);
DRWObjectMatrix *mats = BLI_memblock_alloc(DST.vmempool->obmats);
DRWCullingState *culling = BLI_memblock_alloc(DST.vmempool->cullstates);
DRWObjectInfos *infos = static_cast<DRWObjectInfos *>(BLI_memblock_alloc(DST.vmempool->obinfos));
DRWObjectMatrix *mats = static_cast<DRWObjectMatrix *>(BLI_memblock_alloc(DST.vmempool->obmats));
DRWCullingState *culling = static_cast<DRWCullingState *>(
BLI_memblock_alloc(DST.vmempool->cullstates));

unit_m4(mats->model);
unit_m4(mats->modelinverse);
@@ -356,14 +357,14 @@ static void draw_unit_state_create(void)

/* TODO(fclem): get rid of this. */
culling->bsphere.radius = -1.0f;
culling->user_data = NULL;
culling->user_data = nullptr;

DRW_handle_increment(&DST.resource_handle);
}

DRWData *DRW_viewport_data_create(void)
{
DRWData *drw_data = MEM_callocN(sizeof(DRWData), "DRWData");
DRWData *drw_data = static_cast<DRWData *>(MEM_callocN(sizeof(DRWData), "DRWData"));

drw_data->texture_pool = DRW_texture_pool_create();

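The allocation hunks above illustrate the most common mechanical change in the file: MEM_callocN() and BLI_memblock_alloc() return void *, which C converts to any object pointer implicitly, while C++ demands an explicit static_cast. A minimal stand-alone sketch using plain calloc as a stand-in for the Blender allocators (an assumption for illustration only):

    #include <cstdlib>

    struct DrawData {
      float model[4][4];
    };

    static DrawData *draw_data_alloc()
    {
      /* Valid without the cast in C; C++ requires the explicit conversion. */
      return static_cast<DrawData *>(calloc(1, sizeof(DrawData)));
    }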
@@ -409,7 +410,7 @@ static void draw_texture_release(DRWData *drw_data)
|
||||
BLI_memblock_iter iter;
|
||||
GPUTexture **tex;
|
||||
BLI_memblock_iternew(drw_data->images, &iter);
|
||||
while ((tex = BLI_memblock_iterstep(&iter))) {
|
||||
while ((tex = static_cast<GPUTexture **>(BLI_memblock_iterstep(&iter)))) {
|
||||
GPU_texture_free(*tex);
|
||||
}
|
||||
}
|
||||
@@ -422,7 +423,7 @@ static void draw_prune_vlattrs(DRWData *drw_data)
|
||||
LISTBASE_FOREACH_MUTABLE (GPULayerAttr *, attr, &drw_data->vlattrs_name_list) {
|
||||
if (++attr->users > 10) {
|
||||
BLI_ghash_remove(
|
||||
drw_data->vlattrs_name_cache, POINTER_FROM_UINT(attr->hash_code), NULL, NULL);
|
||||
drw_data->vlattrs_name_cache, POINTER_FROM_UINT(attr->hash_code), nullptr, nullptr);
|
||||
BLI_freelinkN(&drw_data->vlattrs_name_list, attr);
|
||||
}
|
||||
}
|
||||
@@ -433,17 +434,17 @@ static void drw_viewport_data_reset(DRWData *drw_data)
|
||||
draw_texture_release(drw_data);
|
||||
draw_prune_vlattrs(drw_data);
|
||||
|
||||
BLI_memblock_clear(drw_data->commands, NULL);
|
||||
BLI_memblock_clear(drw_data->commands_small, NULL);
|
||||
BLI_memblock_clear(drw_data->callbuffers, NULL);
|
||||
BLI_memblock_clear(drw_data->obmats, NULL);
|
||||
BLI_memblock_clear(drw_data->obinfos, NULL);
|
||||
BLI_memblock_clear(drw_data->cullstates, NULL);
|
||||
BLI_memblock_clear(drw_data->shgroups, NULL);
|
||||
BLI_memblock_clear(drw_data->uniforms, NULL);
|
||||
BLI_memblock_clear(drw_data->passes, NULL);
|
||||
BLI_memblock_clear(drw_data->views, NULL);
|
||||
BLI_memblock_clear(drw_data->images, NULL);
|
||||
BLI_memblock_clear(drw_data->commands, nullptr);
|
||||
BLI_memblock_clear(drw_data->commands_small, nullptr);
|
||||
BLI_memblock_clear(drw_data->callbuffers, nullptr);
|
||||
BLI_memblock_clear(drw_data->obmats, nullptr);
|
||||
BLI_memblock_clear(drw_data->obinfos, nullptr);
|
||||
BLI_memblock_clear(drw_data->cullstates, nullptr);
|
||||
BLI_memblock_clear(drw_data->shgroups, nullptr);
|
||||
BLI_memblock_clear(drw_data->uniforms, nullptr);
|
||||
BLI_memblock_clear(drw_data->passes, nullptr);
|
||||
BLI_memblock_clear(drw_data->views, nullptr);
|
||||
BLI_memblock_clear(drw_data->images, nullptr);
|
||||
DRW_uniform_attrs_pool_clear_all(drw_data->obattrs_ubo_pool);
|
||||
DRW_instance_data_list_free_unused(drw_data->idatalist);
|
||||
DRW_instance_data_list_resize(drw_data->idatalist);
|
||||
@@ -455,19 +456,19 @@ void DRW_viewport_data_free(DRWData *drw_data)
|
||||
{
|
||||
draw_texture_release(drw_data);
|
||||
|
||||
BLI_memblock_destroy(drw_data->commands, NULL);
|
||||
BLI_memblock_destroy(drw_data->commands_small, NULL);
|
||||
BLI_memblock_destroy(drw_data->callbuffers, NULL);
|
||||
BLI_memblock_destroy(drw_data->obmats, NULL);
|
||||
BLI_memblock_destroy(drw_data->obinfos, NULL);
|
||||
BLI_memblock_destroy(drw_data->cullstates, NULL);
|
||||
BLI_memblock_destroy(drw_data->shgroups, NULL);
|
||||
BLI_memblock_destroy(drw_data->uniforms, NULL);
|
||||
BLI_memblock_destroy(drw_data->views, NULL);
|
||||
BLI_memblock_destroy(drw_data->passes, NULL);
|
||||
BLI_memblock_destroy(drw_data->images, NULL);
|
||||
BLI_memblock_destroy(drw_data->commands, nullptr);
|
||||
BLI_memblock_destroy(drw_data->commands_small, nullptr);
|
||||
BLI_memblock_destroy(drw_data->callbuffers, nullptr);
|
||||
BLI_memblock_destroy(drw_data->obmats, nullptr);
|
||||
BLI_memblock_destroy(drw_data->obinfos, nullptr);
|
||||
BLI_memblock_destroy(drw_data->cullstates, nullptr);
|
||||
BLI_memblock_destroy(drw_data->shgroups, nullptr);
|
||||
BLI_memblock_destroy(drw_data->uniforms, nullptr);
|
||||
BLI_memblock_destroy(drw_data->views, nullptr);
|
||||
BLI_memblock_destroy(drw_data->passes, nullptr);
|
||||
BLI_memblock_destroy(drw_data->images, nullptr);
|
||||
DRW_uniform_attrs_pool_free(drw_data->obattrs_ubo_pool);
|
||||
BLI_ghash_free(drw_data->vlattrs_name_cache, NULL, NULL);
|
||||
BLI_ghash_free(drw_data->vlattrs_name_cache, nullptr, nullptr);
|
||||
BLI_freelistN(&drw_data->vlattrs_name_list);
|
||||
if (drw_data->vlattrs_ubo) {
|
||||
GPU_uniformbuf_free(drw_data->vlattrs_ubo);
|
||||
@@ -478,7 +479,7 @@ void DRW_viewport_data_free(DRWData *drw_data)
|
||||
for (int i = 0; i < 2; i++) {
|
||||
DRW_view_data_free(drw_data->view_data[i]);
|
||||
}
|
||||
if (drw_data->matrices_ubo != NULL) {
|
||||
if (drw_data->matrices_ubo != nullptr) {
|
||||
for (int i = 0; i < drw_data->ubo_len; i++) {
|
||||
GPU_uniformbuf_free(drw_data->matrices_ubo[i]);
|
||||
GPU_uniformbuf_free(drw_data->obinfos_ubo[i]);
|
||||
@@ -496,7 +497,7 @@ static DRWData *drw_viewport_data_ensure(GPUViewport *viewport)
|
||||
DRWData **vmempool_p = GPU_viewport_data_get(viewport);
|
||||
DRWData *vmempool = *vmempool_p;
|
||||
|
||||
if (vmempool == NULL) {
|
||||
if (vmempool == nullptr) {
|
||||
*vmempool_p = vmempool = DRW_viewport_data_create();
|
||||
}
|
||||
return vmempool;
|
||||
@@ -505,10 +506,10 @@ static DRWData *drw_viewport_data_ensure(GPUViewport *viewport)
|
||||
/**
|
||||
* Sets DST.viewport, DST.size and a lot of other important variables.
|
||||
* Needs to be called before enabling any draw engine.
|
||||
* - viewport can be NULL. In this case the data will not be stored and will be free at
|
||||
* - viewport can be nullptr. In this case the data will not be stored and will be free at
|
||||
* drw_manager_exit().
|
||||
* - size can be NULL to get it from viewport.
|
||||
* - if viewport and size are NULL, size is set to (1, 1).
|
||||
* - size can be nullptr to get it from viewport.
|
||||
* - if viewport and size are nullptr, size is set to (1, 1).
|
||||
*
|
||||
* IMPORTANT: #drw_manager_init can be called multiple times before #drw_manager_exit.
|
||||
*/
|
||||
@@ -523,7 +524,7 @@ static void drw_manager_init(DRWManager *dst, GPUViewport *viewport, const int s
|
||||
/* Manager was init first without a viewport, created DRWData, but is being re-init.
|
||||
* In this case, keep the old data. */
|
||||
/* If it is being re-init with a valid viewport, it means there is something wrong. */
|
||||
BLI_assert(viewport == NULL);
|
||||
BLI_assert(viewport == nullptr);
|
||||
}
|
||||
else if (viewport) {
|
||||
/* Use viewport's persistent DRWData. */
|
||||
@@ -543,12 +544,12 @@ static void drw_manager_init(DRWManager *dst, GPUViewport *viewport, const int s
drw_viewport_data_reset(dst->vmempool);

bool do_validation = true;
if (size == NULL && viewport == NULL) {
if (size == nullptr && viewport == nullptr) {
/* Avoid division by 0. Engines will either override this or not use it. */
dst->size[0] = 1.0f;
dst->size[1] = 1.0f;
}
else if (size == NULL) {
else if (size == nullptr) {
BLI_assert(viewport);
GPUTexture *tex = GPU_viewport_color_texture(viewport, 0);
dst->size[0] = GPU_texture_width(tex);
@@ -565,7 +566,8 @@ static void drw_manager_init(DRWManager *dst, GPUViewport *viewport, const int s
dst->inv_size[1] = 1.0f / dst->size[1];

if (do_validation) {
DRW_view_data_texture_list_size_validate(dst->view_data_active, (int[2]){UNPACK2(dst->size)});
DRW_view_data_texture_list_size_validate(dst->view_data_active,
blender::int2{int(dst->size[0]), int(dst->size[1])});
}

if (viewport) {
@@ -577,9 +579,9 @@ static void drw_manager_init(DRWManager *dst, GPUViewport *viewport, const int s
|
||||
|
||||
draw_unit_state_create();
|
||||
|
||||
if (rv3d != NULL) {
|
||||
if (rv3d != nullptr) {
|
||||
dst->pixsize = rv3d->pixsize;
|
||||
dst->view_default = DRW_view_create(rv3d->viewmat, rv3d->winmat, NULL, NULL, NULL);
|
||||
dst->view_default = DRW_view_create(rv3d->viewmat, rv3d->winmat, nullptr, nullptr, nullptr);
|
||||
|
||||
if (dst->draw_ctx.sh_cfg == GPU_SHADER_CFG_CLIPPED) {
|
||||
int plane_len = (RV3D_LOCK_FLAGS(rv3d) & RV3D_BOXCLIP) ? 4 : 6;
|
||||
@@ -587,7 +589,7 @@ static void drw_manager_init(DRWManager *dst, GPUViewport *viewport, const int s
|
||||
}
|
||||
|
||||
dst->view_active = dst->view_default;
|
||||
dst->view_previous = NULL;
|
||||
dst->view_previous = nullptr;
|
||||
}
|
||||
else if (region) {
|
||||
View2D *v2d = ®ion->v2d;
|
||||
@@ -603,15 +605,15 @@ static void drw_manager_init(DRWManager *dst, GPUViewport *viewport, const int s
|
||||
winmat[3][0] = -1.0f;
|
||||
winmat[3][1] = -1.0f;
|
||||
|
||||
dst->view_default = DRW_view_create(viewmat, winmat, NULL, NULL, NULL);
|
||||
dst->view_default = DRW_view_create(viewmat, winmat, nullptr, nullptr, nullptr);
|
||||
dst->view_active = dst->view_default;
|
||||
dst->view_previous = NULL;
|
||||
dst->view_previous = nullptr;
|
||||
}
|
||||
else {
|
||||
dst->pixsize = 1.0f;
|
||||
dst->view_default = NULL;
|
||||
dst->view_active = NULL;
|
||||
dst->view_previous = NULL;
|
||||
dst->view_default = nullptr;
|
||||
dst->view_active = nullptr;
|
||||
dst->view_previous = nullptr;
|
||||
}
|
||||
|
||||
/* fclem: Is this still needed ? */
|
||||
@@ -619,16 +621,16 @@ static void drw_manager_init(DRWManager *dst, GPUViewport *viewport, const int s
|
||||
ED_view3d_init_mats_rv3d(dst->draw_ctx.object_edit, rv3d);
|
||||
}
|
||||
|
||||
if (G_draw.view_ubo == NULL) {
|
||||
G_draw.view_ubo = GPU_uniformbuf_create_ex(sizeof(ViewMatrices), NULL, "G_draw.view_ubo");
|
||||
if (G_draw.view_ubo == nullptr) {
|
||||
G_draw.view_ubo = GPU_uniformbuf_create_ex(sizeof(ViewMatrices), nullptr, "G_draw.view_ubo");
|
||||
}
|
||||
|
||||
if (G_draw.clipping_ubo == NULL) {
|
||||
if (G_draw.clipping_ubo == nullptr) {
|
||||
G_draw.clipping_ubo = GPU_uniformbuf_create_ex(
|
||||
sizeof(float4) * 6, NULL, "G_draw.clipping_ubo");
|
||||
sizeof(float4) * 6, nullptr, "G_draw.clipping_ubo");
|
||||
}
|
||||
|
||||
if (dst->draw_list == NULL) {
|
||||
if (dst->draw_list == nullptr) {
|
||||
dst->draw_list = GPU_draw_list_create(DRW_DRAWLIST_LEN);
|
||||
}
|
||||
|
||||
@@ -637,11 +639,11 @@ static void drw_manager_init(DRWManager *dst, GPUViewport *viewport, const int s
|
||||
|
||||
static void drw_manager_exit(DRWManager *dst)
|
||||
{
|
||||
if (dst->vmempool != NULL && dst->viewport == NULL) {
|
||||
if (dst->vmempool != nullptr && dst->viewport == nullptr) {
|
||||
DRW_viewport_data_free(dst->vmempool);
|
||||
}
|
||||
dst->vmempool = NULL;
|
||||
dst->viewport = NULL;
|
||||
dst->vmempool = nullptr;
|
||||
dst->viewport = nullptr;
|
||||
#ifdef DEBUG
|
||||
/* Avoid accidental reuse. */
|
||||
drw_state_ensure_not_reused(dst);
|
||||
@@ -687,7 +689,7 @@ static bool dupli_key_cmp(const void *key1, const void *key2)
|
||||
static void drw_duplidata_load(Object *ob)
|
||||
{
|
||||
DupliObject *dupli = DST.dupli_source;
|
||||
if (dupli == NULL) {
|
||||
if (dupli == nullptr) {
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -700,11 +702,11 @@ static void drw_duplidata_load(Object *ob)
|
||||
return;
|
||||
}
|
||||
|
||||
if (DST.dupli_ghash == NULL) {
|
||||
if (DST.dupli_ghash == nullptr) {
|
||||
DST.dupli_ghash = BLI_ghash_new(dupli_key_hash, dupli_key_cmp, __func__);
|
||||
}
|
||||
|
||||
DupliKey *key = MEM_callocN(sizeof(DupliKey), __func__);
|
||||
DupliKey *key = static_cast<DupliKey *>(MEM_callocN(sizeof(DupliKey), __func__));
|
||||
key->ob = dupli->ob;
|
||||
key->ob_data = dupli->ob_data;
|
||||
|
||||
@@ -724,7 +726,7 @@ static void drw_duplidata_load(Object *ob)
|
||||
|
||||
static void duplidata_value_free(void *val)
|
||||
{
|
||||
void **dupli_datas = val;
|
||||
void **dupli_datas = static_cast<void **>(val);
|
||||
for (int i = 0; i < g_registered_engines.len; i++) {
|
||||
MEM_SAFE_FREE(dupli_datas[i]);
|
||||
}
|
||||
@@ -738,9 +740,9 @@ static void duplidata_key_free(void *key)
|
||||
drw_batch_cache_generate_requested(dupli_key->ob);
|
||||
}
|
||||
else {
|
||||
Object temp_object = *dupli_key->ob;
|
||||
Object temp_object = blender::dna::shallow_copy(*dupli_key->ob);
|
||||
/* Do not modify the original bound-box. */
|
||||
temp_object.runtime.bb = NULL;
|
||||
temp_object.runtime.bb = nullptr;
|
||||
BKE_object_replace_data_on_shallow_copy(&temp_object, dupli_key->ob_data);
|
||||
drw_batch_cache_generate_requested(&temp_object);
|
||||
MEM_SAFE_FREE(temp_object.runtime.bb);
|
||||
@@ -750,16 +752,16 @@ static void duplidata_key_free(void *key)
|
||||
|
||||
static void drw_duplidata_free(void)
|
||||
{
|
||||
if (DST.dupli_ghash != NULL) {
|
||||
if (DST.dupli_ghash != nullptr) {
|
||||
BLI_ghash_free(DST.dupli_ghash, duplidata_key_free, duplidata_value_free);
|
||||
DST.dupli_ghash = NULL;
|
||||
DST.dupli_ghash = nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
void **DRW_duplidata_get(void *vedata)
|
||||
{
|
||||
if (DST.dupli_source == NULL) {
|
||||
return NULL;
|
||||
if (DST.dupli_source == nullptr) {
|
||||
return nullptr;
|
||||
}
|
||||
ViewportEngineData *ved = (ViewportEngineData *)vedata;
|
||||
DRWRegisteredDrawEngine *engine_type = (DRWRegisteredDrawEngine *)ved->engine_type;
|
||||
@@ -779,7 +781,7 @@ void *DRW_view_layer_engine_data_get(DrawEngineType *engine_type)
|
||||
return sled->storage;
|
||||
}
|
||||
}
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
void **DRW_view_layer_engine_data_ensure_ex(ViewLayer *view_layer,
|
||||
@@ -788,13 +790,16 @@ void **DRW_view_layer_engine_data_ensure_ex(ViewLayer *view_layer,
|
||||
{
|
||||
ViewLayerEngineData *sled;
|
||||
|
||||
for (sled = view_layer->drawdata.first; sled; sled = sled->next) {
|
||||
for (sled = static_cast<ViewLayerEngineData *>(view_layer->drawdata.first); sled;
|
||||
sled = sled->next)
|
||||
{
|
||||
if (sled->engine_type == engine_type) {
|
||||
return &sled->storage;
|
||||
}
|
||||
}
|
||||
|
||||
sled = MEM_callocN(sizeof(ViewLayerEngineData), "ViewLayerEngineData");
|
||||
sled = static_cast<ViewLayerEngineData *>(
|
||||
MEM_callocN(sizeof(ViewLayerEngineData), "ViewLayerEngineData"));
|
||||
sled->engine_type = engine_type;
|
||||
sled->free = callback;
|
||||
BLI_addtail(&view_layer->drawdata, sled);
|
||||
@@ -818,11 +823,11 @@ void **DRW_view_layer_engine_data_ensure(DrawEngineType *engine_type,
|
||||
* All ID-data-blocks which have their own 'local' DrawData
|
||||
* should have the same arrangement in their structs.
|
||||
*/
|
||||
typedef struct IdDdtTemplate {
|
||||
struct IdDdtTemplate {
|
||||
ID id;
|
||||
struct AnimData *adt;
|
||||
DrawDataList drawdata;
|
||||
} IdDdtTemplate;
|
||||
};
|
||||
|
||||
/* Check if ID can have AnimData */
|
||||
static bool id_type_can_have_drawdata(const short id_type)
|
||||
@@ -847,7 +852,7 @@ static bool id_type_can_have_drawdata(const short id_type)
|
||||
static bool id_can_have_drawdata(const ID *id)
|
||||
{
|
||||
/* sanity check */
|
||||
if (id == NULL) {
|
||||
if (id == nullptr) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -865,15 +870,15 @@ DrawDataList *DRW_drawdatalist_from_id(ID *id)
|
||||
return &idt->drawdata;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
DrawData *DRW_drawdata_get(ID *id, DrawEngineType *engine_type)
|
||||
{
|
||||
DrawDataList *drawdata = DRW_drawdatalist_from_id(id);
|
||||
|
||||
if (drawdata == NULL) {
|
||||
return NULL;
|
||||
if (drawdata == nullptr) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
LISTBASE_FOREACH (DrawData *, dd, drawdata) {
|
||||
@@ -881,7 +886,7 @@ DrawData *DRW_drawdata_get(ID *id, DrawEngineType *engine_type)
|
||||
return dd;
|
||||
}
|
||||
}
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
DrawData *DRW_drawdata_ensure(ID *id,
|
||||
@@ -894,7 +899,7 @@ DrawData *DRW_drawdata_ensure(ID *id,
|
||||
BLI_assert(id_can_have_drawdata(id));
|
||||
/* Try to re-use existing data. */
|
||||
DrawData *dd = DRW_drawdata_get(id, engine_type);
|
||||
if (dd != NULL) {
|
||||
if (dd != nullptr) {
|
||||
return dd;
|
||||
}
|
||||
|
||||
@@ -903,25 +908,25 @@ DrawData *DRW_drawdata_ensure(ID *id,
|
||||
/* Allocate new data. */
|
||||
if ((GS(id->name) == ID_OB) && (((Object *)id)->base_flag & BASE_FROM_DUPLI) != 0) {
|
||||
/* NOTE: data is not persistent in this case. It is reset each redraw. */
|
||||
BLI_assert(free_cb == NULL); /* No callback allowed. */
|
||||
BLI_assert(free_cb == nullptr); /* No callback allowed. */
|
||||
/* Round to sizeof(float) for DRW_instance_data_request(). */
|
||||
const size_t t = sizeof(float) - 1;
|
||||
size = (size + t) & ~t;
|
||||
size_t fsize = size / sizeof(float);
|
||||
BLI_assert(fsize < MAX_INSTANCE_DATA_SIZE);
|
||||
if (DST.object_instance_data[fsize] == NULL) {
|
||||
if (DST.object_instance_data[fsize] == nullptr) {
|
||||
DST.object_instance_data[fsize] = DRW_instance_data_request(DST.vmempool->idatalist, fsize);
|
||||
}
|
||||
dd = (DrawData *)DRW_instance_data_next(DST.object_instance_data[fsize]);
|
||||
memset(dd, 0, size);
|
||||
}
|
||||
else {
|
||||
dd = MEM_callocN(size, "DrawData");
|
||||
dd = static_cast<DrawData *>(MEM_callocN(size, "DrawData"));
|
||||
}
|
||||
dd->engine_type = engine_type;
|
||||
dd->free = free_cb;
|
||||
/* Perform user-side initialization, if needed. */
|
||||
if (init_cb != NULL) {
|
||||
if (init_cb != nullptr) {
|
||||
init_cb(dd);
|
||||
}
|
||||
/* Register in the list. */
|
||||
@@ -933,12 +938,12 @@ void DRW_drawdata_free(ID *id)
|
||||
{
|
||||
DrawDataList *drawdata = DRW_drawdatalist_from_id(id);
|
||||
|
||||
if (drawdata == NULL) {
|
||||
if (drawdata == nullptr) {
|
||||
return;
|
||||
}
|
||||
|
||||
LISTBASE_FOREACH (DrawData *, dd, drawdata) {
|
||||
if (dd->free != NULL) {
|
||||
if (dd->free != nullptr) {
|
||||
dd->free(dd);
|
||||
}
|
||||
}
|
||||
@@ -952,7 +957,7 @@ static void drw_drawdata_unlink_dupli(ID *id)
|
||||
if ((GS(id->name) == ID_OB) && (((Object *)id)->base_flag & BASE_FROM_DUPLI) != 0) {
|
||||
DrawDataList *drawdata = DRW_drawdatalist_from_id(id);
|
||||
|
||||
if (drawdata == NULL) {
|
||||
if (drawdata == nullptr) {
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -979,10 +984,14 @@ void DRW_cache_free_old_batches(Main *bmain)
|
||||
|
||||
lasttime = ctime;
|
||||
|
||||
for (scene = bmain->scenes.first; scene; scene = scene->id.next) {
|
||||
for (view_layer = scene->view_layers.first; view_layer; view_layer = view_layer->next) {
|
||||
for (scene = static_cast<Scene *>(bmain->scenes.first); scene;
|
||||
scene = static_cast<Scene *>(scene->id.next))
|
||||
{
|
||||
for (view_layer = static_cast<ViewLayer *>(scene->view_layers.first); view_layer;
|
||||
view_layer = view_layer->next)
|
||||
{
|
||||
Depsgraph *depsgraph = BKE_scene_get_depsgraph(scene, view_layer);
|
||||
if (depsgraph == NULL) {
|
||||
if (depsgraph == nullptr) {
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -1028,9 +1037,9 @@ static void drw_engines_cache_init(void)
|
||||
DRW_ENABLED_ENGINE_ITER (DST.view_data_active, engine, data) {
|
||||
if (data->text_draw_cache) {
|
||||
DRW_text_cache_destroy(data->text_draw_cache);
|
||||
data->text_draw_cache = NULL;
|
||||
data->text_draw_cache = nullptr;
|
||||
}
|
||||
if (DST.text_store_p == NULL) {
|
||||
if (DST.text_store_p == nullptr) {
|
||||
DST.text_store_p = &data->text_draw_cache;
|
||||
}
|
||||
|
||||
@@ -1042,7 +1051,7 @@ static void drw_engines_cache_init(void)
|
||||
|
||||
static void drw_engines_world_update(Scene *scene)
|
||||
{
|
||||
if (scene->world == NULL) {
|
||||
if (scene->world == nullptr) {
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -1140,7 +1149,7 @@ void DRW_draw_region_engine_info(int xoffset, int *yoffset, int line_height)
|
||||
UI_FontThemeColor(font_id, TH_TEXT_HI);
|
||||
|
||||
BLF_enable(font_id, BLF_SHADOW);
|
||||
BLF_shadow(font_id, 5, (const float[4]){0.0f, 0.0f, 0.0f, 1.0f});
|
||||
BLF_shadow(font_id, 5, blender::float4{0.0f, 0.0f, 0.0f, 1.0f});
|
||||
BLF_shadow_offset(font_id, 1, -1);
|
||||
|
||||
const char *buf_step = data->info;
|
||||
@@ -1179,7 +1188,7 @@ static void drw_engines_enable_from_engine(const RenderEngineType *engine_type,
|
||||
case OB_MATERIAL:
|
||||
case OB_RENDER:
|
||||
default:
|
||||
if (engine_type->draw_engine != NULL) {
|
||||
if (engine_type->draw_engine != nullptr) {
|
||||
use_drw_engine(engine_type->draw_engine);
|
||||
}
|
||||
else if ((engine_type->flag & RE_INTERNAL) == 0) {
|
||||
@@ -1263,12 +1272,12 @@ static bool is_compositor_enabled(void)
|
||||
return true;
|
||||
}
|
||||
|
||||
static void drw_engines_enable(ViewLayer *UNUSED(view_layer),
|
||||
static void drw_engines_enable(ViewLayer * /*view_layer*/,
|
||||
RenderEngineType *engine_type,
|
||||
bool gpencil_engine_needed)
|
||||
{
|
||||
View3D *v3d = DST.draw_ctx.v3d;
|
||||
const eDrawType drawtype = v3d->shading.type;
|
||||
const eDrawType drawtype = eDrawType(v3d->shading.type);
|
||||
const bool use_xray = XRAY_ENABLED(v3d);
|
||||
|
||||
drw_engines_enable_from_engine(engine_type, drawtype);
|
||||
@@ -1325,7 +1334,7 @@ void DRW_notify_view_update(const DRWUpdateContext *update_ctx)
|
||||
RenderEngineType *engine_type = update_ctx->engine_type;
|
||||
ARegion *region = update_ctx->region;
|
||||
View3D *v3d = update_ctx->v3d;
|
||||
RegionView3D *rv3d = region->regiondata;
|
||||
RegionView3D *rv3d = static_cast<RegionView3D *>(region->regiondata);
|
||||
Depsgraph *depsgraph = update_ctx->depsgraph;
|
||||
Scene *scene = update_ctx->scene;
|
||||
ViewLayer *view_layer = update_ctx->view_layer;
|
||||
@@ -1346,17 +1355,16 @@ void DRW_notify_view_update(const DRWUpdateContext *update_ctx)
|
||||
drw_state_prepare_clean_for_draw(&DST);
|
||||
|
||||
BKE_view_layer_synced_ensure(scene, view_layer);
|
||||
DST.draw_ctx = (DRWContextState){
|
||||
.region = region,
|
||||
.rv3d = rv3d,
|
||||
.v3d = v3d,
|
||||
.scene = scene,
|
||||
.view_layer = view_layer,
|
||||
.obact = BKE_view_layer_active_object_get(view_layer),
|
||||
.engine_type = engine_type,
|
||||
.depsgraph = depsgraph,
|
||||
.object_mode = OB_MODE_OBJECT,
|
||||
};
|
||||
DST.draw_ctx = {};
|
||||
DST.draw_ctx.region = region;
|
||||
DST.draw_ctx.rv3d = rv3d;
|
||||
DST.draw_ctx.v3d = v3d;
|
||||
DST.draw_ctx.scene = scene;
|
||||
DST.draw_ctx.view_layer = view_layer;
|
||||
DST.draw_ctx.obact = BKE_view_layer_active_object_get(view_layer);
|
||||
DST.draw_ctx.engine_type = engine_type;
|
||||
DST.draw_ctx.depsgraph = depsgraph;
|
||||
DST.draw_ctx.object_mode = OB_MODE_OBJECT;
|
||||
|
||||
/* Custom lightweight initialize to avoid resetting the memory-pools. */
|
||||
DST.viewport = viewport;
|
||||
@@ -1392,7 +1400,7 @@ static void drw_notify_view_update_offscreen(Depsgraph *depsgraph,
|
||||
|
||||
Scene *scene = DEG_get_evaluated_scene(depsgraph);
|
||||
ViewLayer *view_layer = DEG_get_evaluated_view_layer(depsgraph);
|
||||
RegionView3D *rv3d = region->regiondata;
|
||||
RegionView3D *rv3d = static_cast<RegionView3D *>(region->regiondata);
|
||||
|
||||
const bool gpencil_engine_needed = drw_gpencil_engine_needed(depsgraph, v3d);
|
||||
|
||||
@@ -1400,16 +1408,15 @@ static void drw_notify_view_update_offscreen(Depsgraph *depsgraph,
|
||||
drw_state_prepare_clean_for_draw(&DST);
|
||||
|
||||
BKE_view_layer_synced_ensure(scene, view_layer);
|
||||
DST.draw_ctx = (DRWContextState){
|
||||
.region = region,
|
||||
.rv3d = rv3d,
|
||||
.v3d = v3d,
|
||||
.scene = scene,
|
||||
.view_layer = view_layer,
|
||||
.obact = BKE_view_layer_active_object_get(view_layer),
|
||||
.engine_type = engine_type,
|
||||
.depsgraph = depsgraph,
|
||||
};
|
||||
DST.draw_ctx = {};
|
||||
DST.draw_ctx.region = region;
|
||||
DST.draw_ctx.rv3d = rv3d;
|
||||
DST.draw_ctx.v3d = v3d;
|
||||
DST.draw_ctx.scene = scene;
|
||||
DST.draw_ctx.view_layer = view_layer;
|
||||
DST.draw_ctx.obact = BKE_view_layer_active_object_get(view_layer);
|
||||
DST.draw_ctx.engine_type = engine_type;
|
||||
DST.draw_ctx.depsgraph = depsgraph;
|
||||
|
||||
/* Custom lightweight initialize to avoid resetting the memory-pools. */
|
||||
DST.viewport = viewport;
|
||||
@@ -1603,7 +1610,7 @@ void DRW_draw_callbacks_post_scene(void)
|
||||
struct DRWTextStore *DRW_text_cache_ensure(void)
|
||||
{
|
||||
BLI_assert(DST.text_store_p);
|
||||
if (*DST.text_store_p == NULL) {
|
||||
if (*DST.text_store_p == nullptr) {
|
||||
*DST.text_store_p = DRW_text_cache_create();
|
||||
}
|
||||
return *DST.text_store_p;
|
||||
@@ -1651,27 +1658,27 @@ void DRW_draw_render_loop_ex(Depsgraph *depsgraph,
{
Scene *scene = DEG_get_evaluated_scene(depsgraph);
ViewLayer *view_layer = DEG_get_evaluated_view_layer(depsgraph);
RegionView3D *rv3d = region->regiondata;
RegionView3D *rv3d = static_cast<RegionView3D *>(region->regiondata);

BKE_view_layer_synced_ensure(scene, view_layer);
DST.draw_ctx.evil_C = evil_C;
DST.draw_ctx = (DRWContextState){
.region = region,
.rv3d = rv3d,
.v3d = v3d,
.scene = scene,
.view_layer = view_layer,
.obact = BKE_view_layer_active_object_get(view_layer),
.engine_type = engine_type,
.depsgraph = depsgraph,

/* reuse if caller sets */
.evil_C = DST.draw_ctx.evil_C,
};
DST.draw_ctx = {};
DST.draw_ctx.region = region;
DST.draw_ctx.rv3d = rv3d;
DST.draw_ctx.v3d = v3d;
DST.draw_ctx.scene = scene;
DST.draw_ctx.view_layer = view_layer;
DST.draw_ctx.obact = BKE_view_layer_active_object_get(view_layer);
DST.draw_ctx.engine_type = engine_type;
DST.draw_ctx.depsgraph = depsgraph;

/* reuse if caller sets */
DST.draw_ctx.evil_C = DST.draw_ctx.evil_C;

drw_task_graph_init();
drw_context_state_init();

drw_manager_init(&DST, viewport, NULL);
drw_manager_init(&DST, viewport, nullptr);
DRW_viewport_colormanagement_set(viewport);

const int object_type_exclude_viewport = v3d->object_type_exclude_viewport;
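The DST.draw_ctx hunk above is representative of how the commit deals with C99 compound literals that used designated initializers: the struct is now reset with = {} and its members are assigned one by one, since the (Type){.field = value} form is not valid in the C++ dialect this code base targets. A small stand-alone sketch of the transformation (struct and field names are illustrative):

    struct DrawContext {
      int region_id = 0;
      const char *engine = nullptr;
    };

    static DrawContext g_ctx;

    static void ctx_reset(const int region_id, const char *engine)
    {
      /* C:   g_ctx = (DrawContext){.region_id = region_id, .engine = engine}; */
      /* C++: zero-initialize, then assign the fields that matter. */
      g_ctx = {};
      g_ctx.region_id = region_id;
      g_ctx.engine = engine;
    }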
@@ -1710,8 +1717,8 @@ void DRW_draw_render_loop_ex(Depsgraph *depsgraph,
|
||||
|
||||
/* Only iterate over objects for internal engines or when overlays are enabled */
|
||||
if (do_populate_loop) {
|
||||
DST.dupli_origin = NULL;
|
||||
DST.dupli_origin_data = NULL;
|
||||
DST.dupli_origin = nullptr;
|
||||
DST.dupli_origin_data = nullptr;
|
||||
DEGObjectIterSettings deg_iter_settings = {0};
|
||||
deg_iter_settings.depsgraph = depsgraph;
|
||||
deg_iter_settings.flags = DEG_OBJECT_ITER_FOR_RENDER_ENGINE_FLAGS;
|
||||
@@ -1798,7 +1805,7 @@ void DRW_draw_render_loop(Depsgraph *depsgraph,
|
||||
Scene *scene = DEG_get_evaluated_scene(depsgraph);
|
||||
RenderEngineType *engine_type = ED_view3d_engine_type(scene, v3d->shading.type);
|
||||
|
||||
DRW_draw_render_loop_ex(depsgraph, engine_type, region, v3d, viewport, NULL);
|
||||
DRW_draw_render_loop_ex(depsgraph, engine_type, region, v3d, viewport, nullptr);
|
||||
}
|
||||
|
||||
void DRW_draw_render_loop_offscreen(Depsgraph *depsgraph,
|
||||
@@ -1815,7 +1822,7 @@ void DRW_draw_render_loop_offscreen(Depsgraph *depsgraph,
|
||||
|
||||
/* Create temporary viewport if needed or update the existing viewport. */
|
||||
GPUViewport *render_viewport = viewport;
|
||||
if (viewport == NULL) {
|
||||
if (viewport == nullptr) {
|
||||
render_viewport = GPU_viewport_create();
|
||||
}
|
||||
else {
|
||||
@@ -1831,7 +1838,7 @@ void DRW_draw_render_loop_offscreen(Depsgraph *depsgraph,
|
||||
drw_state_prepare_clean_for_draw(&DST);
|
||||
DST.options.is_image_render = is_image_render;
|
||||
DST.options.draw_background = draw_background;
|
||||
DRW_draw_render_loop_ex(depsgraph, engine_type, region, v3d, render_viewport, NULL);
|
||||
DRW_draw_render_loop_ex(depsgraph, engine_type, region, v3d, render_viewport, nullptr);
|
||||
|
||||
if (draw_background) {
|
||||
/* HACK(@fclem): In this case we need to make sure the final alpha is 1.
|
||||
@@ -1859,14 +1866,14 @@ void DRW_draw_render_loop_offscreen(Depsgraph *depsgraph,
|
||||
}
|
||||
|
||||
/* Free temporary viewport. */
|
||||
if (viewport == NULL) {
|
||||
if (viewport == nullptr) {
|
||||
GPU_viewport_free(render_viewport);
|
||||
}
|
||||
}
|
||||
|
||||
bool DRW_render_check_grease_pencil(Depsgraph *depsgraph)
|
||||
{
|
||||
if (!drw_gpencil_engine_needed(depsgraph, NULL)) {
|
||||
if (!drw_gpencil_engine_needed(depsgraph, nullptr)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -1921,18 +1928,18 @@ void DRW_render_gpencil(RenderEngine *engine, Depsgraph *depsgraph)
|
||||
DST.options.draw_background = scene->r.alphamode == R_ADDSKY;
|
||||
DST.buffer_finish_called = true;
|
||||
|
||||
DST.draw_ctx = (DRWContextState){
|
||||
.scene = scene,
|
||||
.view_layer = view_layer,
|
||||
.engine_type = engine_type,
|
||||
.depsgraph = depsgraph,
|
||||
.object_mode = OB_MODE_OBJECT,
|
||||
};
|
||||
DST.draw_ctx = {};
|
||||
DST.draw_ctx.scene = scene;
|
||||
DST.draw_ctx.view_layer = view_layer;
|
||||
DST.draw_ctx.engine_type = engine_type;
|
||||
DST.draw_ctx.depsgraph = depsgraph;
|
||||
DST.draw_ctx.object_mode = OB_MODE_OBJECT;
|
||||
|
||||
drw_context_state_init();
|
||||
|
||||
const int size[2] = {engine->resolution_x, engine->resolution_y};
|
||||
|
||||
drw_manager_init(&DST, NULL, size);
|
||||
drw_manager_init(&DST, nullptr, size);
|
||||
|
||||
/* Main rendering. */
|
||||
rctf view_rect;
|
||||
@@ -1944,7 +1951,8 @@ void DRW_render_gpencil(RenderEngine *engine, Depsgraph *depsgraph)
|
||||
|
||||
RenderResult *render_result = RE_engine_get_result(engine);
|
||||
RenderLayer *render_layer = RE_GetRenderLayer(render_result, view_layer->name);
|
||||
for (RenderView *render_view = render_result->views.first; render_view != NULL;
|
||||
for (RenderView *render_view = static_cast<RenderView *>(render_result->views.first);
|
||||
render_view != nullptr;
|
||||
render_view = render_view->next)
|
||||
{
|
||||
RE_SetActiveRenderView(render, render_view->name);
|
||||
@@ -1984,13 +1992,13 @@ void DRW_render_to_image(RenderEngine *engine, Depsgraph *depsgraph)
|
||||
DST.options.is_image_render = true;
|
||||
DST.options.is_scene_render = true;
|
||||
DST.options.draw_background = scene->r.alphamode == R_ADDSKY;
|
||||
DST.draw_ctx = (DRWContextState){
|
||||
.scene = scene,
|
||||
.view_layer = view_layer,
|
||||
.engine_type = engine_type,
|
||||
.depsgraph = depsgraph,
|
||||
.object_mode = OB_MODE_OBJECT,
|
||||
};
|
||||
DST.draw_ctx = {};
|
||||
DST.draw_ctx.scene = scene;
|
||||
DST.draw_ctx.view_layer = view_layer;
|
||||
DST.draw_ctx.engine_type = engine_type;
|
||||
DST.draw_ctx.depsgraph = depsgraph;
|
||||
DST.draw_ctx.object_mode = OB_MODE_OBJECT;
|
||||
|
||||
drw_context_state_init();
|
||||
|
||||
/* Begin GPU workload Boundary */
|
||||
@@ -1998,7 +2006,7 @@ void DRW_render_to_image(RenderEngine *engine, Depsgraph *depsgraph)
|
||||
|
||||
const int size[2] = {engine->resolution_x, engine->resolution_y};
|
||||
|
||||
drw_manager_init(&DST, NULL, size);
|
||||
drw_manager_init(&DST, nullptr, size);
|
||||
|
||||
ViewportEngineData *data = DRW_view_data_engine_data_get_ensure(DST.view_data_active,
|
||||
draw_engine_type);
|
||||
@@ -2024,9 +2032,10 @@ void DRW_render_to_image(RenderEngine *engine, Depsgraph *depsgraph)
|
||||
size[0],
|
||||
size[1],
|
||||
view_layer->name,
|
||||
/*RR_ALL_VIEWS*/ NULL);
|
||||
RenderLayer *render_layer = render_result->layers.first;
|
||||
for (RenderView *render_view = render_result->views.first; render_view != NULL;
|
||||
/*RR_ALL_VIEWS*/ nullptr);
|
||||
RenderLayer *render_layer = static_cast<RenderLayer *>(render_result->layers.first);
|
||||
for (RenderView *render_view = static_cast<RenderView *>(render_result->views.first);
|
||||
render_view != nullptr;
|
||||
render_view = render_view->next)
|
||||
{
|
||||
RE_SetActiveRenderView(render, render_view->name);
|
||||
@@ -2072,8 +2081,8 @@ void DRW_render_object_iter(
|
||||
const int object_type_exclude_viewport = draw_ctx->v3d ?
|
||||
draw_ctx->v3d->object_type_exclude_viewport :
|
||||
0;
|
||||
DST.dupli_origin = NULL;
|
||||
DST.dupli_origin_data = NULL;
|
||||
DST.dupli_origin = nullptr;
|
||||
DST.dupli_origin_data = nullptr;
|
||||
DEGObjectIterSettings deg_iter_settings = {0};
|
||||
deg_iter_settings.depsgraph = depsgraph;
|
||||
deg_iter_settings.flags = DEG_OBJECT_ITER_FOR_RENDER_ENGINE_FLAGS;
|
||||
@@ -2110,16 +2119,16 @@ void DRW_custom_pipeline_begin(DrawEngineType *draw_engine_type, Depsgraph *deps
|
||||
DST.options.is_scene_render = true;
|
||||
DST.options.draw_background = false;
|
||||
|
||||
DST.draw_ctx = (DRWContextState){
|
||||
.scene = scene,
|
||||
.view_layer = view_layer,
|
||||
.engine_type = NULL,
|
||||
.depsgraph = depsgraph,
|
||||
.object_mode = OB_MODE_OBJECT,
|
||||
};
|
||||
DST.draw_ctx = {};
|
||||
DST.draw_ctx.scene = scene;
|
||||
DST.draw_ctx.view_layer = view_layer;
|
||||
DST.draw_ctx.engine_type = nullptr;
|
||||
DST.draw_ctx.depsgraph = depsgraph;
|
||||
DST.draw_ctx.object_mode = OB_MODE_OBJECT;
|
||||
|
||||
drw_context_state_init();
|
||||
|
||||
drw_manager_init(&DST, NULL, NULL);
|
||||
drw_manager_init(&DST, nullptr, nullptr);
|
||||
|
||||
DRW_pointcloud_init();
|
||||
DRW_curves_init(DST.vmempool);
|
||||
@@ -2168,7 +2177,7 @@ void DRW_cache_restart(void)
|
||||
{
|
||||
DRW_smoke_exit(DST.vmempool);
|
||||
|
||||
drw_manager_init(&DST, DST.viewport, (int[2]){UNPACK2(DST.size)});
|
||||
drw_manager_init(&DST, DST.viewport, blender::int2{int(DST.size[0]), int(DST.size[1])});
|
||||
|
||||
DST.buffer_finish_called = false;
|
||||
|
||||
@@ -2188,20 +2197,19 @@ void DRW_draw_render_loop_2d_ex(Depsgraph *depsgraph,
|
||||
|
||||
BKE_view_layer_synced_ensure(scene, view_layer);
|
||||
DST.draw_ctx.evil_C = evil_C;
|
||||
DST.draw_ctx = (DRWContextState){
|
||||
.region = region,
|
||||
.scene = scene,
|
||||
.view_layer = view_layer,
|
||||
.obact = BKE_view_layer_active_object_get(view_layer),
|
||||
.depsgraph = depsgraph,
|
||||
.space_data = CTX_wm_space_data(evil_C),
|
||||
DST.draw_ctx = {};
|
||||
DST.draw_ctx.region = region;
|
||||
DST.draw_ctx.scene = scene;
|
||||
DST.draw_ctx.view_layer = view_layer;
|
||||
DST.draw_ctx.obact = BKE_view_layer_active_object_get(view_layer);
|
||||
DST.draw_ctx.depsgraph = depsgraph;
|
||||
DST.draw_ctx.space_data = CTX_wm_space_data(evil_C);
|
||||
|
||||
/* reuse if caller sets */
|
||||
.evil_C = DST.draw_ctx.evil_C,
|
||||
};
|
||||
/* reuse if caller sets */
|
||||
DST.draw_ctx.evil_C = DST.draw_ctx.evil_C;
|
||||
|
||||
drw_context_state_init();
|
||||
drw_manager_init(&DST, viewport, NULL);
|
||||
drw_manager_init(&DST, viewport, nullptr);
|
||||
DRW_viewport_colormanagement_set(viewport);
|
||||
|
||||
/* TODO(jbakker): Only populate when editor needs to draw object.
|
||||
@@ -2339,31 +2347,31 @@ void DRW_draw_render_loop_2d_ex(Depsgraph *depsgraph,
|
||||
static struct DRWSelectBuffer {
|
||||
GPUFrameBuffer *framebuffer_depth_only;
|
||||
GPUTexture *texture_depth;
|
||||
} g_select_buffer = {NULL};
|
||||
} g_select_buffer = {nullptr};
|
||||
|
||||
static void draw_select_framebuffer_depth_only_setup(const int size[2])
|
||||
{
|
||||
if (g_select_buffer.framebuffer_depth_only == NULL) {
|
||||
if (g_select_buffer.framebuffer_depth_only == nullptr) {
|
||||
g_select_buffer.framebuffer_depth_only = GPU_framebuffer_create("framebuffer_depth_only");
|
||||
}
|
||||
|
||||
if ((g_select_buffer.texture_depth != NULL) &&
|
||||
if ((g_select_buffer.texture_depth != nullptr) &&
|
||||
((GPU_texture_width(g_select_buffer.texture_depth) != size[0]) ||
|
||||
(GPU_texture_height(g_select_buffer.texture_depth) != size[1])))
|
||||
{
|
||||
GPU_texture_free(g_select_buffer.texture_depth);
|
||||
g_select_buffer.texture_depth = NULL;
|
||||
g_select_buffer.texture_depth = nullptr;
|
||||
}
|
||||
|
||||
if (g_select_buffer.texture_depth == NULL) {
|
||||
if (g_select_buffer.texture_depth == nullptr) {
|
||||
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_ATTACHMENT;
|
||||
g_select_buffer.texture_depth = GPU_texture_create_2d(
|
||||
"select_depth", size[0], size[1], 1, GPU_DEPTH_COMPONENT24, usage, NULL);
|
||||
"select_depth", size[0], size[1], 1, GPU_DEPTH_COMPONENT24, usage, nullptr);
|
||||
|
||||
GPU_framebuffer_texture_attach(
|
||||
g_select_buffer.framebuffer_depth_only, g_select_buffer.texture_depth, 0, 0);
|
||||
|
||||
GPU_framebuffer_check_valid(g_select_buffer.framebuffer_depth_only, NULL);
|
||||
GPU_framebuffer_check_valid(g_select_buffer.framebuffer_depth_only, nullptr);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2387,7 +2395,7 @@ void DRW_draw_select_loop(Depsgraph *depsgraph,
|
||||
View3D *v3d,
|
||||
bool use_obedit_skip,
|
||||
bool draw_surface,
|
||||
bool UNUSED(use_nearest),
|
||||
bool /*use_nearest*/,
|
||||
const bool do_material_sub_selection,
|
||||
const rcti *rect,
|
||||
DRW_SelectPassFn select_pass_fn,
|
||||
@@ -2401,11 +2409,11 @@ void DRW_draw_select_loop(Depsgraph *depsgraph,
|
||||
|
||||
BKE_view_layer_synced_ensure(scene, view_layer);
|
||||
Object *obact = BKE_view_layer_active_object_get(view_layer);
|
||||
Object *obedit = use_obedit_skip ? NULL : OBEDIT_FROM_OBACT(obact);
|
||||
Object *obedit = use_obedit_skip ? nullptr : OBEDIT_FROM_OBACT(obact);
|
||||
#ifndef USE_GPU_SELECT
|
||||
UNUSED_VARS(scene, view_layer, v3d, region, rect);
|
||||
#else
|
||||
RegionView3D *rv3d = region->regiondata;
|
||||
RegionView3D *rv3d = static_cast<RegionView3D *>(region->regiondata);
|
||||
|
||||
/* Reset before using it. */
|
||||
drw_state_prepare_clean_for_draw(&DST);
|
||||
@@ -2416,9 +2424,9 @@ void DRW_draw_select_loop(Depsgraph *depsgraph,
|
||||
/* object_mode is used for filtering objects in the depsgraph */
|
||||
eObjectMode object_mode;
|
||||
int object_type = 0;
|
||||
if (obedit != NULL) {
|
||||
if (obedit != nullptr) {
|
||||
object_type = obedit->type;
|
||||
object_mode = obedit->mode;
|
||||
object_mode = eObjectMode(obedit->mode);
|
||||
if (obedit->type == OB_MBALL) {
|
||||
use_obedit = true;
|
||||
// obedit_ctx_mode = CTX_MODE_EDIT_METABALL;
|
||||
@@ -2432,7 +2440,7 @@ void DRW_draw_select_loop(Depsgraph *depsgraph,
|
||||
if (!(v3d->flag2 & V3D_HIDE_OVERLAYS)) {
|
||||
/* NOTE: don't use "BKE_object_pose_armature_get" here, it breaks selection. */
|
||||
Object *obpose = OBPOSE_FROM_OBACT(obact);
|
||||
if (obpose == NULL) {
|
||||
if (obpose == nullptr) {
|
||||
Object *obweight = OBWEIGHTPAINT_FROM_OBACT(obact);
|
||||
if (obweight) {
|
||||
/* Only use Armature pose selection, when connected armature is in pose mode. */
|
||||
@@ -2446,27 +2454,27 @@ void DRW_draw_select_loop(Depsgraph *depsgraph,
|
||||
if (obpose) {
|
||||
use_obedit = true;
|
||||
object_type = obpose->type;
|
||||
object_mode = obpose->mode;
|
||||
object_mode = eObjectMode(obpose->mode);
|
||||
// obedit_ctx_mode = CTX_MODE_POSE;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Instead of 'DRW_context_state_init(C, &DST.draw_ctx)', assign from args */
|
||||
DST.draw_ctx = (DRWContextState){
|
||||
.region = region,
|
||||
.rv3d = rv3d,
|
||||
.v3d = v3d,
|
||||
.scene = scene,
|
||||
.view_layer = view_layer,
|
||||
.obact = obact,
|
||||
.engine_type = engine_type,
|
||||
.depsgraph = depsgraph,
|
||||
};
|
||||
DST.draw_ctx = {};
|
||||
DST.draw_ctx.region = region;
|
||||
DST.draw_ctx.rv3d = rv3d;
|
||||
DST.draw_ctx.v3d = v3d;
|
||||
DST.draw_ctx.scene = scene;
|
||||
DST.draw_ctx.view_layer = view_layer;
|
||||
DST.draw_ctx.obact = obact;
|
||||
DST.draw_ctx.engine_type = engine_type;
|
||||
DST.draw_ctx.depsgraph = depsgraph;
|
||||
|
||||
drw_context_state_init();
|
||||
|
||||
const int viewport_size[2] = {BLI_rcti_size_x(rect), BLI_rcti_size_y(rect)};
|
||||
drw_manager_init(&DST, NULL, viewport_size);
|
||||
drw_manager_init(&DST, nullptr, viewport_size);
|
||||
|
||||
DST.options.is_select = true;
|
||||
DST.options.is_material_select = do_material_sub_selection;
|
||||
@@ -2523,13 +2531,13 @@ void DRW_draw_select_loop(Depsgraph *depsgraph,
|
||||
else {
|
||||
/* When selecting pose-bones in pose mode, check for visibility not select-ability
|
||||
* as pose-bones have their own selection restriction flag. */
|
||||
const bool use_pose_exception = (DST.draw_ctx.object_pose != NULL);
|
||||
const bool use_pose_exception = (DST.draw_ctx.object_pose != nullptr);
|
||||
|
||||
const int object_type_exclude_select = (v3d->object_type_exclude_viewport |
|
||||
v3d->object_type_exclude_select);
|
||||
bool filter_exclude = false;
|
||||
DST.dupli_origin = NULL;
|
||||
DST.dupli_origin_data = NULL;
|
||||
DST.dupli_origin = nullptr;
|
||||
DST.dupli_origin_data = nullptr;
|
||||
DEGObjectIterSettings deg_iter_settings = {0};
|
||||
deg_iter_settings.depsgraph = depsgraph;
|
||||
deg_iter_settings.flags = DEG_OBJECT_ITER_FOR_RENDER_ENGINE_FLAGS;
|
||||
@@ -2553,7 +2561,7 @@ void DRW_draw_select_loop(Depsgraph *depsgraph,
|
||||
}
|
||||
|
||||
if ((object_type_exclude_select & (1 << ob->type)) == 0) {
|
||||
if (object_filter_fn != NULL) {
|
||||
if (object_filter_fn != nullptr) {
|
||||
if (ob->base_flag & BASE_FROM_DUPLI) {
|
||||
/* pass (use previous filter_exclude value) */
|
||||
}
|
||||
@@ -2587,7 +2595,7 @@ void DRW_draw_select_loop(Depsgraph *depsgraph,
|
||||
GPU_framebuffer_bind(g_select_buffer.framebuffer_depth_only);
|
||||
GPU_framebuffer_clear_depth(g_select_buffer.framebuffer_depth_only, 1.0f);
|
||||
/* WORKAROUND: Needed for Select-Next for keeping the same code-flow as Overlay-Next. */
|
||||
BLI_assert(DRW_viewport_texture_list_get()->depth == NULL);
|
||||
BLI_assert(DRW_viewport_texture_list_get()->depth == nullptr);
|
||||
DRW_viewport_texture_list_get()->depth = g_select_buffer.texture_depth;
|
||||
|
||||
/* Start Drawing */
|
||||
@@ -2608,7 +2616,7 @@ void DRW_draw_select_loop(Depsgraph *depsgraph,
|
||||
drw_engines_draw_scene();
|
||||
|
||||
if (!U.experimental.enable_overlay_next) {
|
||||
DRW_state_lock(0);
|
||||
DRW_state_lock(DRWState(0));
|
||||
}
|
||||
|
||||
if (!select_pass_fn(DRW_SELECT_PASS_POST, select_pass_user_data)) {
|
||||
@@ -2619,7 +2627,7 @@ void DRW_draw_select_loop(Depsgraph *depsgraph,
|
||||
DRW_smoke_exit(DST.vmempool);
|
||||
|
||||
/* WORKAROUND: Do not leave ownership to the viewport list. */
|
||||
DRW_viewport_texture_list_get()->depth = NULL;
|
||||
DRW_viewport_texture_list_get()->depth = nullptr;
|
||||
|
||||
DRW_state_reset();
|
||||
drw_engines_disable();
|
||||
@@ -2642,7 +2650,7 @@ void DRW_draw_depth_loop(Depsgraph *depsgraph,
|
||||
Scene *scene = DEG_get_evaluated_scene(depsgraph);
|
||||
RenderEngineType *engine_type = ED_view3d_engine_type(scene, v3d->shading.type);
|
||||
ViewLayer *view_layer = DEG_get_evaluated_view_layer(depsgraph);
|
||||
RegionView3D *rv3d = region->regiondata;
|
||||
RegionView3D *rv3d = static_cast<RegionView3D *>(region->regiondata);
|
||||
|
||||
/* Reset before using it. */
|
||||
drw_state_prepare_clean_for_draw(&DST);
|
||||
@@ -2651,18 +2659,18 @@ void DRW_draw_depth_loop(Depsgraph *depsgraph,
|
||||
|
||||
/* Instead of 'DRW_context_state_init(C, &DST.draw_ctx)', assign from args */
|
||||
BKE_view_layer_synced_ensure(scene, view_layer);
|
||||
DST.draw_ctx = (DRWContextState){
|
||||
.region = region,
|
||||
.rv3d = rv3d,
|
||||
.v3d = v3d,
|
||||
.scene = scene,
|
||||
.view_layer = view_layer,
|
||||
.obact = BKE_view_layer_active_object_get(view_layer),
|
||||
.engine_type = engine_type,
|
||||
.depsgraph = depsgraph,
|
||||
};
|
||||
DST.draw_ctx = {};
|
||||
DST.draw_ctx.region = region;
|
||||
DST.draw_ctx.rv3d = rv3d;
|
||||
DST.draw_ctx.v3d = v3d;
|
||||
DST.draw_ctx.scene = scene;
|
||||
DST.draw_ctx.view_layer = view_layer;
|
||||
DST.draw_ctx.obact = BKE_view_layer_active_object_get(view_layer);
|
||||
DST.draw_ctx.engine_type = engine_type;
|
||||
DST.draw_ctx.depsgraph = depsgraph;
|
||||
|
||||
drw_context_state_init();
|
||||
drw_manager_init(&DST, viewport, NULL);
|
||||
drw_manager_init(&DST, viewport, nullptr);
|
||||
|
||||
if (use_gpencil) {
|
||||
use_drw_engine(U.experimental.use_grease_pencil_version3 ? &draw_engine_gpencil_next_type :
|
||||
@@ -2680,7 +2688,7 @@ void DRW_draw_depth_loop(Depsgraph *depsgraph,
|
||||
/* Setup frame-buffer. */
|
||||
GPUTexture *depth_tx = GPU_viewport_depth_texture(viewport);
|
||||
|
||||
GPUFrameBuffer *depth_fb = NULL;
|
||||
GPUFrameBuffer *depth_fb = nullptr;
|
||||
GPU_framebuffer_ensure_config(&depth_fb,
|
||||
{
|
||||
GPU_ATTACHMENT_TEXTURE(depth_tx),
|
||||
@@ -2705,8 +2713,8 @@ void DRW_draw_depth_loop(Depsgraph *depsgraph,
|
||||
drw_engines_world_update(DST.draw_ctx.scene);
|
||||
|
||||
const int object_type_exclude_viewport = v3d->object_type_exclude_viewport;
|
||||
DST.dupli_origin = NULL;
|
||||
DST.dupli_origin_data = NULL;
|
||||
DST.dupli_origin = nullptr;
|
||||
DST.dupli_origin_data = nullptr;
|
||||
DEGObjectIterSettings deg_iter_settings = {0};
|
||||
deg_iter_settings.depsgraph = DST.draw_ctx.depsgraph;
|
||||
deg_iter_settings.flags = DEG_OBJECT_ITER_FOR_RENDER_ENGINE_FLAGS;
|
||||
@@ -2776,19 +2784,19 @@ void DRW_draw_select_id(Depsgraph *depsgraph, ARegion *region, View3D *v3d, cons
|
||||
|
||||
/* Instead of 'DRW_context_state_init(C, &DST.draw_ctx)', assign from args */
|
||||
BKE_view_layer_synced_ensure(scene, view_layer);
|
||||
DST.draw_ctx = (DRWContextState){
|
||||
.region = region,
|
||||
.rv3d = region->regiondata,
|
||||
.v3d = v3d,
|
||||
.scene = scene,
|
||||
.view_layer = view_layer,
|
||||
.obact = BKE_view_layer_active_object_get(view_layer),
|
||||
.depsgraph = depsgraph,
|
||||
};
|
||||
DST.draw_ctx = {};
|
||||
DST.draw_ctx.region = region;
|
||||
DST.draw_ctx.rv3d = static_cast<RegionView3D *>(region->regiondata);
|
||||
DST.draw_ctx.v3d = v3d;
|
||||
DST.draw_ctx.scene = scene;
|
||||
DST.draw_ctx.view_layer = view_layer;
|
||||
DST.draw_ctx.obact = BKE_view_layer_active_object_get(view_layer);
|
||||
DST.draw_ctx.depsgraph = depsgraph;
|
||||
|
||||
drw_task_graph_init();
|
||||
drw_context_state_init();
|
||||
|
||||
drw_manager_init(&DST, viewport, NULL);
|
||||
drw_manager_init(&DST, viewport, nullptr);
|
||||
|
||||
/* Update UBO's */
|
||||
UI_SetTheme(SPACE_VIEW3D, RGN_TYPE_WINDOW);
|
||||
@@ -2855,7 +2863,7 @@ void DRW_draw_select_id(Depsgraph *depsgraph, ARegion *region, View3D *v3d, cons
|
||||
void DRW_draw_depth_object(
|
||||
Scene *scene, ARegion *region, View3D *v3d, GPUViewport *viewport, Object *object)
|
||||
{
|
||||
RegionView3D *rv3d = region->regiondata;
|
||||
RegionView3D *rv3d = static_cast<RegionView3D *>(region->regiondata);
|
||||
|
||||
GPU_matrix_projection_set(rv3d->winmat);
|
||||
GPU_matrix_set(rv3d->viewmat);
|
||||
@@ -2864,7 +2872,7 @@ void DRW_draw_depth_object(
|
||||
/* Setup frame-buffer. */
|
||||
GPUTexture *depth_tx = GPU_viewport_depth_texture(viewport);
|
||||
|
||||
GPUFrameBuffer *depth_fb = NULL;
|
||||
GPUFrameBuffer *depth_fb = nullptr;
|
||||
GPU_framebuffer_ensure_config(&depth_fb,
|
||||
{
|
||||
GPU_ATTACHMENT_TEXTURE(depth_tx),
|
||||
@@ -2883,7 +2891,7 @@ void DRW_draw_depth_object(
|
||||
for (int i = 0; i < 6; i++) {
|
||||
copy_v4_v4(planes.world[i], rv3d->clip_local[i]);
|
||||
}
|
||||
copy_m4_m4(planes.ClipModelMatrix, object->object_to_world);
|
||||
copy_m4_m4(planes.ClipModelMatrix.ptr(), object->object_to_world);
|
||||
}
|
||||
|
||||
drw_batch_cache_validate(object);
|
||||
@@ -2892,7 +2900,7 @@ void DRW_draw_depth_object(
|
||||
case OB_MESH: {
|
||||
GPUBatch *batch;
|
||||
|
||||
Mesh *me = object->data;
|
||||
Mesh *me = static_cast<Mesh *>(object->data);
|
||||
|
||||
if (object->mode & OB_MODE_EDIT) {
|
||||
batch = DRW_mesh_batch_cache_get_edit_triangles(me);
|
||||
@@ -2909,7 +2917,7 @@ void DRW_draw_depth_object(
|
||||
GPU_SHADER_CFG_DEFAULT;
|
||||
GPU_batch_program_set_builtin_with_config(batch, GPU_SHADER_3D_DEPTH_ONLY, sh_cfg);
|
||||
|
||||
GPUUniformBuf *ubo = NULL;
|
||||
GPUUniformBuf *ubo = nullptr;
|
||||
if (use_clipping_planes) {
|
||||
ubo = GPU_uniformbuf_create_ex(sizeof(struct GPUClipPlanes), &planes, __func__);
|
||||
GPU_batch_uniformbuf_bind(batch, "clipPlanes", ubo);
|
||||
@@ -2943,7 +2951,7 @@ void DRW_draw_depth_object(
|
||||
|
||||
bool DRW_state_is_fbo(void)
|
||||
{
|
||||
return ((DST.default_framebuffer != NULL) || DST.options.is_image_render) &&
|
||||
return ((DST.default_framebuffer != nullptr) || DST.options.is_image_render) &&
|
||||
!DRW_state_is_depth() && !DRW_state_is_select();
|
||||
}
|
||||
|
||||
@@ -2980,9 +2988,9 @@ bool DRW_state_is_viewport_image_render(void)
|
||||
|
||||
bool DRW_state_is_playback(void)
|
||||
{
|
||||
if (DST.draw_ctx.evil_C != NULL) {
|
||||
if (DST.draw_ctx.evil_C != nullptr) {
|
||||
wmWindowManager *wm = CTX_wm_manager(DST.draw_ctx.evil_C);
|
||||
return ED_screen_animation_playing(wm) != NULL;
|
||||
return ED_screen_animation_playing(wm) != nullptr;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
@@ -3002,7 +3010,7 @@ bool DRW_state_show_text(void)
|
||||
bool DRW_state_draw_support(void)
|
||||
{
|
||||
View3D *v3d = DST.draw_ctx.v3d;
|
||||
return (DRW_state_is_scene_render() == false) && (v3d != NULL) &&
|
||||
return (DRW_state_is_scene_render() == false) && (v3d != nullptr) &&
|
||||
((v3d->flag2 & V3D_HIDE_OVERLAYS) == 0);
|
||||
}
|
||||
|
||||
@@ -3035,7 +3043,8 @@ bool DRW_engine_render_support(DrawEngineType *draw_engine_type)
|
||||
|
||||
void DRW_engine_register(DrawEngineType *draw_engine_type)
|
||||
{
|
||||
DRWRegisteredDrawEngine *draw_engine = MEM_mallocN(sizeof(DRWRegisteredDrawEngine), __func__);
|
||||
DRWRegisteredDrawEngine *draw_engine = static_cast<DRWRegisteredDrawEngine *>(
|
||||
MEM_mallocN(sizeof(DRWRegisteredDrawEngine), __func__));
|
||||
draw_engine->draw_engine = draw_engine_type;
|
||||
draw_engine->index = g_registered_engines.len;
|
||||
|
||||
@@ -3110,8 +3119,12 @@ void DRW_engines_register(void)
|
||||
static void drw_registered_engines_free(void)
|
||||
{
|
||||
DRWRegisteredDrawEngine *next;
|
||||
for (DRWRegisteredDrawEngine *type = g_registered_engines.engines.first; type; type = next) {
|
||||
next = type->next;
|
||||
for (DRWRegisteredDrawEngine *type =
|
||||
static_cast<DRWRegisteredDrawEngine *>(g_registered_engines.engines.first);
|
||||
type;
|
||||
type = next)
|
||||
{
|
||||
next = static_cast<DRWRegisteredDrawEngine *>(type->next);
|
||||
BLI_remlink(&R_engines, type);
|
||||
|
||||
if (type->draw_engine->engine_free) {
|
||||
@@ -3128,7 +3141,7 @@ void DRW_engines_free(void)
|
||||
{
|
||||
drw_registered_engines_free();
|
||||
|
||||
if (DST.system_gpu_context == NULL) {
|
||||
if (DST.system_gpu_context == nullptr) {
|
||||
/* Nothing has been setup. Nothing to clear.
|
||||
* Otherwise, DRW_gpu_context_enable can
|
||||
* create a context in background mode. (see #62355) */
|
||||
@@ -3149,7 +3162,7 @@ void DRW_engines_free(void)
|
||||
DRW_globals_free();
|
||||
|
||||
drw_debug_module_free(DST.debug);
|
||||
DST.debug = NULL;
|
||||
DST.debug = nullptr;
|
||||
|
||||
DRW_UBO_FREE_SAFE(G_draw.block_ubo);
|
||||
DRW_UBO_FREE_SAFE(G_draw.view_ubo);
|
||||
@@ -3166,7 +3179,7 @@ void DRW_engines_free(void)
|
||||
|
||||
void DRW_render_context_enable(Render *render)
|
||||
{
|
||||
if (G.background && DST.system_gpu_context == NULL) {
|
||||
if (G.background && DST.system_gpu_context == nullptr) {
|
||||
WM_init_gpu();
|
||||
}
|
||||
|
||||
@@ -3181,7 +3194,7 @@ void DRW_render_context_enable(Render *render)
|
||||
void *re_system_gpu_context = RE_system_gpu_context_get(render);
|
||||
|
||||
/* Changing Context */
|
||||
if (re_system_gpu_context != NULL) {
|
||||
if (re_system_gpu_context != nullptr) {
|
||||
DRW_system_gpu_render_context_enable(re_system_gpu_context);
|
||||
/* We need to query gpu context after a gl context has been bound. */
|
||||
void *re_blender_gpu_context = RE_blender_gpu_context_ensure(render);
|
||||
@@ -3203,7 +3216,7 @@ void DRW_render_context_disable(Render *render)
|
||||
|
||||
void *re_system_gpu_context = RE_system_gpu_context_get(render);
|
||||
|
||||
if (re_system_gpu_context != NULL) {
|
||||
if (re_system_gpu_context != nullptr) {
|
||||
void *re_blender_gpu_context = RE_blender_gpu_context_ensure(render);
|
||||
/* GPU rendering may occur during context disable. */
|
||||
DRW_blender_gpu_render_context_disable(re_blender_gpu_context);
|
||||
@@ -3224,7 +3237,7 @@ void DRW_render_context_disable(Render *render)
|
||||
|
||||
void DRW_gpu_context_create(void)
|
||||
{
|
||||
BLI_assert(DST.system_gpu_context == NULL); /* Ensure it's called once */
|
||||
BLI_assert(DST.system_gpu_context == nullptr); /* Ensure it's called once */
|
||||
|
||||
DST.system_gpu_context_mutex = BLI_ticket_mutex_alloc();
|
||||
/* This changes the active context. */
|
||||
@@ -3239,7 +3252,7 @@ void DRW_gpu_context_create(void)
|
||||
void DRW_gpu_context_destroy(void)
|
||||
{
|
||||
BLI_assert(BLI_thread_is_main());
|
||||
if (DST.system_gpu_context != NULL) {
|
||||
if (DST.system_gpu_context != nullptr) {
|
||||
WM_system_gpu_context_activate(DST.system_gpu_context);
|
||||
GPU_context_active_set(DST.blender_gpu_context);
|
||||
GPU_context_discard(DST.blender_gpu_context);
|
||||
@@ -3248,9 +3261,9 @@ void DRW_gpu_context_destroy(void)
|
||||
}
|
||||
}
|
||||
|
||||
void DRW_gpu_context_enable_ex(bool UNUSED(restore))
|
||||
void DRW_gpu_context_enable_ex(bool /*restore*/)
|
||||
{
|
||||
if (DST.system_gpu_context != NULL) {
|
||||
if (DST.system_gpu_context != nullptr) {
|
||||
/* IMPORTANT: We don't support immediate mode in render mode!
|
||||
* This shall remain in effect until immediate mode supports
|
||||
* multiple threads. */
|
||||
@@ -3263,7 +3276,7 @@ void DRW_gpu_context_enable_ex(bool UNUSED(restore))
|
||||
|
||||
void DRW_gpu_context_disable_ex(bool restore)
|
||||
{
|
||||
if (DST.system_gpu_context != NULL) {
|
||||
if (DST.system_gpu_context != nullptr) {
|
||||
#ifdef __APPLE__
|
||||
/* Need to flush before disabling draw context, otherwise it does not
|
||||
* always finish drawing and viewport can be empty or partially drawn */
|
||||
@@ -3277,7 +3290,7 @@ void DRW_gpu_context_disable_ex(bool restore)
|
||||
}
|
||||
else {
|
||||
WM_system_gpu_context_release(DST.system_gpu_context);
|
||||
GPU_context_active_set(NULL);
|
||||
GPU_context_active_set(nullptr);
|
||||
}
|
||||
|
||||
/* Render boundaries are opened and closed here as this may be
|
||||
@@ -3292,7 +3305,7 @@ void DRW_gpu_context_enable(void)
|
||||
{
|
||||
/* TODO: should be replace by a more elegant alternative. */
|
||||
|
||||
if (G.background && DST.system_gpu_context == NULL) {
|
||||
if (G.background && DST.system_gpu_context == nullptr) {
|
||||
WM_init_gpu();
|
||||
}
|
||||
DRW_gpu_context_enable_ex(true);
|
||||
@@ -3325,13 +3338,13 @@ void DRW_blender_gpu_render_context_enable(void *re_gpu_context)
|
||||
/* If thread is main you should use DRW_gpu_context_enable(). */
|
||||
BLI_assert(!BLI_thread_is_main());
|
||||
|
||||
GPU_context_active_set(re_gpu_context);
|
||||
GPU_context_active_set(static_cast<GPUContext *>(re_gpu_context));
|
||||
}
|
||||
|
||||
void DRW_blender_gpu_render_context_disable(void *UNUSED(re_gpu_context))
|
||||
void DRW_blender_gpu_render_context_disable(void * /*re_gpu_context*/)
|
||||
{
|
||||
GPU_flush();
|
||||
GPU_context_active_set(NULL);
|
||||
GPU_context_active_set(nullptr);
|
||||
}
|
||||
|
||||
/** \} */
|
||||
@@ -3433,7 +3446,7 @@ bool DRW_gpu_context_release(void)
|
||||
return false;
|
||||
}
|
||||
|
||||
GPU_context_active_set(NULL);
|
||||
GPU_context_active_set(nullptr);
|
||||
WM_system_gpu_context_release(DST.system_gpu_context);
|
||||
|
||||
return true;
|
||||
@@ -1270,10 +1270,10 @@ void DRW_sculpt_debug_cb(

#if 0 /* Nodes hierarchy. */
if (flag & PBVH_Leaf) {
DRW_debug_bbox(&bb, (float[4]){0.0f, 1.0f, 0.0f, 1.0f});
DRW_debug_bbox(&bb, blender::float4{0.0f, 1.0f, 0.0f, 1.0f});
}
else {
DRW_debug_bbox(&bb, (float[4]){0.5f, 0.5f, 0.5f, 0.6f});
DRW_debug_bbox(&bb, blender::float4{0.5f, 0.5f, 0.5f, 0.6f});
}
#else /* Color coded leaf bounds. */
if (flag & (PBVH_Leaf | PBVH_TexLeaf)) {
@@ -2008,7 +2008,7 @@ static void draw_frustum_boundbox_calc(const float (*viewinv)[4],

#if 0 /* Equivalent to this but it has accuracy problems. */
BKE_boundbox_init_from_minmax(
&bbox, (const float[3]){-1.0f, -1.0f, -1.0f}, (const float[3]){1.0f, 1.0f, 1.0f});
&bbox, blender::float3{-1.0f, -1.0f, -1.0f}, blender::float3{1.0f, 1.0f, 1.0f});
for (int i = 0; i < 8; i++) {
mul_project_m4_v3(projinv, bbox.vec[i]);
}
@@ -2311,8 +2311,8 @@ void DRW_view_update(DRWView *view,
#ifdef DRW_DEBUG_CULLING
if (G.debug_value != 0) {
DRW_debug_sphere(
view->frustum_bsphere.center, view->frustum_bsphere.radius, (const float[4]){1, 1, 0, 1});
DRW_debug_bbox(&view->frustum_corners, (const float[4]){1, 1, 0, 1});
view->frustum_bsphere.center, view->frustum_bsphere.radius, blender::float4{1, 1, 0, 1});
DRW_debug_bbox(&view->frustum_corners, blender::float4{1, 1, 0, 1});
}
#endif
}

@@ -34,7 +34,7 @@ void DRW_select_load_id(uint id)
|
||||
|
||||
#define DEBUG_UBO_BINDING
|
||||
|
||||
typedef struct DRWCommandsState {
|
||||
struct DRWCommandsState {
|
||||
GPUBatch *batch;
|
||||
int resource_chunk;
|
||||
int resource_id;
|
||||
@@ -60,7 +60,7 @@ typedef struct DRWCommandsState {
|
||||
/* Drawing State */
|
||||
DRWState drw_state_enabled;
|
||||
DRWState drw_state_disabled;
|
||||
} DRWCommandsState;
|
||||
};
|
||||
|
||||
/* -------------------------------------------------------------------- */
|
||||
/** \name Draw State (DRW_state)
|
||||
@@ -75,13 +75,13 @@ void drw_state_set(DRWState state)
|
||||
return;
|
||||
}
|
||||
|
||||
eGPUWriteMask write_mask = 0;
|
||||
eGPUBlend blend = 0;
|
||||
eGPUFaceCullTest culling_test = 0;
|
||||
eGPUDepthTest depth_test = 0;
|
||||
eGPUStencilTest stencil_test = 0;
|
||||
eGPUStencilOp stencil_op = 0;
|
||||
eGPUProvokingVertex provoking_vert = 0;
|
||||
eGPUWriteMask write_mask = eGPUWriteMask(0);
|
||||
eGPUBlend blend = eGPUBlend(0);
|
||||
eGPUFaceCullTest culling_test = eGPUFaceCullTest(0);
|
||||
eGPUDepthTest depth_test = eGPUDepthTest(0);
|
||||
eGPUStencilTest stencil_test = eGPUStencilTest(0);
|
||||
eGPUStencilOp stencil_op = eGPUStencilOp(0);
|
||||
eGPUProvokingVertex provoking_vert = eGPUProvokingVertex(0);
|
||||
|
||||
if (state & DRW_STATE_WRITE_DEPTH) {
|
||||
write_mask |= GPU_WRITE_DEPTH;
|
||||
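The drw_state_set() hunk above illustrates a general rule behind these edits: C accepts an integer literal as the initializer of an enum variable, while C++ requires an explicit conversion, so every zero-initialization gains a functional cast to its enum type. A reduced sketch of the same issue with a hypothetical enum:

enum eExampleFlag { EXAMPLE_NONE = 0, EXAMPLE_FIRST = 1 };

void example()
{
  /* eExampleFlag flag = 0;  -- valid C, rejected by a C++ compiler. */
  eExampleFlag flag = eExampleFlag(0);  /* Explicit conversion required. */
  flag = EXAMPLE_FIRST;                 /* Assigning an enumerator is still fine. */
}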
@@ -339,13 +339,14 @@ void DRW_state_reset(void)

static bool draw_call_is_culled(const DRWResourceHandle *handle, DRWView *view)
{
DRWCullingState *culling = DRW_memblock_elem_from_handle(DST.vmempool->cullstates, handle);
DRWCullingState *culling = static_cast<DRWCullingState *>(
DRW_memblock_elem_from_handle(DST.vmempool->cullstates, handle));
return (culling->mask & view->culling_mask) != 0;
}

void DRW_view_set_active(const DRWView *view)
{
DST.view_active = (view != NULL) ? ((DRWView *)view) : DST.view_default;
DST.view_active = (view != nullptr) ? ((DRWView *)view) : DST.view_default;
}

const DRWView *DRW_view_get_active(void)
@@ -477,7 +478,7 @@ static void draw_compute_culling(DRWView *view)
BLI_memblock_iter iter;
BLI_memblock_iternew(DST.vmempool->cullstates, &iter);
DRWCullingState *cull;
while ((cull = BLI_memblock_iterstep(&iter))) {
while ((cull = static_cast<DRWCullingState *>(BLI_memblock_iterstep(&iter)))) {
if (cull->bsphere.radius < 0.0) {
cull->mask = 0;
}
@@ -489,11 +490,11 @@ static void draw_compute_culling(DRWView *view)
if (G.debug_value != 0) {
if (culled) {
DRW_debug_sphere(
cull->bsphere.center, cull->bsphere.radius, (const float[4]){1, 0, 0, 1});
cull->bsphere.center, cull->bsphere.radius, blender::float4{1, 0, 0, 1});
}
else {
DRW_debug_sphere(
cull->bsphere.center, cull->bsphere.radius, (const float[4]){0, 1, 0, 1});
cull->bsphere.center, cull->bsphere.radius, blender::float4{0, 1, 0, 1});
}
}
#endif
@@ -521,7 +522,8 @@ BLI_INLINE void draw_legacy_matrix_update(DRWShadingGroup *shgroup,
float obinv_loc)
{
/* Still supported for compatibility with gpu_shader_* but should be forbidden. */
DRWObjectMatrix *ob_mats = DRW_memblock_elem_from_handle(DST.vmempool->obmats, handle);
DRWObjectMatrix *ob_mats = static_cast<DRWObjectMatrix *>(
DRW_memblock_elem_from_handle(DST.vmempool->obmats, handle));
if (obmat_loc != -1) {
GPU_shader_uniform_float_ex(shgroup->shader, obmat_loc, 16, 1, (float *)ob_mats->model);
}
@@ -632,8 +634,11 @@ static void draw_update_uniforms(DRWShadingGroup *shgroup,
}
break;
case DRW_UNIFORM_INT:
GPU_shader_uniform_int_ex(
shgroup->shader, uni->location, uni->length, uni->arraysize, uni->pvalue);
GPU_shader_uniform_int_ex(shgroup->shader,
uni->location,
uni->length,
uni->arraysize,
static_cast<const int *>(uni->pvalue));
break;
case DRW_UNIFORM_FLOAT_COPY:
BLI_assert(uni->arraysize == 1);
@@ -643,8 +648,11 @@ static void draw_update_uniforms(DRWShadingGroup *shgroup,
}
break;
case DRW_UNIFORM_FLOAT:
GPU_shader_uniform_float_ex(
shgroup->shader, uni->location, uni->length, uni->arraysize, uni->pvalue);
GPU_shader_uniform_float_ex(shgroup->shader,
uni->location,
uni->length,
uni->arraysize,
static_cast<const float *>(uni->pvalue));
break;
case DRW_UNIFORM_TEXTURE:
GPU_texture_bind_ex(uni->texture, uni->sampler_state, uni->location);
@@ -714,7 +722,7 @@ static void draw_update_uniforms(DRWShadingGroup *shgroup,
case DRW_UNIFORM_VERTEX_BUFFER_AS_STORAGE:
GPU_vertbuf_bind_as_ssbo(uni->vertbuf, uni->location);
break;
/* Legacy/Fallback support. */
/* Legacy/Fallback support. */
case DRW_UNIFORM_BASE_INSTANCE:
state->baseinst_loc = uni->location;
break;
@@ -738,13 +746,14 @@ BLI_INLINE void draw_select_buffer(DRWShadingGroup *shgroup,
GPUBatch *batch,
const DRWResourceHandle *handle)
{
const bool is_instancing = (batch->inst[0] != NULL);
const bool is_instancing = (batch->inst[0] != nullptr);
int start = 0;
int count = 1;
int tot = is_instancing ? GPU_vertbuf_get_vertex_len(batch->inst[0]) :
GPU_vertbuf_get_vertex_len(batch->verts[0]);
/* HACK: get VBO data without actually drawing. */
int *select_id = (void *)GPU_vertbuf_get_data(state->select_buf);
int *select_id = static_cast<int *>(
(void *)static_cast<int *>(GPU_vertbuf_get_data(state->select_buf)));

/* Batching */
if (!is_instancing) {
@@ -770,10 +779,10 @@ BLI_INLINE void draw_select_buffer(DRWShadingGroup *shgroup,
}
}

typedef struct DRWCommandIterator {
struct DRWCommandIterator {
int cmd_index;
DRWCommandChunk *curr_chunk;
} DRWCommandIterator;
};

static void draw_command_iter_begin(DRWCommandIterator *iter, DRWShadingGroup *shgroup)
{
@@ -795,7 +804,7 @@ static DRWCommand *draw_command_iter_step(DRWCommandIterator *iter, eDRWCommandT
}
}
}
return NULL;
return nullptr;
}

static void draw_call_resource_bind(DRWCommandsState *state, const DRWResourceHandle *handle)
@@ -841,7 +850,7 @@ static void draw_call_batching_flush(DRWShadingGroup *shgroup, DRWCommandsState
draw_indirect_call(shgroup, state);
GPU_draw_list_submit(DST.draw_list);

state->batch = NULL;
state->batch = nullptr;
state->inst_count = 0;
state->base_inst = -1;
}
@@ -866,7 +875,7 @@ static void draw_call_single_do(DRWShadingGroup *shgroup,
}

if (G.f & G_FLAG_PICKSEL) {
if (state->select_buf != NULL) {
if (state->select_buf != nullptr) {
draw_select_buffer(shgroup, state, batch, &handle);
return;
}
@@ -909,10 +918,10 @@ static void draw_call_batching_start(DRWCommandsState *state)
state->resource_id = -1;
state->base_inst = 0;
state->inst_count = 0;
state->batch = NULL;
state->batch = nullptr;

state->select_id = -1;
state->select_buf = NULL;
state->select_buf = nullptr;
}

/* NOTE: Does not support batches with instancing VBOs. */
@@ -978,20 +987,19 @@ static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
{
BLI_assert(shgroup->shader);

DRWCommandsState state = {
.obmats_loc = -1,
.obinfos_loc = -1,
.obattrs_loc = -1,
.vlattrs_loc = -1,
.baseinst_loc = -1,
.chunkid_loc = -1,
.resourceid_loc = -1,
.obmat_loc = -1,
.obinv_loc = -1,
.obattrs_ubo = NULL,
.drw_state_enabled = 0,
.drw_state_disabled = 0,
};
DRWCommandsState state{};
state.obmats_loc = -1;
state.obinfos_loc = -1;
state.obattrs_loc = -1;
state.vlattrs_loc = -1;
state.baseinst_loc = -1;
state.chunkid_loc = -1;
state.resourceid_loc = -1;
state.obmat_loc = -1;
state.obinv_loc = -1;
state.obattrs_ubo = nullptr;
state.drw_state_enabled = DRWState(0);
state.drw_state_disabled = DRWState(0);

const bool shader_changed = (DST.shader != shgroup->shader);
bool use_tfeedback = false;
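The DRWCommandsState rewrite above follows from C99 designated initializers not being usable in the C++ dialect these files move to, so the struct is value-initialized with {} and the non-zero members are assigned afterwards. A small sketch of the same transformation with a hypothetical struct:

struct ExampleState {
  int location;
  void *buffer;
};

void example()
{
  /* C99: ExampleState s = {.location = -1, .buffer = NULL}; */
  ExampleState s{};  /* Zero/value-initialize every member. */
  s.location = -1;   /* Then assign only the members that need other values. */
}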
@@ -1010,7 +1018,7 @@ static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
}
GPU_shader_bind(shgroup->shader);
DST.shader = shgroup->shader;
DST.batch = NULL;
DST.batch = nullptr;
}

draw_update_uniforms(shgroup, &state, &use_tfeedback);
@@ -1050,10 +1058,10 @@ static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
case DRW_CMD_CLEAR:
GPU_framebuffer_clear(GPU_framebuffer_active_get(),
cmd->clear.clear_channels,
(float[4]){cmd->clear.r / 255.0f,
cmd->clear.g / 255.0f,
cmd->clear.b / 255.0f,
cmd->clear.a / 255.0f},
blender::float4{cmd->clear.r / 255.0f,
cmd->clear.g / 255.0f,
cmd->clear.b / 255.0f,
cmd->clear.a / 255.0f},
cmd->clear.depth,
cmd->clear.stencil);
break;
@@ -1177,11 +1185,11 @@ static void drw_draw_pass_ex(DRWPass *pass,
end_group = pass->original->shgroups.last;
}

if (start_group == NULL) {
if (start_group == nullptr) {
return;
}

DST.shader = NULL;
DST.shader = nullptr;

BLI_assert(DST.buffer_finish_called &&
"DRW_render_instance_buffer_finish had not been called before drawing");
@@ -1215,11 +1223,11 @@ static void drw_draw_pass_ex(DRWPass *pass,

if (DST.shader) {
GPU_shader_unbind();
DST.shader = NULL;
DST.shader = nullptr;
}

if (DST.batch) {
DST.batch = NULL;
DST.batch = nullptr;
}

/* Fix #67342 for some reason. AMD Pro driver bug. */
@@ -38,8 +38,8 @@

static CLG_LogRef LOG = {"draw.manager.shader"};

extern char datatoc_gpu_shader_depth_only_frag_glsl[];
extern char datatoc_common_fullscreen_vert_glsl[];
extern "C" char datatoc_gpu_shader_depth_only_frag_glsl[];
extern "C" char datatoc_common_fullscreen_vert_glsl[];

#define USE_DEFERRED_COMPILATION 1

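Declaring the generated datatoc_*_glsl arrays as extern "C" matters because those arrays are still emitted by a C data-to-C generator: without C linkage the C++ compiler would expect a mangled symbol name and linking would fail. A reduced sketch of the pattern with a hypothetical symbol:

/* Defined in a generated C translation unit, e.g. datatoc_example_vert_glsl.c. */
extern "C" char datatoc_example_vert_glsl[];

const char *example_vertex_source()
{
  /* Resolves against the unmangled C symbol at link time. */
  return datatoc_example_vert_glsl;
}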
@@ -51,7 +51,7 @@ extern char datatoc_common_fullscreen_vert_glsl[];
*
* \{ */

typedef struct DRWShaderCompiler {
struct DRWShaderCompiler {
/** Default compilation queue. */
ListBase queue; /* GPUMaterial */
SpinLock list_lock;
@@ -62,23 +62,23 @@ typedef struct DRWShaderCompiler {
void *system_gpu_context;
GPUContext *blender_gpu_context;
bool own_context;
} DRWShaderCompiler;
};

static void drw_deferred_shader_compilation_exec(
void *custom_data,
/* Cannot be const, this function implements wm_jobs_start_callback.
* NOLINTNEXTLINE: readability-non-const-parameter. */
bool *stop,
bool *UNUSED(do_update),
float *UNUSED(progress))
bool * /*do_update*/,
float * /*progress*/)
{
GPU_render_begin();
DRWShaderCompiler *comp = (DRWShaderCompiler *)custom_data;
void *system_gpu_context = comp->system_gpu_context;
GPUContext *blender_gpu_context = comp->blender_gpu_context;

BLI_assert(system_gpu_context != NULL);
BLI_assert(blender_gpu_context != NULL);
BLI_assert(system_gpu_context != nullptr);
BLI_assert(blender_gpu_context != nullptr);

const bool use_main_context_workaround = GPU_use_main_context_workaround();
if (use_main_context_workaround) {
@@ -100,7 +100,7 @@ static void drw_deferred_shader_compilation_exec(
/* Pop tail because it will be less likely to lock the main thread
* if all GPUMaterials are to be freed (see DRW_deferred_shader_remove()). */
LinkData *link = (LinkData *)BLI_poptail(&comp->queue);
GPUMaterial *mat = link ? (GPUMaterial *)link->data : NULL;
GPUMaterial *mat = link ? (GPUMaterial *)link->data : nullptr;
if (mat) {
/* Avoid another thread freeing the material mid compilation. */
GPU_material_acquire(mat);
@@ -120,7 +120,7 @@ static void drw_deferred_shader_compilation_exec(
/* Pop tail because it will be less likely to lock the main thread
* if all GPUMaterials are to be freed (see DRW_deferred_shader_remove()). */
link = (LinkData *)BLI_poptail(&comp->optimize_queue);
GPUMaterial *optimize_mat = link ? (GPUMaterial *)link->data : NULL;
GPUMaterial *optimize_mat = link ? (GPUMaterial *)link->data : nullptr;
if (optimize_mat) {
/* Avoid another thread freeing the material during optimization. */
GPU_material_acquire(optimize_mat);
@@ -144,7 +144,7 @@ static void drw_deferred_shader_compilation_exec(
}
}

GPU_context_active_set(NULL);
GPU_context_active_set(nullptr);
WM_system_gpu_context_release(system_gpu_context);
if (use_main_context_workaround) {
GPU_context_main_unlock();
@@ -191,11 +191,12 @@ static void drw_deferred_queue_append(GPUMaterial *mat, bool is_optimization_job
/* Get the running job or a new one if none is running. Can only have one job per type & owner.
*/
wmJob *wm_job = WM_jobs_get(
wm, win, wm, "Shaders Compilation", 0, WM_JOB_TYPE_SHADER_COMPILATION);
wm, win, wm, "Shaders Compilation", eWM_JobFlag(0), WM_JOB_TYPE_SHADER_COMPILATION);

DRWShaderCompiler *old_comp = (DRWShaderCompiler *)WM_jobs_customdata_get(wm_job);

DRWShaderCompiler *comp = MEM_callocN(sizeof(DRWShaderCompiler), "DRWShaderCompiler");
DRWShaderCompiler *comp = static_cast<DRWShaderCompiler *>(
MEM_callocN(sizeof(DRWShaderCompiler), "DRWShaderCompiler"));
BLI_spin_init(&comp->list_lock);

if (old_comp) {
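The MEM_callocN change above is the single most common edit in this commit: C converts void * to any object pointer implicitly, C++ does not, so every allocation result gains an explicit static_cast to its destination type. A reduced, self-contained sketch of the same rule using the standard allocator and a hypothetical struct:

#include <cstdlib>

struct ExampleJob {
  int id;
};

ExampleJob *example_alloc()
{
  /* C: ExampleJob *job = calloc(1, sizeof(ExampleJob)); compiles fine. */
  /* C++: the implicit void* -> ExampleJob* conversion is an error. */
  return static_cast<ExampleJob *>(std::calloc(1, sizeof(ExampleJob)));
}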
@@ -226,15 +227,15 @@ static void drw_deferred_queue_append(GPUMaterial *mat, bool is_optimization_job
}

/* Create only one context. */
if (comp->system_gpu_context == NULL) {
if (comp->system_gpu_context == nullptr) {
if (use_main_context) {
comp->system_gpu_context = DST.system_gpu_context;
comp->blender_gpu_context = DST.blender_gpu_context;
}
else {
comp->system_gpu_context = WM_system_gpu_context_create();
comp->blender_gpu_context = GPU_context_create(NULL, comp->system_gpu_context);
GPU_context_active_set(NULL);
comp->blender_gpu_context = GPU_context_create(nullptr, comp->system_gpu_context);
GPU_context_active_set(nullptr);

WM_system_gpu_context_activate(DST.system_gpu_context);
GPU_context_active_set(DST.blender_gpu_context);
@@ -245,7 +246,7 @@ static void drw_deferred_queue_append(GPUMaterial *mat, bool is_optimization_job
WM_jobs_customdata_set(wm_job, comp, drw_deferred_shader_compilation_free);
WM_jobs_timer(wm_job, 0.1, NC_MATERIAL | ND_SHADING_DRAW, 0);
WM_jobs_delay_start(wm_job, 0.1);
WM_jobs_callbacks(wm_job, drw_deferred_shader_compilation_exec, NULL, NULL, NULL);
WM_jobs_callbacks(wm_job, drw_deferred_shader_compilation_exec, nullptr, nullptr, nullptr);

G.is_break = false;

@@ -260,7 +261,7 @@ static void drw_deferred_shader_add(GPUMaterial *mat, bool deferred)

/* Do not defer the compilation if we are rendering for image.
* deferred rendering is only possible when `evil_C` is available */
if (DST.draw_ctx.evil_C == NULL || DRW_state_is_image_render() || !USE_DEFERRED_COMPILATION) {
if (DST.draw_ctx.evil_C == nullptr || DRW_state_is_image_render() || !USE_DEFERRED_COMPILATION) {
deferred = false;
}

@@ -310,16 +311,16 @@ static void drw_register_shader_vlattrs(GPUMaterial *mat)
if (!BLI_ghash_ensure_p(hash, POINTER_FROM_UINT(attr->hash_code), (void ***)&p_val)) {
DST.vmempool->vlattrs_ubo_ready = false;

GPULayerAttr *new_link = *p_val = MEM_dupallocN(attr);
GPULayerAttr *new_link = *p_val = static_cast<GPULayerAttr *>(MEM_dupallocN(attr));

/* Insert into the list ensuring sorted order. */
GPULayerAttr *link = list->first;
GPULayerAttr *link = static_cast<GPULayerAttr *>(list->first);

while (link && link->hash_code <= attr->hash_code) {
link = link->next;
}

new_link->prev = new_link->next = NULL;
new_link->prev = new_link->next = nullptr;
BLI_insertlinkbefore(list, link, new_link);
}

@@ -334,14 +335,14 @@ void DRW_deferred_shader_remove(GPUMaterial *mat)
LISTBASE_FOREACH (wmWindow *, win, &wm->windows) {
DRWShaderCompiler *comp = (DRWShaderCompiler *)WM_jobs_customdata_from_type(
wm, wm, WM_JOB_TYPE_SHADER_COMPILATION);
if (comp != NULL) {
if (comp != nullptr) {
BLI_spin_lock(&comp->list_lock);

/* Search for compilation job in queue. */
LinkData *link = (LinkData *)BLI_findptr(&comp->queue, mat, offsetof(LinkData, data));
if (link) {
BLI_remlink(&comp->queue, link);
GPU_material_status_set(link->data, GPU_MAT_CREATED);
GPU_material_status_set(static_cast<GPUMaterial *>(link->data), GPU_MAT_CREATED);
}

MEM_SAFE_FREE(link);
@@ -351,7 +352,8 @@ void DRW_deferred_shader_remove(GPUMaterial *mat)
&comp->optimize_queue, mat, offsetof(LinkData, data));
if (opti_link) {
BLI_remlink(&comp->optimize_queue, opti_link);
GPU_material_optimization_status_set(opti_link->data, GPU_MAT_OPTIMIZATION_READY);
GPU_material_optimization_status_set(static_cast<GPUMaterial *>(opti_link->data),
GPU_MAT_OPTIMIZATION_READY);
}
BLI_spin_unlock(&comp->list_lock);

@@ -367,14 +369,15 @@ void DRW_deferred_shader_optimize_remove(GPUMaterial *mat)
LISTBASE_FOREACH (wmWindow *, win, &wm->windows) {
DRWShaderCompiler *comp = (DRWShaderCompiler *)WM_jobs_customdata_from_type(
wm, wm, WM_JOB_TYPE_SHADER_COMPILATION);
if (comp != NULL) {
if (comp != nullptr) {
BLI_spin_lock(&comp->list_lock);
/* Search for optimization job in queue. */
LinkData *opti_link = (LinkData *)BLI_findptr(
&comp->optimize_queue, mat, offsetof(LinkData, data));
if (opti_link) {
BLI_remlink(&comp->optimize_queue, opti_link);
GPU_material_optimization_status_set(opti_link->data, GPU_MAT_OPTIMIZATION_READY);
GPU_material_optimization_status_set(static_cast<GPUMaterial *>(opti_link->data),
GPU_MAT_OPTIMIZATION_READY);
}
BLI_spin_unlock(&comp->list_lock);

@@ -398,7 +401,7 @@ GPUShader *DRW_shader_create_from_info_name(const char *info_name)
GPUShader *DRW_shader_create_ex(
const char *vert, const char *geom, const char *frag, const char *defines, const char *name)
{
return GPU_shader_create(vert, frag, geom, NULL, defines, name);
return GPU_shader_create(vert, frag, geom, nullptr, defines, name);
}

GPUShader *DRW_shader_create_with_lib_ex(const char *vert,
@@ -409,9 +412,9 @@ GPUShader *DRW_shader_create_with_lib_ex(const char *vert,
const char *name)
{
GPUShader *sh;
char *vert_with_lib = NULL;
char *frag_with_lib = NULL;
char *geom_with_lib = NULL;
char *vert_with_lib = nullptr;
char *frag_with_lib = nullptr;
char *geom_with_lib = nullptr;

vert_with_lib = BLI_string_joinN(lib, vert);
frag_with_lib = BLI_string_joinN(lib, frag);
@@ -419,7 +422,7 @@ GPUShader *DRW_shader_create_with_lib_ex(const char *vert,
geom_with_lib = BLI_string_joinN(lib, geom);
}

sh = GPU_shader_create(vert_with_lib, frag_with_lib, geom_with_lib, NULL, defines, name);
sh = GPU_shader_create(vert_with_lib, frag_with_lib, geom_with_lib, nullptr, defines, name);

MEM_freeN(vert_with_lib);
MEM_freeN(frag_with_lib);
@@ -440,9 +443,9 @@ GPUShader *DRW_shader_create_with_shaderlib_ex(const char *vert,
GPUShader *sh;
char *vert_with_lib = DRW_shader_library_create_shader_string(lib, vert);
char *frag_with_lib = DRW_shader_library_create_shader_string(lib, frag);
char *geom_with_lib = (geom) ? DRW_shader_library_create_shader_string(lib, geom) : NULL;
char *geom_with_lib = (geom) ? DRW_shader_library_create_shader_string(lib, geom) : nullptr;

sh = GPU_shader_create(vert_with_lib, frag_with_lib, geom_with_lib, NULL, defines, name);
sh = GPU_shader_create(vert_with_lib, frag_with_lib, geom_with_lib, nullptr, defines, name);

MEM_SAFE_FREE(vert_with_lib);
MEM_SAFE_FREE(frag_with_lib);
@@ -461,8 +464,8 @@ GPUShader *DRW_shader_create_with_transform_feedback(const char *vert,
return GPU_shader_create_ex(vert,
datatoc_gpu_shader_depth_only_frag_glsl,
geom,
NULL,
NULL,
nullptr,
nullptr,
defines,
prim_type,
varying_names,
@@ -472,7 +475,8 @@ GPUShader *DRW_shader_create_with_transform_feedback(const char *vert,

GPUShader *DRW_shader_create_fullscreen_ex(const char *frag, const char *defines, const char *name)
{
return GPU_shader_create(datatoc_common_fullscreen_vert_glsl, frag, NULL, NULL, defines, name);
return GPU_shader_create(
datatoc_common_fullscreen_vert_glsl, frag, nullptr, nullptr, defines, name);
}

GPUShader *DRW_shader_create_fullscreen_with_shaderlib_ex(const char *frag,
@@ -485,7 +489,7 @@ GPUShader *DRW_shader_create_fullscreen_with_shaderlib_ex(const char *frag,
char *vert = datatoc_common_fullscreen_vert_glsl;
char *frag_with_lib = DRW_shader_library_create_shader_string(lib, frag);

sh = GPU_shader_create(vert, frag_with_lib, NULL, NULL, defines, name);
sh = GPU_shader_create(vert, frag_with_lib, nullptr, nullptr, defines, name);

MEM_SAFE_FREE(frag_with_lib);

@@ -502,7 +506,7 @@ GPUMaterial *DRW_shader_from_world(World *wo,
{
Scene *scene = (Scene *)DEG_get_original_id(&DST.draw_ctx.scene->id);
GPUMaterial *mat = GPU_material_from_nodetree(scene,
NULL,
nullptr,
ntree,
&wo->gpumaterial,
wo->id.name,
@@ -631,7 +635,8 @@ struct DRWShaderLibrary {

DRWShaderLibrary *DRW_shader_library_create(void)
{
return MEM_callocN(sizeof(DRWShaderLibrary), "DRWShaderLibrary");
return static_cast<DRWShaderLibrary *>(
MEM_callocN(sizeof(DRWShaderLibrary), "DRWShaderLibrary"));
}

void DRW_shader_library_free(DRWShaderLibrary *lib)
@@ -658,7 +663,7 @@ static int drw_shader_library_search(const DRWShaderLibrary *lib, const char *na
static uint64_t drw_shader_dependencies_get(const DRWShaderLibrary *lib,
const char *pragma_str,
const char *lib_code,
const char *UNUSED(lib_name))
const char * /*lib_name*/)
{
/* Search dependencies. */
uint pragma_len = strlen(pragma_str);
@@ -694,7 +699,7 @@ void DRW_shader_library_add_file(DRWShaderLibrary *lib, const char *lib_code, co
{
int index = -1;
for (int i = 0; i < MAX_LIB; i++) {
if (lib->libs[i] == NULL) {
if (lib->libs[i] == nullptr) {
index = i;
break;
}
@@ -49,7 +49,7 @@ static bool drw_texture_format_supports_framebuffer(eGPUTextureFormat format)

void drw_texture_set_parameters(GPUTexture *tex, DRWTextureFlag flags)
{
if (tex == NULL) {
if (tex == nullptr) {
return;
}

@@ -227,9 +227,9 @@ void DRW_texture_ensure_fullscreen_2d_ex(GPUTexture **tex,
eGPUTextureUsage usage,
DRWTextureFlag flags)
{
if (*(tex) == NULL) {
if (*(tex) == nullptr) {
const float *size = DRW_viewport_size_get();
*(tex) = DRW_texture_create_2d_ex((int)size[0], (int)size[1], format, usage, flags, NULL);
*(tex) = DRW_texture_create_2d_ex((int)size[0], (int)size[1], format, usage, flags, nullptr);
}
}

@@ -247,8 +247,8 @@ void DRW_texture_ensure_2d_ex(GPUTexture **tex,
eGPUTextureUsage usage,
DRWTextureFlag flags)
{
if (*(tex) == NULL) {
*(tex) = DRW_texture_create_2d_ex(w, h, format, usage, flags, NULL);
if (*(tex) == nullptr) {
*(tex) = DRW_texture_create_2d_ex(w, h, format, usage, flags, nullptr);
}
}

@@ -36,16 +36,15 @@
uint *DRW_select_buffer_read(
Depsgraph *depsgraph, ARegion *region, View3D *v3d, const rcti *rect, uint *r_buf_len)
{
uint *r_buf = NULL;
uint *r_buf = nullptr;
uint buf_len = 0;

/* Clamp rect. */
rcti r = {
.xmin = 0,
.xmax = region->winx,
.ymin = 0,
.ymax = region->winy,
};
rcti r{};
r.xmin = 0;
r.xmax = region->winx;
r.ymin = 0;
r.ymax = region->winy;

/* Make sure that the rect is within the bounds of the viewport.
* Some GPUs have problems reading pixels off limits. */
@@ -63,7 +62,7 @@ uint *DRW_select_buffer_read(

/* Read the UI32 pixels. */
buf_len = BLI_rcti_size_x(rect) * BLI_rcti_size_y(rect);
r_buf = MEM_mallocN(buf_len * sizeof(*r_buf), __func__);
r_buf = static_cast<uint *>(MEM_mallocN(buf_len * sizeof(*r_buf), __func__));

GPUFrameBuffer *select_id_fb = DRW_engine_select_framebuffer_get();
GPU_framebuffer_bind(select_id_fb);
@@ -115,8 +114,8 @@ uint *DRW_select_buffer_bitmap_from_rect(

uint buf_len;
uint *buf = DRW_select_buffer_read(depsgraph, region, v3d, &rect_px, &buf_len);
if (buf == NULL) {
return NULL;
if (buf == nullptr) {
return nullptr;
}

BLI_assert(select_ctx->index_drawn_len > 0);
@@ -149,17 +148,16 @@ uint *DRW_select_buffer_bitmap_from_circle(Depsgraph *depsgraph,
{
SELECTID_Context *select_ctx = DRW_select_engine_context_get();

const rcti rect = {
.xmin = center[0] - radius,
.xmax = center[0] + radius + 1,
.ymin = center[1] - radius,
.ymax = center[1] + radius + 1,
};
rcti rect{};
rect.xmin = center[0] - radius;
rect.xmax = center[0] + radius + 1;
rect.ymin = center[1] - radius;
rect.ymax = center[1] + radius + 1;

const uint *buf = DRW_select_buffer_read(depsgraph, region, v3d, &rect, NULL);
const uint *buf = DRW_select_buffer_read(depsgraph, region, v3d, &rect, nullptr);

if (buf == NULL) {
return NULL;
if (buf == nullptr) {
return nullptr;
}

BLI_assert(select_ctx->index_drawn_len > 0);
@@ -195,7 +193,7 @@ struct PolyMaskData {

static void drw_select_mask_px_cb(int x, int x_end, int y, void *user_data)
{
struct PolyMaskData *data = user_data;
struct PolyMaskData *data = static_cast<PolyMaskData *>(user_data);
BLI_bitmap *px = data->px;
int i = (y * data->width) + x;
do {
@@ -220,8 +218,8 @@ uint *DRW_select_buffer_bitmap_from_poly(Depsgraph *depsgraph,

uint buf_len;
uint *buf = DRW_select_buffer_read(depsgraph, region, v3d, &rect_px, &buf_len);
if (buf == NULL) {
return NULL;
if (buf == nullptr) {
return nullptr;
}

BLI_bitmap *buf_mask = BLI_BITMAP_NEW(buf_len, __func__);
@@ -279,12 +277,11 @@ uint DRW_select_buffer_sample_point(Depsgraph *depsgraph,
{
uint ret = 0;

const rcti rect = {
.xmin = center[0],
.xmax = center[0] + 1,
.ymin = center[1],
.ymax = center[1] + 1,
};
rcti rect{};
rect.xmin = center[0];
rect.xmax = center[0] + 1;
rect.ymin = center[1];
rect.ymax = center[1] + 1;

uint buf_len;
uint *buf = DRW_select_buffer_read(depsgraph, region, v3d, &rect, &buf_len);
@@ -306,7 +303,7 @@ struct SelectReadData {

static bool select_buffer_test_fn(const void *__restrict value, void *__restrict userdata)
{
struct SelectReadData *data = userdata;
struct SelectReadData *data = static_cast<SelectReadData *>(userdata);
uint hit_id = *(uint *)value;
if (hit_id && hit_id >= data->id_min && hit_id < data->id_max) {
/* Start at 1 to confirm. */
@@ -341,13 +338,13 @@ uint DRW_select_buffer_find_nearest_to_point(Depsgraph *depsgraph,
uint buf_len;
const uint *buf = DRW_select_buffer_read(depsgraph, region, v3d, &rect, &buf_len);

if (buf == NULL) {
if (buf == nullptr) {
return 0;
}

const int shape[2] = {height, width};
const int center_yx[2] = {(height - 1) / 2, (width - 1) / 2};
struct SelectReadData data = {NULL, id_min, id_max, 0};
struct SelectReadData data = {nullptr, id_min, id_max, 0};
BLI_array_iter_spiral_square(buf, shape, center_yx, select_buffer_test_fn, &data);

if (data.val_ptr) {
@@ -456,14 +453,14 @@ void DRW_select_buffer_context_create(Base **bases, const uint bases_len, short
{
SELECTID_Context *select_ctx = DRW_select_engine_context_get();

select_ctx->objects = MEM_reallocN(select_ctx->objects,
sizeof(*select_ctx->objects) * bases_len);
select_ctx->objects = static_cast<Object **>(
MEM_reallocN(select_ctx->objects, sizeof(*select_ctx->objects) * bases_len));

select_ctx->index_offsets = MEM_reallocN(select_ctx->index_offsets,
sizeof(*select_ctx->index_offsets) * bases_len);
select_ctx->index_offsets = static_cast<ObjectOffsets *>(
MEM_reallocN(select_ctx->index_offsets, sizeof(*select_ctx->index_offsets) * bases_len));

select_ctx->objects_drawn = MEM_reallocN(select_ctx->objects_drawn,
sizeof(*select_ctx->objects_drawn) * bases_len);
select_ctx->objects_drawn = static_cast<Object **>(
MEM_reallocN(select_ctx->objects_drawn, sizeof(*select_ctx->objects_drawn) * bases_len));

for (uint base_index = 0; base_index < bases_len; base_index++) {
Object *obj = bases[base_index]->object;
@@ -47,6 +47,10 @@ typedef enum eObjectInfoFlag eObjectInfoFlag;
# endif
#endif

#ifdef __cplusplus
extern "C" {
#endif

#define DRW_SHADER_SHARED_H

#define DRW_RESOURCE_CHUNK_LEN 512
@@ -380,3 +384,7 @@ BLI_STATIC_ASSERT_ALIGN(DRWDebugPrintBuffer, 16)
#define drw_debug_draw_offset 2

/** \} */

#ifdef __cplusplus
}
#endif

@@ -4,6 +4,8 @@

#pragma once

#include "BLI_utildefines.h"

#ifdef __cplusplus
extern "C" {
#endif

@@ -8,9 +8,17 @@

#pragma once

#ifdef __cplusplus
extern "C" {
#endif

void DRW_draw_region_info(void);
void DRW_clear_background(void);
void DRW_draw_cursor(void);
void DRW_draw_cursor_2d(void);
void DRW_draw_gizmo_3d(void);
void DRW_draw_gizmo_2d(void);

#ifdef __cplusplus
}
#endif

@@ -63,7 +63,7 @@ static bool is_cursor_visible(const DRWContextState *draw_ctx, Scene *scene, Vie
if ((draw_ctx->object_mode & (OB_MODE_ALL_PAINT | OB_MODE_SCULPT_CURVES)) != 0) {
/* exception: object is in weight paint and has deforming armature in pose mode */
if (draw_ctx->object_mode & OB_MODE_WEIGHT_PAINT) {
if (BKE_object_pose_armature_get(draw_ctx->obact) != NULL) {
if (BKE_object_pose_armature_get(draw_ctx->obact) != nullptr) {
return true;
}
}
@@ -110,7 +110,7 @@ void DRW_draw_cursor(void)
region, cursor->location, co, V3D_PROJ_TEST_NOP | V3D_PROJ_TEST_CLIP_NEAR) ==
V3D_PROJ_RET_OK)
{
RegionView3D *rv3d = region->regiondata;
RegionView3D *rv3d = static_cast<RegionView3D *>(region->regiondata);

float cursor_quat[4];
BKE_scene_cursor_rot_to_quat(cursor, cursor_quat);
@@ -197,7 +197,7 @@ void DRW_draw_cursor(void)
static bool is_cursor_visible_2d(const DRWContextState *draw_ctx)
{
SpaceInfo *space_data = (SpaceInfo *)draw_ctx->space_data;
if (space_data == NULL) {
if (space_data == nullptr) {
return false;
}
if (space_data->spacetype != SPACE_IMAGE) {
@@ -47,8 +47,8 @@ set(INC_SYS

set(SRC
intern/gpu_batch.cc
intern/gpu_batch_presets.c
intern/gpu_batch_utils.c
intern/gpu_batch_presets.cc
intern/gpu_batch_utils.cc
intern/gpu_capabilities.cc
intern/gpu_codegen.cc
intern/gpu_compute.cc
@@ -57,17 +57,17 @@ set(SRC
intern/gpu_drawlist.cc
intern/gpu_framebuffer.cc
intern/gpu_immediate.cc
intern/gpu_immediate_util.c
intern/gpu_immediate_util.cc
intern/gpu_index_buffer.cc
intern/gpu_init_exit.c
intern/gpu_material.c
intern/gpu_init_exit.cc
intern/gpu_material.cc
intern/gpu_matrix.cc
intern/gpu_node_graph.cc
intern/gpu_platform.cc
intern/gpu_query.cc
intern/gpu_select.c
intern/gpu_select.cc
intern/gpu_select_next.cc
intern/gpu_select_pick.c
intern/gpu_select_pick.cc
intern/gpu_select_sample_query.cc
intern/gpu_shader.cc
intern/gpu_shader_builtin.cc
@@ -81,7 +81,7 @@ set(SRC
intern/gpu_uniform_buffer.cc
intern/gpu_vertex_buffer.cc
intern/gpu_vertex_format.cc
intern/gpu_viewport.c
intern/gpu_viewport.cc

GPU_batch.h
GPU_batch_presets.h

@@ -8,8 +8,10 @@

#include "BLI_listbase.h"
#include "BLI_math.h"
#include "BLI_math_vector_types.hh"
#include "BLI_threads.h"
#include "BLI_utildefines.h"

#include "MEM_guardedalloc.h"

#include "GPU_batch.h"
@@ -56,7 +58,7 @@ static struct {
} attr_id;
} g_presets_2d = {{0}};

static ListBase presets_list = {NULL, NULL};
static ListBase presets_list = {nullptr, nullptr};

/** \} */

@@ -97,8 +99,8 @@ static void batch_sphere_lat_lon_vert(GPUVertBufRaw *pos_step,
pos[0] = sinf(lat) * cosf(lon);
pos[1] = cosf(lat);
pos[2] = sinf(lat) * sinf(lon);
copy_v3_v3(GPU_vertbuf_raw_step(pos_step), pos);
copy_v3_v3(GPU_vertbuf_raw_step(nor_step), pos);
copy_v3_v3(static_cast<float *>(GPU_vertbuf_raw_step(pos_step)), pos);
copy_v3_v3(static_cast<float *>(GPU_vertbuf_raw_step(nor_step)), pos);
}
GPUBatch *GPU_batch_preset_sphere(int lod)
{
@@ -168,7 +170,7 @@ static GPUBatch *gpu_batch_sphere(int lat_res, int lon_res)
BLI_assert(vbo_len == GPU_vertbuf_raw_used(&pos_step));
BLI_assert(vbo_len == GPU_vertbuf_raw_used(&nor_step));

return GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
return GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, nullptr, GPU_BATCH_OWNS_VBO);
}

static GPUBatch *batch_sphere_wire(int lat_res, int lon_res)
@@ -202,7 +204,7 @@ static GPUBatch *batch_sphere_wire(int lat_res, int lon_res)
BLI_assert(vbo_len == GPU_vertbuf_raw_used(&pos_step));
BLI_assert(vbo_len == GPU_vertbuf_raw_used(&nor_step));

return GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
return GPU_batch_create_ex(GPU_PRIM_LINES, vbo, nullptr, GPU_BATCH_OWNS_VBO);
}

/** \} */
@@ -219,23 +221,23 @@ static void gpu_batch_preset_rectf_tris_color_ex(GPUVertBufRaw *pos_step,
GPUVertBufRaw *col_step,
const float color[4])
{
copy_v2_v2(GPU_vertbuf_raw_step(pos_step), (const float[2]){x1, y1});
copy_v4_v4(GPU_vertbuf_raw_step(col_step), color);
copy_v2_v2(static_cast<float *>(GPU_vertbuf_raw_step(pos_step)), blender::float2{x1, y1});
copy_v4_v4(static_cast<float *>(GPU_vertbuf_raw_step(col_step)), color);

copy_v2_v2(GPU_vertbuf_raw_step(pos_step), (const float[2]){x2, y1});
copy_v4_v4(GPU_vertbuf_raw_step(col_step), color);
copy_v2_v2(static_cast<float *>(GPU_vertbuf_raw_step(pos_step)), blender::float2{x2, y1});
copy_v4_v4(static_cast<float *>(GPU_vertbuf_raw_step(col_step)), color);

copy_v2_v2(GPU_vertbuf_raw_step(pos_step), (const float[2]){x2, y2});
copy_v4_v4(GPU_vertbuf_raw_step(col_step), color);
copy_v2_v2(static_cast<float *>(GPU_vertbuf_raw_step(pos_step)), blender::float2{x2, y2});
copy_v4_v4(static_cast<float *>(GPU_vertbuf_raw_step(col_step)), color);

copy_v2_v2(GPU_vertbuf_raw_step(pos_step), (const float[2]){x1, y1});
copy_v4_v4(GPU_vertbuf_raw_step(col_step), color);
copy_v2_v2(static_cast<float *>(GPU_vertbuf_raw_step(pos_step)), blender::float2{x1, y1});
copy_v4_v4(static_cast<float *>(GPU_vertbuf_raw_step(col_step)), color);

copy_v2_v2(GPU_vertbuf_raw_step(pos_step), (const float[2]){x2, y2});
copy_v4_v4(GPU_vertbuf_raw_step(col_step), color);
copy_v2_v2(static_cast<float *>(GPU_vertbuf_raw_step(pos_step)), blender::float2{x2, y2});
copy_v4_v4(static_cast<float *>(GPU_vertbuf_raw_step(col_step)), color);

copy_v2_v2(GPU_vertbuf_raw_step(pos_step), (const float[2]){x1, y2});
copy_v4_v4(GPU_vertbuf_raw_step(col_step), color);
copy_v2_v2(static_cast<float *>(GPU_vertbuf_raw_step(pos_step)), blender::float2{x1, y2});
copy_v4_v4(static_cast<float *>(GPU_vertbuf_raw_step(col_step)), color);
}

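The copy_v2_v2() rewrites above show one more recurring conversion: C99 compound literals such as (const float[2]){x1, y1} are not valid C++, so the commit substitutes blender::float2 / blender::float4 temporaries that decay to float pointers. A reduced sketch of the idea with a hypothetical helper:

#include <array>

static void copy_v2(float dst[2], const float src[2])
{
  dst[0] = src[0];
  dst[1] = src[1];
}

void example(float *out, float x, float y)
{
  /* C99: copy_v2(out, (const float[2]){x, y}); uses a compound literal. */
  const std::array<float, 2> tmp{x, y};  /* C++ stand-in for the literal. */
  copy_v2(out, tmp.data());
}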
static GPUBatch *gpu_batch_preset_panel_drag_widget(float pixelsize,
@@ -277,7 +279,7 @@ static GPUBatch *gpu_batch_preset_panel_drag_widget(float pixelsize,
&pos_step, x_co - box_size, y_co, x_co, y_co + box_size, &col_step, col_high);
}
}
return GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
return GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, nullptr, GPU_BATCH_OWNS_VBO);
}

GPUBatch *GPU_batch_preset_panel_drag_widget(const float pixelsize,
@@ -294,7 +296,7 @@ GPUBatch *GPU_batch_preset_panel_drag_widget(const float pixelsize,
if (g_presets_2d.batch.panel_drag_widget && parameters_changed) {
gpu_batch_presets_unregister(g_presets_2d.batch.panel_drag_widget);
GPU_batch_discard(g_presets_2d.batch.panel_drag_widget);
g_presets_2d.batch.panel_drag_widget = NULL;
g_presets_2d.batch.panel_drag_widget = nullptr;
}

if (!g_presets_2d.batch.panel_drag_widget) {
@@ -320,7 +322,7 @@ GPUBatch *GPU_batch_preset_quad(void)
/* Don't fill the color. */

g_presets_2d.batch.quad = GPU_batch_create_ex(
GPU_PRIM_TRI_STRIP, vbo, NULL, GPU_BATCH_OWNS_VBO);
GPU_PRIM_TRI_STRIP, vbo, nullptr, GPU_BATCH_OWNS_VBO);

gpu_batch_presets_register(g_presets_2d.batch.quad);
}
@@ -364,7 +366,7 @@ void gpu_batch_presets_register(GPUBatch *preset_batch)
bool gpu_batch_presets_unregister(GPUBatch *preset_batch)
{
BLI_mutex_lock(&g_presets_3d.mutex);
for (LinkData *link = presets_list.last; link; link = link->prev) {
for (LinkData *link = static_cast<LinkData *>(presets_list.last); link; link = link->prev) {
if (preset_batch == link->data) {
BLI_remlink(&presets_list, link);
BLI_mutex_unlock(&g_presets_3d.mutex);
@@ -379,8 +381,8 @@ bool gpu_batch_presets_unregister(GPUBatch *preset_batch)
void gpu_batch_presets_exit(void)
{
LinkData *link;
while ((link = BLI_pophead(&presets_list))) {
GPUBatch *preset = link->data;
while ((link = static_cast<LinkData *>(BLI_pophead(&presets_list)))) {
GPUBatch *preset = static_cast<GPUBatch *>(link->data);
GPU_batch_discard(preset);
MEM_freeN(link);
}
@@ -24,14 +24,14 @@ GPUBatch *GPU_batch_tris_from_poly_2d_encoded(const uchar *polys_flat,
uint polys_flat_len,
const rctf *rect)
{
const uchar(*polys)[2] = (const void *)polys_flat;
const uchar(*polys)[2] = static_cast<const uchar(*)[2]>((const void *)polys_flat);
const uint polys_len = polys_flat_len / 2;
BLI_assert(polys_flat_len == polys_len * 2);

/* Over alloc in both cases */
float(*verts)[2] = MEM_mallocN(sizeof(*verts) * polys_len, __func__);
float(*verts)[2] = static_cast<float(*)[2]>(MEM_mallocN(sizeof(*verts) * polys_len, __func__));
float(*verts_step)[2] = verts;
uint(*tris)[3] = MEM_mallocN(sizeof(*tris) * polys_len, __func__);
uint(*tris)[3] = static_cast<uint(*)[3]>(MEM_mallocN(sizeof(*tris) * polys_len, __func__));
uint(*tris_step)[3] = tris;

const float range_uchar[2] = {
@@ -92,7 +92,7 @@ GPUBatch *GPU_batch_tris_from_poly_2d_encoded(const uchar *polys_flat,
GPU_vertbuf_attr_get_raw_data(vbo, attr_id.pos, &pos_step);

for (uint i = 0; i < verts_len; i++) {
copy_v2_v2(GPU_vertbuf_raw_step(&pos_step), verts[i]);
copy_v2_v2(static_cast<float *>(GPU_vertbuf_raw_step(&pos_step)), verts[i]);
}

GPUIndexBufBuilder elb;
@@ -113,13 +113,13 @@ GPUBatch *GPU_batch_wire_from_poly_2d_encoded(const uchar *polys_flat,
uint polys_flat_len,
const rctf *rect)
{
const uchar(*polys)[2] = (const void *)polys_flat;
const uchar(*polys)[2] = static_cast<const uchar(*)[2]>((const void *)polys_flat);
const uint polys_len = polys_flat_len / 2;
BLI_assert(polys_flat_len == polys_len * 2);

/* Over alloc */
/* Lines are pairs of (x, y) byte locations packed into an int32_t. */
int32_t *lines = MEM_mallocN(sizeof(*lines) * polys_len, __func__);
int32_t *lines = static_cast<int32_t *>(MEM_mallocN(sizeof(*lines) * polys_len, __func__));
int32_t *lines_step = lines;

const float range_uchar[2] = {
@@ -199,7 +199,7 @@ GPUBatch *GPU_batch_wire_from_poly_2d_encoded(const uchar *polys_flat,
} data;
data.as_u32 = lines[i];
for (uint k = 0; k < 2; k++) {
float *pos_v2 = GPU_vertbuf_raw_step(&pos_step);
float *pos_v2 = static_cast<float *>(GPU_vertbuf_raw_step(&pos_step));
for (uint j = 0; j < 2; j++) {
pos_v2[j] = min_uchar[j] + ((float)data.as_u8_pair[k][j] * range_uchar[j]);
}
@@ -207,7 +207,7 @@ GPUBatch *GPU_batch_wire_from_poly_2d_encoded(const uchar *polys_flat,
}
BLI_assert(vbo_len_capacity == GPU_vertbuf_raw_used(&pos_step));
MEM_freeN(lines);
return GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
return GPU_batch_create_ex(GPU_PRIM_LINES, vbo, nullptr, GPU_BATCH_OWNS_VBO);
}

/** \} */
@@ -57,15 +57,15 @@
|
||||
* worker thread as well. */
|
||||
#define ASYNC_OPTIMIZED_PASS_CREATION 0
|
||||
|
||||
typedef struct GPUColorBandBuilder {
|
||||
struct GPUColorBandBuilder {
|
||||
float pixels[MAX_COLOR_BAND][CM_TABLE + 1][4];
|
||||
int current_layer;
|
||||
} GPUColorBandBuilder;
|
||||
};
|
||||
|
||||
typedef struct GPUSkyBuilder {
|
||||
struct GPUSkyBuilder {
|
||||
float pixels[MAX_GPU_SKIES][GPU_SKY_WIDTH * GPU_SKY_HEIGHT][4];
|
||||
int current_layer;
|
||||
} GPUSkyBuilder;
|
||||
};
|
||||
|
||||
struct GPUMaterial {
|
||||
/* Contains #GPUShader and source code for deferred compilation.
|
||||
@@ -158,8 +158,9 @@ GPUTexture **gpu_material_sky_texture_layer_set(
|
||||
BLI_assert(height == GPU_SKY_HEIGHT);
|
||||
UNUSED_VARS_NDEBUG(width, height);
|
||||
|
||||
if (mat->sky_builder == NULL) {
|
||||
mat->sky_builder = MEM_mallocN(sizeof(GPUSkyBuilder), "GPUSkyBuilder");
|
||||
if (mat->sky_builder == nullptr) {
|
||||
mat->sky_builder = static_cast<GPUSkyBuilder *>(
|
||||
MEM_mallocN(sizeof(GPUSkyBuilder), "GPUSkyBuilder"));
|
||||
mat->sky_builder->current_layer = 0;
|
||||
}
|
||||
|
||||
@@ -188,8 +189,9 @@ GPUTexture **gpu_material_ramp_texture_row_set(GPUMaterial *mat,
|
||||
BLI_assert(size == CM_TABLE + 1);
|
||||
UNUSED_VARS_NDEBUG(size);
|
||||
|
||||
if (mat->coba_builder == NULL) {
|
||||
mat->coba_builder = MEM_mallocN(sizeof(GPUColorBandBuilder), "GPUColorBandBuilder");
|
||||
if (mat->coba_builder == nullptr) {
|
||||
mat->coba_builder = static_cast<GPUColorBandBuilder *>(
|
||||
MEM_mallocN(sizeof(GPUColorBandBuilder), "GPUColorBandBuilder"));
|
||||
mat->coba_builder->current_layer = 0;
|
||||
}
|
||||
|
||||
@@ -210,7 +212,7 @@ GPUTexture **gpu_material_ramp_texture_row_set(GPUMaterial *mat,
|
||||
|
||||
static void gpu_material_ramp_texture_build(GPUMaterial *mat)
|
||||
{
|
||||
if (mat->coba_builder == NULL) {
|
||||
if (mat->coba_builder == nullptr) {
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -225,12 +227,12 @@ static void gpu_material_ramp_texture_build(GPUMaterial *mat)
|
||||
(float *)builder->pixels);
|
||||
|
||||
MEM_freeN(builder);
|
||||
mat->coba_builder = NULL;
|
||||
mat->coba_builder = nullptr;
|
||||
}
|
||||
|
||||
static void gpu_material_sky_texture_build(GPUMaterial *mat)
|
||||
{
|
||||
if (mat->sky_builder == NULL) {
|
||||
if (mat->sky_builder == nullptr) {
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -244,7 +246,7 @@ static void gpu_material_sky_texture_build(GPUMaterial *mat)
|
||||
(float *)mat->sky_builder->pixels);
|
||||
|
||||
MEM_freeN(mat->sky_builder);
|
||||
mat->sky_builder = NULL;
|
||||
mat->sky_builder = nullptr;
|
||||
}
|
||||
|
||||
void GPU_material_free_single(GPUMaterial *material)
|
||||
@@ -256,25 +258,25 @@ void GPU_material_free_single(GPUMaterial *material)
|
||||
|
||||
gpu_node_graph_free(&material->graph);
|
||||
|
||||
if (material->optimized_pass != NULL) {
|
||||
if (material->optimized_pass != nullptr) {
|
||||
GPU_pass_release(material->optimized_pass);
|
||||
}
|
||||
if (material->pass != NULL) {
|
||||
if (material->pass != nullptr) {
|
||||
GPU_pass_release(material->pass);
|
||||
}
|
||||
if (material->ubo != NULL) {
|
||||
if (material->ubo != nullptr) {
|
||||
GPU_uniformbuf_free(material->ubo);
|
||||
}
|
||||
if (material->coba_tex != NULL) {
|
||||
if (material->coba_tex != nullptr) {
|
||||
GPU_texture_free(material->coba_tex);
|
||||
}
|
||||
if (material->sky_tex != NULL) {
|
||||
if (material->sky_tex != nullptr) {
|
||||
GPU_texture_free(material->sky_tex);
|
||||
}
|
||||
if (material->sss_profile != NULL) {
|
||||
if (material->sss_profile != nullptr) {
|
||||
GPU_uniformbuf_free(material->sss_profile);
|
||||
}
|
||||
if (material->sss_tex_profile != NULL) {
|
||||
if (material->sss_tex_profile != nullptr) {
|
||||
GPU_texture_free(material->sss_tex_profile);
|
||||
}
|
||||
MEM_freeN(material);
|
||||
@@ -283,7 +285,7 @@ void GPU_material_free_single(GPUMaterial *material)
|
||||
void GPU_material_free(ListBase *gpumaterial)
|
||||
{
|
||||
LISTBASE_FOREACH (LinkData *, link, gpumaterial) {
|
||||
GPUMaterial *material = link->data;
|
||||
GPUMaterial *material = static_cast<GPUMaterial *>(link->data);
|
||||
DRW_deferred_shader_remove(material);
|
||||
GPU_material_free_single(material);
|
||||
}
|
||||
@@ -313,13 +315,13 @@ GPUShader *GPU_material_get_shader(GPUMaterial *material)
|
||||
GPU_MAT_OPTIMIZATION_SUCCESS) &&
|
||||
material->optimized_pass) ?
|
||||
GPU_pass_shader_get(material->optimized_pass) :
|
||||
NULL;
|
||||
return (shader) ? shader : ((material->pass) ? GPU_pass_shader_get(material->pass) : NULL);
|
||||
nullptr;
|
||||
return (shader) ? shader : ((material->pass) ? GPU_pass_shader_get(material->pass) : nullptr);
|
||||
}
|
||||
|
||||
GPUShader *GPU_material_get_shader_base(GPUMaterial *material)
|
||||
{
|
||||
return (material->pass) ? GPU_pass_shader_get(material->pass) : NULL;
|
||||
return (material->pass) ? GPU_pass_shader_get(material->pass) : nullptr;
|
||||
}
|
||||
|
||||
const char *GPU_material_get_name(GPUMaterial *material)
|
||||
@@ -355,13 +357,13 @@ ListBase GPU_material_textures(GPUMaterial *material)
|
||||
const GPUUniformAttrList *GPU_material_uniform_attributes(const GPUMaterial *material)
|
||||
{
|
||||
const GPUUniformAttrList *attrs = &material->graph.uniform_attrs;
|
||||
return attrs->count > 0 ? attrs : NULL;
|
||||
return attrs->count > 0 ? attrs : nullptr;
|
||||
}
|
||||
|
||||
const ListBase *GPU_material_layer_attributes(const GPUMaterial *material)
|
||||
{
|
||||
const ListBase *attrs = &material->graph.layer_attrs;
|
||||
return !BLI_listbase_is_empty(attrs) ? attrs : NULL;
|
||||
return !BLI_listbase_is_empty(attrs) ? attrs : nullptr;
|
||||
}
|
||||
|
||||
#if 1 /* End of life code. */
|
||||
@@ -371,13 +373,13 @@ const ListBase *GPU_material_layer_attributes(const GPUMaterial *material)
|
||||
# define SSS_SAMPLES 65
|
||||
# define SSS_EXPONENT 2.0f /* Importance sampling exponent */
|
||||
|
||||
typedef struct GPUSssKernelData {
|
||||
struct GPUSssKernelData {
|
||||
float kernel[SSS_SAMPLES][4];
|
||||
float param[3], max_radius;
|
||||
float avg_inv_radius;
|
||||
int samples;
|
||||
int pad[2];
|
||||
} GPUSssKernelData;
|
||||
};
|
||||
|
||||
BLI_STATIC_ASSERT_ALIGN(GPUSssKernelData, 16)
|
||||
|
||||
@@ -515,7 +517,8 @@ static void compute_sss_translucence_kernel(const GPUSssKernelData *kd,
|
||||
float **output)
|
||||
{
|
||||
float(*texels)[4];
|
||||
texels = MEM_callocN(sizeof(float[4]) * resolution, "compute_sss_translucence_kernel");
|
||||
texels = static_cast<float(*)[4]>(
|
||||
MEM_callocN(sizeof(float[4]) * resolution, "compute_sss_translucence_kernel"));
|
||||
*output = (float *)texels;
|
||||
|
||||
/* Last texel should be black, hence the - 1. */
|
||||
@@ -581,7 +584,7 @@ bool GPU_material_sss_profile_create(GPUMaterial *material, float radii[3])
|
||||
material->sss_enabled = true;
|
||||
|
||||
/* Update / Create UBO */
|
||||
if (material->sss_profile == NULL) {
|
||||
if (material->sss_profile == nullptr) {
|
||||
material->sss_profile = GPU_uniformbuf_create(sizeof(GPUSssKernelData));
|
||||
}
|
||||
return true;
|
||||
@@ -592,7 +595,7 @@ GPUUniformBuf *GPU_material_sss_profile_get(GPUMaterial *material,
|
||||
GPUTexture **tex_profile)
|
||||
{
|
||||
if (!material->sss_enabled) {
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
if (material->sss_dirty || (material->sss_samples != sample_len)) {
|
||||
@@ -607,7 +610,7 @@ GPUUniformBuf *GPU_material_sss_profile_get(GPUMaterial *material,
|
||||
float *translucence_profile;
|
||||
compute_sss_translucence_kernel(&kd, 64, &translucence_profile);
|
||||
|
||||
if (material->sss_tex_profile != NULL) {
|
||||
if (material->sss_tex_profile != nullptr) {
|
||||
GPU_texture_free(material->sss_tex_profile);
|
||||
}
|
||||
|
||||
@@ -624,7 +627,7 @@ GPUUniformBuf *GPU_material_sss_profile_get(GPUMaterial *material,
|
||||
material->sss_dirty = false;
|
||||
}
|
||||
|
||||
if (tex_profile != NULL) {
|
||||
if (tex_profile != nullptr) {
|
||||
*tex_profile = material->sss_tex_profile;
|
||||
}
|
||||
return material->sss_profile;
|
||||
@@ -671,7 +674,8 @@ void GPU_material_output_thickness(GPUMaterial *material, GPUNodeLink *link)
|
||||
|
||||
void GPU_material_add_output_link_aov(GPUMaterial *material, GPUNodeLink *link, int hash)
|
||||
{
|
||||
GPUNodeGraphOutputLink *aov_link = MEM_callocN(sizeof(GPUNodeGraphOutputLink), __func__);
|
||||
GPUNodeGraphOutputLink *aov_link = static_cast<GPUNodeGraphOutputLink *>(
|
||||
MEM_callocN(sizeof(GPUNodeGraphOutputLink), __func__));
|
||||
aov_link->outlink = link;
|
||||
aov_link->hash = hash;
|
||||
BLI_addtail(&material->graph.outlink_aovs, aov_link);
|
||||
@@ -679,7 +683,8 @@ void GPU_material_add_output_link_aov(GPUMaterial *material, GPUNodeLink *link,
|
||||
|
||||
void GPU_material_add_output_link_composite(GPUMaterial *material, GPUNodeLink *link)
|
||||
{
|
||||
GPUNodeGraphOutputLink *compositor_link = MEM_callocN(sizeof(GPUNodeGraphOutputLink), __func__);
|
||||
GPUNodeGraphOutputLink *compositor_link = static_cast<GPUNodeGraphOutputLink *>(
|
||||
MEM_callocN(sizeof(GPUNodeGraphOutputLink), __func__));
|
||||
compositor_link->outlink = link;
|
||||
BLI_addtail(&material->graph.outlink_compositor, compositor_link);
|
||||
}
|
||||
@@ -704,7 +709,8 @@ char *GPU_material_split_sub_function(GPUMaterial *material,
|
||||
break;
|
||||
}
|
||||
|
||||
GPUNodeGraphFunctionLink *func_link = MEM_callocN(sizeof(GPUNodeGraphFunctionLink), __func__);
|
||||
GPUNodeGraphFunctionLink *func_link = static_cast<GPUNodeGraphFunctionLink *>(
|
||||
MEM_callocN(sizeof(GPUNodeGraphFunctionLink), __func__));
|
||||
func_link->outlink = *link;
|
||||
SNPRINTF(func_link->name, "ntree_fn%d", material->generated_function_len++);
|
||||
BLI_addtail(&material->graph.material_functions, func_link);
|
||||
@@ -821,13 +827,13 @@ GPUMaterial *GPU_material_from_nodetree(Scene *scene,
|
||||
}
|
||||
}
|
||||
|
||||
GPUMaterial *mat = MEM_callocN(sizeof(GPUMaterial), "GPUMaterial");
|
||||
GPUMaterial *mat = static_cast<GPUMaterial *>(MEM_callocN(sizeof(GPUMaterial), "GPUMaterial"));
|
||||
mat->ma = ma;
|
||||
mat->scene = scene;
|
||||
mat->uuid = shader_uuid;
|
||||
mat->flag = GPU_MATFLAG_UPDATED;
|
||||
mat->status = GPU_MAT_CREATED;
|
||||
mat->default_mat = NULL;
|
||||
mat->default_mat = nullptr;
|
||||
mat->is_volume_shader = is_volume_shader;
|
||||
mat->graph.used_libraries = BLI_gset_new(
|
||||
BLI_ghashutil_ptrhash, BLI_ghashutil_ptrcmp, "GPUNodeGraph.used_libraries");
|
||||
@@ -848,7 +854,7 @@ GPUMaterial *GPU_material_from_nodetree(Scene *scene,
/* Create source code and search pass cache for an already compiled version. */
mat->pass = GPU_generate_pass(mat, &mat->graph, callback, thunk, false);

if (mat->pass == NULL) {
if (mat->pass == nullptr) {
/* We had a cache hit and the shader has already failed to compile. */
mat->status = GPU_MAT_FAILED;
gpu_node_graph_free(&mat->graph);
@@ -861,7 +867,7 @@ GPUMaterial *GPU_material_from_nodetree(Scene *scene,
}

GPUShader *sh = GPU_pass_shader_get(mat->pass);
if (sh != NULL) {
if (sh != nullptr) {
/* We had a cache hit and the shader is already compiled. */
mat->status = GPU_MAT_SUCCESS;

@@ -873,19 +879,19 @@ GPUMaterial *GPU_material_from_nodetree(Scene *scene,
/* Generate optimized pass. */
if (mat->optimization_status == GPU_MAT_OPTIMIZATION_READY) {
#if ASYNC_OPTIMIZED_PASS_CREATION == 1
mat->optimized_pass = NULL;
mat->optimized_pass = nullptr;
mat->optimize_pass_info.callback = callback;
mat->optimize_pass_info.thunk = thunk;
#else
mat->optimized_pass = GPU_generate_pass(mat, &mat->graph, callback, thunk, true);
if (mat->optimized_pass == NULL) {
if (mat->optimized_pass == nullptr) {
/* Failed to create optimized pass. */
gpu_node_graph_free_nodes(&mat->graph);
GPU_material_optimization_status_set(mat, GPU_MAT_OPTIMIZATION_SKIP);
}
else {
GPUShader *optimized_sh = GPU_pass_shader_get(mat->optimized_pass);
if (optimized_sh != NULL) {
if (optimized_sh != nullptr) {
/* Optimized shader already available. */
gpu_node_graph_free_nodes(&mat->graph);
GPU_material_optimization_status_set(mat, GPU_MAT_OPTIMIZATION_SUCCESS);
@@ -904,7 +910,7 @@ GPUMaterial *GPU_material_from_nodetree(Scene *scene,
/* Note that even if building the shader fails in some way, we still keep
* it to avoid trying to compile again and again, and simply do not use
* the actual shader on drawing. */
LinkData *link = MEM_callocN(sizeof(LinkData), "GPUMaterialLink");
LinkData *link = static_cast<LinkData *>(MEM_callocN(sizeof(LinkData), "GPUMaterialLink"));
link->data = mat;
BLI_addtail(gpumaterials, link);

@@ -928,8 +934,8 @@ void GPU_material_compile(GPUMaterial *mat)
BLI_assert(ELEM(mat->status, GPU_MAT_QUEUED, GPU_MAT_CREATED));
BLI_assert(mat->pass);

/* NOTE: The shader may have already been compiled here since we are
* sharing GPUShader across GPUMaterials. In this case it's a no-op. */
/* NOTE: The shader may have already been compiled here since we are
* sharing GPUShader across GPUMaterials. In this case it's a no-op. */
#ifndef NDEBUG
success = GPU_pass_compile(mat->pass, mat->name);
#else
@@ -940,7 +946,7 @@ void GPU_material_compile(GPUMaterial *mat)

if (success) {
GPUShader *sh = GPU_pass_shader_get(mat->pass);
if (sh != NULL) {
if (sh != nullptr) {

/** Perform async Render Pipeline State Object (PSO) compilation.
*
@@ -955,8 +961,8 @@ void GPU_material_compile(GPUMaterial *mat)
* configurations to ensure compile time remains fast, as these first
* entries will be the most commonly used PSOs. As not all PSOs are necessarily
* required immediately, this limit should remain low (1-3 at most). */
if (!ELEM(mat->default_mat, NULL, mat)) {
if (mat->default_mat->pass != NULL) {
if (!ELEM(mat->default_mat, nullptr, mat)) {
if (mat->default_mat->pass != nullptr) {
GPUShader *parent_sh = GPU_pass_shader_get(mat->default_mat->pass);
if (parent_sh) {
/* Skip warming if cached pass is identical to the default material. */
@@ -982,7 +988,7 @@ void GPU_material_compile(GPUMaterial *mat)
else {
mat->status = GPU_MAT_FAILED;
GPU_pass_release(mat->pass);
mat->pass = NULL;
mat->pass = nullptr;
gpu_node_graph_free(&mat->graph);
}
}
@@ -1023,8 +1029,8 @@ void GPU_material_optimize(GPUMaterial *mat)
#endif

bool success;
/* NOTE: The shader may have already been compiled here since we are
* sharing GPUShader across GPUMaterials. In this case it's a no-op. */
/* NOTE: The shader may have already been compiled here since we are
* sharing GPUShader across GPUMaterials. In this case it's a no-op. */
#ifndef NDEBUG
success = GPU_pass_compile(mat->optimized_pass, mat->name);
#else
@@ -1033,7 +1039,7 @@ void GPU_material_optimize(GPUMaterial *mat)

if (success) {
GPUShader *sh = GPU_pass_shader_get(mat->optimized_pass);
if (sh != NULL) {
if (sh != nullptr) {
/** Perform async Render Pipeline State Object (PSO) compilation.
*
* Warm PSO cache within async compilation thread for optimized materials.
@@ -1062,7 +1068,7 @@ void GPU_material_optimize(GPUMaterial *mat)
else {
/* Optimization pass generation failed. Disable future attempts to optimize. */
GPU_pass_release(mat->optimized_pass);
mat->optimized_pass = NULL;
mat->optimized_pass = nullptr;
GPU_material_optimization_status_set(mat, GPU_MAT_OPTIMIZATION_SKIP);
}

@@ -1088,13 +1094,14 @@ GPUMaterial *GPU_material_from_callbacks(ConstructGPUMaterialFn construct_functi
void *thunk)
{
/* Allocate a new material and its material graph, and initialize its reference count. */
GPUMaterial *material = MEM_callocN(sizeof(GPUMaterial), "GPUMaterial");
GPUMaterial *material = static_cast<GPUMaterial *>(
MEM_callocN(sizeof(GPUMaterial), "GPUMaterial"));
material->graph.used_libraries = BLI_gset_new(
BLI_ghashutil_ptrhash, BLI_ghashutil_ptrcmp, "GPUNodeGraph.used_libraries");
material->refcount = 1;
material->optimization_status = GPU_MAT_OPTIMIZATION_SKIP;
material->optimized_pass = NULL;
material->default_mat = NULL;
material->optimized_pass = nullptr;
material->default_mat = nullptr;

/* Construct the material graph by adding and linking the necessary GPU material nodes. */
construct_function_cb(thunk, material);
@@ -1105,10 +1112,10 @@ GPUMaterial *GPU_material_from_callbacks(ConstructGPUMaterialFn construct_functi
/* Lookup an existing pass in the cache or generate a new one. */
material->pass = GPU_generate_pass(
material, &material->graph, generate_code_function_cb, thunk, false);
material->optimized_pass = NULL;
material->optimized_pass = nullptr;

/* The pass already exists in the pass cache but its shader already failed to compile. */
if (material->pass == NULL) {
if (material->pass == nullptr) {
material->status = GPU_MAT_FAILED;
gpu_node_graph_free(&material->graph);
return material;
@@ -1116,7 +1123,7 @@ GPUMaterial *GPU_material_from_callbacks(ConstructGPUMaterialFn construct_functi

/* The pass already exists in the pass cache and its shader is already compiled. */
GPUShader *shader = GPU_pass_shader_get(material->pass);
if (shader != NULL) {
if (shader != nullptr) {
material->status = GPU_MAT_SUCCESS;
if (material->optimization_status == GPU_MAT_OPTIMIZATION_SKIP) {
/* Only free node graph if not required by secondary optimization pass. */
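Side note on the NULL → nullptr replacements throughout this file: `nullptr` converts only to pointer types, so it cannot be mistaken for an integer the way `NULL` can. A tiny illustration (plain C++, no Blender code):

  int main()
  {
    int *p = nullptr;    /* OK: nullptr converts to any object pointer type. */
    /* int i = nullptr;     Would not compile: nullptr is not an integer. */
    return p == nullptr ? 0 : 1;
  }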
@@ -26,7 +26,7 @@
* \{ */

/* Internal algorithm used */
typedef enum eGPUSelectAlgo {
enum eGPUSelectAlgo {
/** glBegin/EndQuery(GL_SAMPLES_PASSED... ), `gpu_select_query.c`
* Only sets 4th component (ID) correctly. */
ALGO_GL_QUERY = 1,
@@ -35,9 +35,9 @@ typedef enum eGPUSelectAlgo {
ALGO_GL_PICK = 2,
/** Use Select-Next draw engine. */
ALGO_SELECT_NEXT = 3,
} eGPUSelectAlgo;
};

typedef struct GPUSelectState {
struct GPUSelectState {
/* To ignore selection id calls when not initialized */
bool select_is_active;
/* mode of operation */
@@ -55,7 +55,7 @@ typedef struct GPUSelectState {
* where the `mode` to pass to #GPU_select_begin yet isn't known.
*/
bool use_cache_needs_init;
} GPUSelectState;
};

static GPUSelectState g_select_state = {0};

@@ -247,7 +247,7 @@ bool GPU_select_is_cached(void)

const GPUSelectResult *GPU_select_buffer_near(const GPUSelectResult *buffer, int hits)
{
const GPUSelectResult *buffer_near = NULL;
const GPUSelectResult *buffer_near = nullptr;
uint depth_min = (uint)-1;
for (int i = 0; i < hits; i++) {
if (buffer->depth < depth_min) {
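On the dropped `typedef` wrappers above: in C++ a struct or enum tag is already a type name, so `typedef enum ... {} name;` collapses to a plain definition. A minimal sketch with placeholder names (not the real GPUSelectState fields):

  /* C needed `typedef struct GPUSelectState {...} GPUSelectState;` to use the bare name.
   * In C++ the tag itself is a type, so the plain definition is enough. */
  struct ExampleState {
    bool select_is_active;
  };

  enum eExampleAlgo {
    EXAMPLE_QUERY = 1,
    EXAMPLE_PICK = 2,
  };

  static ExampleState g_example_state = {};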
@@ -40,7 +40,7 @@
* \{ */

/** For looping over a sub-region of a #rcti, could be moved into 'rct.c'. */
typedef struct SubRectStride {
struct SubRectStride {
/** Start here. */
uint start;
/** Read these. */
@@ -49,7 +49,7 @@ typedef struct SubRectStride {
uint span_len;
/** Skip those. */
uint skip;
} SubRectStride;
};

/** We may want to change back to float if `uint` isn't well supported. */
typedef uint depth_t;
@@ -97,15 +97,16 @@ BLI_INLINE bool depth_is_filled(const depth_t *prev, const depth_t *curr)
* \{ */

/** Store result of #GPU_framebuffer_read_depth. */
typedef struct DepthBufCache {
struct DepthBufCache {
struct DepthBufCache *next, *prev;
uint id;
depth_t buf[0];
} DepthBufCache;
};

static DepthBufCache *depth_buf_malloc(uint rect_len)
{
DepthBufCache *rect = MEM_mallocN(sizeof(DepthBufCache) + sizeof(depth_t) * rect_len, __func__);
DepthBufCache *rect = static_cast<DepthBufCache *>(
MEM_mallocN(sizeof(DepthBufCache) + sizeof(depth_t) * rect_len, __func__));
rect->id = SELECT_ID_NONE;
return rect;
}
@@ -186,14 +187,14 @@ static bool depth_buf_subrect_depth_any_filled(const DepthBufCache *rect_src,
* Internal structure for storing hits.
* \{ */

typedef struct DepthID {
struct DepthID {
uint id;
depth_t depth;
} DepthID;
};

static int depth_id_cmp(const void *v1, const void *v2)
{
const DepthID *d1 = v1, *d2 = v2;
const DepthID *d1 = static_cast<const DepthID *>(v1), *d2 = static_cast<const DepthID *>(v2);
if (d1->id < d2->id) {
return -1;
}
@@ -206,7 +207,7 @@ static int depth_id_cmp(const void *v1, const void *v2)

static int depth_cmp(const void *v1, const void *v2)
{
const DepthID *d1 = v1, *d2 = v2;
const DepthID *d1 = static_cast<const DepthID *>(v1), *d2 = static_cast<const DepthID *>(v2);
if (d1->depth < d2->depth) {
return -1;
}
@@ -224,7 +225,7 @@ static int depth_cmp(const void *v1, const void *v2)
* \{ */

/** Depth sorting. */
typedef struct GPUPickState {
struct GPUPickState {
/** Cache on initialization. */
GPUSelectResult *buffer;
uint buffer_len;
@@ -289,9 +290,9 @@ typedef struct GPUPickState {
int scissor[4];
eGPUWriteMask write_mask;
eGPUDepthTest depth_test;
} GPUPickState;
};

static GPUPickState g_pick_state = {0};
static GPUPickState g_pick_state{};

void gpu_select_pick_begin(GPUSelectResult *buffer,
const uint buffer_len,
@@ -366,18 +367,20 @@ void gpu_select_pick_begin(GPUSelectResult *buffer,
/* Using cache `ps->is_cached == true`. */
/* `src.clip_rect` -> `dst.clip_rect`. */
rect_subregion_stride_calc(&ps->src.clip_rect, &ps->dst.clip_rect, &ps->cache.sub_rect);
BLI_assert(ps->gpu.rect_depth == NULL);
BLI_assert(ps->gpu.rect_depth_test == NULL);
BLI_assert(ps->gpu.rect_depth == nullptr);
BLI_assert(ps->gpu.rect_depth_test == nullptr);
}

if (mode == GPU_SELECT_PICK_ALL) {
ps->all.hits = MEM_mallocN(sizeof(*ps->all.hits) * ALLOC_DEPTHS, __func__);
ps->all.hits = static_cast<DepthID *>(
MEM_mallocN(sizeof(*ps->all.hits) * ALLOC_DEPTHS, __func__));
ps->all.hits_len = 0;
ps->all.hits_len_alloc = ALLOC_DEPTHS;
}
else {
/* Set to 0xff for #SELECT_ID_NONE. */
ps->nearest.rect_id = MEM_mallocN(sizeof(uint) * ps->dst.rect_len, __func__);
ps->nearest.rect_id = static_cast<uint *>(
MEM_mallocN(sizeof(uint) * ps->dst.rect_len, __func__));
memset(ps->nearest.rect_id, 0xff, sizeof(uint) * ps->dst.rect_len);
}
}
@@ -424,7 +427,8 @@ static void gpu_select_load_id_pass_all(const DepthBufCache *rect_curr)
/* Ensure enough space. */
if (UNLIKELY(ps->all.hits_len == ps->all.hits_len_alloc)) {
ps->all.hits_len_alloc += ALLOC_DEPTHS;
ps->all.hits = MEM_reallocN(ps->all.hits, ps->all.hits_len_alloc * sizeof(*ps->all.hits));
ps->all.hits = static_cast<DepthID *>(
MEM_reallocN(ps->all.hits, ps->all.hits_len_alloc * sizeof(*ps->all.hits)));
}
DepthID *d = &ps->all.hits[ps->all.hits_len++];
d->id = id;
@@ -440,9 +444,9 @@ static void gpu_select_load_id_pass_nearest(const DepthBufCache *rect_prev,
if (id != SELECT_ID_NONE) {
uint *id_ptr = ps->nearest.rect_id;

/* Check against DEPTH_MAX because XRAY will clear the buffer,
* so previously set values will become unset.
* In this case just leave those id's left as-is. */
/* Check against DEPTH_MAX because XRAY will clear the buffer,
* so previously set values will become unset.
* In this case just leave those id's left as-is. */
#define EVAL_TEST() \
if (depth_is_filled(prev, curr)) { \
*id_ptr = id; \
@@ -562,11 +566,11 @@ uint gpu_select_pick_end(void)
/* Store depth in cache */
if (ps->use_cache && !ps->is_cached) {
BLI_addtail(&ps->cache.bufs, ps->gpu.rect_depth);
ps->gpu.rect_depth = NULL;
rect_depth_final = ps->cache.bufs.last;
ps->gpu.rect_depth = nullptr;
rect_depth_final = static_cast<DepthBufCache *>(ps->cache.bufs.last);
}
else if (ps->is_cached) {
rect_depth_final = ps->cache.bufs.last;
rect_depth_final = static_cast<DepthBufCache *>(ps->cache.bufs.last);
}
else {
/* Common case, no cache. */
@@ -581,7 +585,7 @@ uint gpu_select_pick_end(void)
depth_data = ps->all.hits;
depth_data_len = ps->all.hits_len;
/* Move ownership. */
ps->all.hits = NULL;
ps->all.hits = nullptr;
ps->all.hits_len = 0;
ps->all.hits_len_alloc = 0;
}
@@ -590,7 +594,8 @@ uint gpu_select_pick_end(void)

/* Over allocate (unlikely we have as many depths as pixels). */
uint depth_data_len_first_pass = 0;
depth_data = MEM_mallocN(ps->dst.rect_len * sizeof(*depth_data), __func__);
depth_data = static_cast<DepthID *>(
MEM_mallocN(ps->dst.rect_len * sizeof(*depth_data), __func__));

/* Partially de-duplicating copy,
* when contiguous ID's are found - update their closest depth.
@@ -601,7 +606,7 @@ uint gpu_select_pick_end(void)
const uint id = ps->nearest.rect_id[i_dst]; \
if (id != SELECT_ID_NONE) { \
const depth_t depth = rect_depth_final->buf[i_src]; \
if (depth_last == NULL || depth_last->id != id) { \
if (depth_last == nullptr || depth_last->id != id) { \
DepthID *d = &depth_data[depth_data_len_first_pass++]; \
d->id = id; \
d->depth = depth; \
@@ -614,7 +619,7 @@ uint gpu_select_pick_end(void)
((void)0)

{
DepthID *depth_last = NULL;
DepthID *depth_last = nullptr;
if (ps->is_cached == false) {
for (uint i = 0; i < ps->src.rect_len; i++) {
EVAL_TEST(i, i);
@@ -640,9 +645,9 @@ uint gpu_select_pick_end(void)
/* Sort by ID's then keep the best depth for each ID. */
depth_data_len = 0;
{
DepthID *depth_last = NULL;
DepthID *depth_last = nullptr;
for (uint i = 0; i < depth_data_len_first_pass; i++) {
if (depth_last == NULL || depth_last->id != depth_data[i].id) {
if (depth_last == nullptr || depth_last->id != depth_data[i].id) {
depth_last = &depth_data[depth_data_len++];
*depth_last = depth_data[i];
}
@@ -685,7 +690,7 @@ uint gpu_select_pick_end(void)
}
else {
MEM_freeN(ps->nearest.rect_id);
ps->nearest.rect_id = NULL;
ps->nearest.rect_id = nullptr;
}

if (ps->use_cache) {
@@ -737,7 +742,7 @@ void gpu_select_pick_cache_load_id(void)
printf("%s (building depth from cache)\n", __func__);
#endif
LISTBASE_FOREACH (DepthBufCache *, rect_depth, &ps->cache.bufs) {
if (rect_depth->next != NULL) {
if (rect_depth->next != nullptr) {
/* We know the buffers differ, but this sub-region may not.
* Double check before adding an id-pass. */
if (g_pick_state.mode == GPU_SELECT_PICK_ALL) {
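The `depth_id_cmp`/`depth_cmp` edits above follow the same C++ rule as the allocator casts: a `const void *` comparator argument no longer converts implicitly to a typed pointer. A small self-contained sketch of a qsort-style comparator (ExampleItem is a placeholder type, not Blender code):

  #include <cstdlib>

  struct ExampleItem {
    unsigned int id;
  };

  static int example_item_cmp(const void *v1, const void *v2)
  {
    /* The static_casts are mandatory in C++; plain assignment worked in C. */
    const ExampleItem *a = static_cast<const ExampleItem *>(v1);
    const ExampleItem *b = static_cast<const ExampleItem *>(v2);
    if (a->id < b->id) {
      return -1;
    }
    if (a->id > b->id) {
      return 1;
    }
    return 0;
  }

  int main()
  {
    ExampleItem items[3] = {{3}, {1}, {2}};
    std::qsort(items, 3, sizeof(ExampleItem), example_item_cmp);
    return items[0].id == 1 ? 0 : 1;
  }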
@@ -98,7 +98,8 @@ bool GPU_viewport_do_update(GPUViewport *viewport)

GPUViewport *GPU_viewport_create(void)
{
GPUViewport *viewport = MEM_callocN(sizeof(GPUViewport), "GPUViewport");
GPUViewport *viewport = static_cast<GPUViewport *>(
MEM_callocN(sizeof(GPUViewport), "GPUViewport"));
viewport->do_color_management = false;
viewport->size[0] = viewport->size[1] = -1;
viewport->active_view = 0;
@@ -123,14 +124,18 @@ static void gpu_viewport_textures_create(GPUViewport *viewport)
float empty_pixel[4] = {0.0f, 0.0f, 0.0f, 0.0f};
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_ATTACHMENT;

if (viewport->color_render_tx[0] == NULL) {
if (viewport->color_render_tx[0] == nullptr) {

/* NOTE: dtxl_color texture requires write support as it may be written to by the realtime
* compositor. */
viewport->color_render_tx[0] = GPU_texture_create_2d(
"dtxl_color", UNPACK2(size), 1, GPU_RGBA16F, usage | GPU_TEXTURE_USAGE_SHADER_WRITE, NULL);
viewport->color_render_tx[0] = GPU_texture_create_2d("dtxl_color",
UNPACK2(size),
1,
GPU_RGBA16F,
usage | GPU_TEXTURE_USAGE_SHADER_WRITE,
nullptr);
viewport->color_overlay_tx[0] = GPU_texture_create_2d(
"dtxl_color_overlay", UNPACK2(size), 1, GPU_SRGB8_A8, usage, NULL);
"dtxl_color_overlay", UNPACK2(size), 1, GPU_SRGB8_A8, usage, nullptr);

if (GPU_clear_viewport_workaround()) {
GPU_texture_clear(viewport->color_render_tx[0], GPU_DATA_FLOAT, empty_pixel);
@@ -138,15 +143,15 @@ static void gpu_viewport_textures_create(GPUViewport *viewport)
}
}

if ((viewport->flag & GPU_VIEWPORT_STEREO) != 0 && viewport->color_render_tx[1] == NULL) {
if ((viewport->flag & GPU_VIEWPORT_STEREO) != 0 && viewport->color_render_tx[1] == nullptr) {
viewport->color_render_tx[1] = GPU_texture_create_2d("dtxl_color_stereo",
UNPACK2(size),
1,
GPU_RGBA16F,
usage | GPU_TEXTURE_USAGE_SHADER_WRITE,
NULL);
nullptr);
viewport->color_overlay_tx[1] = GPU_texture_create_2d(
"dtxl_color_overlay_stereo", UNPACK2(size), 1, GPU_SRGB8_A8, usage, NULL);
"dtxl_color_overlay_stereo", UNPACK2(size), 1, GPU_SRGB8_A8, usage, nullptr);

if (GPU_clear_viewport_workaround()) {
GPU_texture_clear(viewport->color_render_tx[1], GPU_DATA_FLOAT, empty_pixel);
@@ -155,14 +160,14 @@ static void gpu_viewport_textures_create(GPUViewport *viewport)
}

/* Can be shared with GPUOffscreen. */
if (viewport->depth_tx == NULL) {
if (viewport->depth_tx == nullptr) {
/* Depth texture can be read back by gizmos #view3d_depths_create. */
viewport->depth_tx = GPU_texture_create_2d("dtxl_depth",
UNPACK2(size),
1,
GPU_DEPTH24_STENCIL8,
usage | GPU_TEXTURE_USAGE_HOST_READ,
NULL);
nullptr);
if (GPU_clear_viewport_workaround()) {
static int depth_clear = 0;
GPU_texture_clear(viewport->depth_tx, GPU_DATA_UINT_24_8, &depth_clear);
@@ -257,15 +262,15 @@ void GPU_viewport_colorspace_set(GPUViewport *viewport,
/* Don't copy the curve mapping already. */
CurveMapping *tmp_curve_mapping = view_settings->curve_mapping;
CurveMapping *tmp_curve_mapping_vp = viewport->view_settings.curve_mapping;
view_settings->curve_mapping = NULL;
viewport->view_settings.curve_mapping = NULL;
view_settings->curve_mapping = nullptr;
viewport->view_settings.curve_mapping = nullptr;

BKE_color_managed_view_settings_copy(&viewport->view_settings, view_settings);
/* Restore. */
view_settings->curve_mapping = tmp_curve_mapping;
viewport->view_settings.curve_mapping = tmp_curve_mapping_vp;
/* Only copy curve-mapping if needed. Avoid unneeded OCIO cache miss. */
if (tmp_curve_mapping && viewport->view_settings.curve_mapping == NULL) {
if (tmp_curve_mapping && viewport->view_settings.curve_mapping == nullptr) {
BKE_color_managed_view_settings_free(&viewport->view_settings);
viewport->view_settings.curve_mapping = BKE_curvemapping_copy(tmp_curve_mapping);
}
@@ -372,16 +377,24 @@ static GPUBatch *gpu_viewport_batch_create(const rctf *rect_pos, const rctf *rec
GPU_vertbuf_attr_get_raw_data(vbo, g_viewport.attr_id.pos, &pos_step);
GPU_vertbuf_attr_get_raw_data(vbo, g_viewport.attr_id.tex_coord, &tex_coord_step);

copy_v2_fl2(GPU_vertbuf_raw_step(&pos_step), rect_pos->xmin, rect_pos->ymin);
copy_v2_fl2(GPU_vertbuf_raw_step(&tex_coord_step), rect_uv->xmin, rect_uv->ymin);
copy_v2_fl2(GPU_vertbuf_raw_step(&pos_step), rect_pos->xmax, rect_pos->ymin);
copy_v2_fl2(GPU_vertbuf_raw_step(&tex_coord_step), rect_uv->xmax, rect_uv->ymin);
copy_v2_fl2(GPU_vertbuf_raw_step(&pos_step), rect_pos->xmin, rect_pos->ymax);
copy_v2_fl2(GPU_vertbuf_raw_step(&tex_coord_step), rect_uv->xmin, rect_uv->ymax);
copy_v2_fl2(GPU_vertbuf_raw_step(&pos_step), rect_pos->xmax, rect_pos->ymax);
copy_v2_fl2(GPU_vertbuf_raw_step(&tex_coord_step), rect_uv->xmax, rect_uv->ymax);
copy_v2_fl2(
static_cast<float *>(GPU_vertbuf_raw_step(&pos_step)), rect_pos->xmin, rect_pos->ymin);
copy_v2_fl2(
static_cast<float *>(GPU_vertbuf_raw_step(&tex_coord_step)), rect_uv->xmin, rect_uv->ymin);
copy_v2_fl2(
static_cast<float *>(GPU_vertbuf_raw_step(&pos_step)), rect_pos->xmax, rect_pos->ymin);
copy_v2_fl2(
static_cast<float *>(GPU_vertbuf_raw_step(&tex_coord_step)), rect_uv->xmax, rect_uv->ymin);
copy_v2_fl2(
static_cast<float *>(GPU_vertbuf_raw_step(&pos_step)), rect_pos->xmin, rect_pos->ymax);
copy_v2_fl2(
static_cast<float *>(GPU_vertbuf_raw_step(&tex_coord_step)), rect_uv->xmin, rect_uv->ymax);
copy_v2_fl2(
static_cast<float *>(GPU_vertbuf_raw_step(&pos_step)), rect_pos->xmax, rect_pos->ymax);
copy_v2_fl2(
static_cast<float *>(GPU_vertbuf_raw_step(&tex_coord_step)), rect_uv->xmax, rect_uv->ymax);

return GPU_batch_create_ex(GPU_PRIM_TRI_STRIP, vbo, NULL, GPU_BATCH_OWNS_VBO);
return GPU_batch_create_ex(GPU_PRIM_TRI_STRIP, vbo, nullptr, GPU_BATCH_OWNS_VBO);
}

static GPUBatch *gpu_viewport_batch_get(GPUViewport *viewport,
@@ -396,7 +409,7 @@ static GPUBatch *gpu_viewport_batch_get(GPUViewport *viewport,

if (viewport->batch.batch && parameters_changed) {
GPU_batch_discard(viewport->batch.batch);
viewport->batch.batch = NULL;
viewport->batch.batch = nullptr;
}

if (!viewport->batch.batch) {
@@ -411,7 +424,7 @@ static void gpu_viewport_batch_free(GPUViewport *viewport)
{
if (viewport->batch.batch) {
GPU_batch_discard(viewport->batch.batch);
viewport->batch.batch = NULL;
viewport->batch.batch = nullptr;
}
}

@@ -440,7 +453,7 @@ static void gpu_viewport_draw_colormanaged(GPUViewport *viewport,
immVertexFormat();
use_ocio = IMB_colormanagement_setup_glsl_draw_from_space(&viewport->view_settings,
&viewport->display_settings,
NULL,
nullptr,
viewport->dither,
false,
do_overlay_merge);
@@ -475,7 +488,7 @@ void GPU_viewport_draw_to_screen_ex(GPUViewport *viewport,
{
GPUTexture *color = viewport->color_render_tx[view];

if (color == NULL) {
if (color == nullptr) {
return;
}

@@ -493,19 +506,18 @@ void GPU_viewport_draw_to_screen_ex(GPUViewport *viewport,
const float halfx = GLA_PIXEL_OFS / w;
const float halfy = GLA_PIXEL_OFS / h;

rctf pos_rect = {
.xmin = sanitized_rect.xmin,
.ymin = sanitized_rect.ymin,
.xmax = sanitized_rect.xmin + w,
.ymax = sanitized_rect.ymin + h,
};
rctf pos_rect{};
pos_rect.xmin = sanitized_rect.xmin;
pos_rect.ymin = sanitized_rect.ymin;
pos_rect.xmax = sanitized_rect.xmin + w;
pos_rect.ymax = sanitized_rect.ymin + h;

rctf uv_rect{};
uv_rect.xmin = halfx;
uv_rect.ymin = halfy;
uv_rect.xmax = halfx + 1.0f;
uv_rect.ymax = halfy + 1.0f;

rctf uv_rect = {
.xmin = halfx,
.ymin = halfy,
.xmax = halfx + 1.0f,
.ymax = halfy + 1.0f,
};
/* Mirror the UV rect in case axis-swapped drawing is requested (by passing a rect with min and
* max values swapped). */
if (BLI_rcti_size_x(rect) < 0) {
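The rect conversions above replace C99 designated initializers, which C++ only gained in C++20 and with restrictions, by value-initialization plus member assignments. A minimal sketch with a stand-in struct (ExampleRect is not the DNA rctf):

  struct ExampleRect {
    float xmin, xmax, ymin, ymax;
  };

  void example_fill(float w, float h)
  {
    /* Value-initialize (all zero), then assign, instead of `.xmin = ...` designators. */
    ExampleRect pos_rect{};
    pos_rect.xmin = 0.0f;
    pos_rect.ymin = 0.0f;
    pos_rect.xmax = w;
    pos_rect.ymax = h;
    (void)pos_rect;
  }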
@@ -531,35 +543,33 @@ void GPU_viewport_unbind_from_offscreen(GPUViewport *viewport,
{
const int view = 0;

if (viewport->color_render_tx[view] == NULL) {
if (viewport->color_render_tx[view] == nullptr) {
return;
}

GPU_depth_test(GPU_DEPTH_NONE);
GPU_offscreen_bind(ofs, false);

rctf pos_rect = {
.xmin = -1.0f,
.ymin = -1.0f,
.xmax = 1.0f,
.ymax = 1.0f,
};
rctf pos_rect{};
pos_rect.xmin = -1.0f;
pos_rect.ymin = -1.0f;
pos_rect.xmax = 1.0f;
pos_rect.ymax = 1.0f;

rctf uv_rect = {
.xmin = 0.0f,
.ymin = 0.0f,
.xmax = 1.0f,
.ymax = 1.0f,
};
rctf uv_rect{};
uv_rect.xmin = 0.0f;
uv_rect.ymin = 0.0f;
uv_rect.xmax = 1.0f;
uv_rect.ymax = 1.0f;

gpu_viewport_draw_colormanaged(
viewport, view, &pos_rect, &uv_rect, display_colorspace, do_overlay_merge);

/* This one is from the offscreen. Don't free it with the viewport. */
viewport->depth_tx = NULL;
viewport->depth_tx = nullptr;
}

void GPU_viewport_unbind(GPUViewport *UNUSED(viewport))
void GPU_viewport_unbind(GPUViewport * /*viewport*/)
{
GPU_framebuffer_restore();
DRW_gpu_context_disable();
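The `UNUSED(viewport)` → `/*viewport*/` change is the common C++ way to mark an intentionally unused parameter: leaving it unnamed silences the warning without a macro. A one-function sketch with hypothetical names:

  /* C relied on an UNUSED() macro; in C++ simply omit the parameter name. */
  void example_unbind(int * /*viewport*/)
  {
    /* The argument is accepted but never used. */
  }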
@@ -11,6 +11,8 @@
#include "DNA_freestyle_types.h"
#include "DNA_listBase.h"

#include "BLI_utildefines.h"

#ifdef __cplusplus
extern "C" {
#endif
@@ -49,6 +51,7 @@ typedef enum eViewLayerEEVEEPassType {
EEVEE_RENDER_PASS_TRANSPARENT = (1 << 20),
} eViewLayerEEVEEPassType;
#define EEVEE_RENDER_PASS_MAX_BIT 20
ENUM_OPERATORS(eViewLayerEEVEEPassType, 1 << EEVEE_RENDER_PASS_MAX_BIT)

/* #ViewLayerAOV.type */
typedef enum eViewLayerAOVType {
@@ -68,6 +71,7 @@ typedef enum eViewLayerCryptomatteFlags {
VIEW_LAYER_CRYPTOMATTE_ASSET = (1 << 2),
VIEW_LAYER_CRYPTOMATTE_ACCURATE = (1 << 3),
} eViewLayerCryptomatteFlags;
ENUM_OPERATORS(eViewLayerCryptomatteFlags, VIEW_LAYER_CRYPTOMATTE_ACCURATE)
#define VIEW_LAYER_CRYPTOMATTE_ALL \
(VIEW_LAYER_CRYPTOMATTE_OBJECT | VIEW_LAYER_CRYPTOMATTE_MATERIAL | VIEW_LAYER_CRYPTOMATTE_ASSET)
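The added ENUM_OPERATORS(...) lines matter once this header is consumed from C++: bitwise operations on an unscoped enum yield an `int`, which no longer converts back to the enum implicitly, so operator overloads are needed. A hand-written sketch of the idea for a placeholder flag enum (not the actual macro expansion):

  enum eExampleFlag {
    EXAMPLE_FLAG_A = (1 << 0),
    EXAMPLE_FLAG_B = (1 << 1),
  };

  /* Keep the enum type through bitwise OR so `flags |= EXAMPLE_FLAG_B;` still compiles. */
  inline eExampleFlag operator|(eExampleFlag a, eExampleFlag b)
  {
    return eExampleFlag(int(a) | int(b));
  }

  inline eExampleFlag &operator|=(eExampleFlag &a, eExampleFlag b)
  {
    a = a | b;
    return a;
  }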