From 324517fd7839a68194c1537d8f3316e83cc1a270 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cle=CC=81ment=20Foucault?= Date: Thu, 31 Oct 2024 15:18:29 +0100 Subject: [PATCH] Cleanup: GPU: Fix clang tidy warnings Removes some other things like: - `TRUST_NO_ONE` which was the same as `#ifndef NDEBUG`. - Replace `reinterpret_cast` by `unwrap` Pull Request: https://projects.blender.org/blender/blender/pulls/129631 --- source/blender/gpu/GPU_batch.hh | 8 +- source/blender/gpu/GPU_common.hh | 13 - source/blender/gpu/GPU_common_types.hh | 2 +- source/blender/gpu/GPU_framebuffer.hh | 58 ++-- source/blender/gpu/GPU_immediate_util.hh | 2 + source/blender/gpu/GPU_matrix.hh | 4 +- source/blender/gpu/GPU_platform.hh | 4 +- .../blender/gpu/GPU_platform_backend_enum.h | 4 +- source/blender/gpu/GPU_shader.hh | 4 +- source/blender/gpu/GPU_texture.hh | 6 +- source/blender/gpu/GPU_uniform_buffer.hh | 4 +- source/blender/gpu/GPU_vertex_format.hh | 35 +-- source/blender/gpu/GPU_viewport.hh | 2 - source/blender/gpu/intern/gpu_backend.hh | 6 +- source/blender/gpu/intern/gpu_batch.cc | 2 - source/blender/gpu/intern/gpu_context.cc | 2 +- .../blender/gpu/intern/gpu_context_private.hh | 2 +- .../gpu/intern/gpu_drawlist_private.hh | 8 +- source/blender/gpu/intern/gpu_framebuffer.cc | 145 +++++---- .../gpu/intern/gpu_framebuffer_private.hh | 11 +- .../gpu/intern/gpu_immediate_private.hh | 5 +- .../blender/gpu/intern/gpu_immediate_util.cc | 2 +- source/blender/gpu/intern/gpu_index_buffer.cc | 40 +-- source/blender/gpu/intern/gpu_matrix.cc | 2 +- .../gpu/intern/gpu_platform_private.hh | 1 - source/blender/gpu/intern/gpu_query.hh | 2 +- source/blender/gpu/intern/gpu_select_next.cc | 2 - source/blender/gpu/intern/gpu_select_pick.cc | 2 +- .../blender/gpu/intern/gpu_select_private.hh | 4 +- .../gpu/intern/gpu_shader_interface.hh | 1 - .../blender/gpu/intern/gpu_shader_private.hh | 18 +- .../blender/gpu/intern/gpu_state_private.hh | 15 +- .../blender/gpu/intern/gpu_storage_buffer.cc | 3 +- .../gpu/intern/gpu_storage_buffer_private.hh | 6 +- source/blender/gpu/intern/gpu_texture.cc | 295 +++++++++--------- .../blender/gpu/intern/gpu_texture_private.hh | 8 +- .../gpu/intern/gpu_uniform_buffer_private.hh | 6 +- .../blender/gpu/intern/gpu_vertex_buffer.cc | 2 - .../blender/gpu/intern/gpu_vertex_format.cc | 48 ++- .../gpu/intern/gpu_vertex_format_private.hh | 2 + source/blender/gpu/metal/mtl_batch.mm | 3 - source/blender/gpu/metal/mtl_texture.mm | 2 - 42 files changed, 350 insertions(+), 441 deletions(-) diff --git a/source/blender/gpu/GPU_batch.hh b/source/blender/gpu/GPU_batch.hh index d4fa6216edc..ce08fb4811b 100644 --- a/source/blender/gpu/GPU_batch.hh +++ b/source/blender/gpu/GPU_batch.hh @@ -29,10 +29,10 @@ struct GPUShader; -#define GPU_BATCH_VBO_MAX_LEN 16 -#define GPU_BATCH_INST_VBO_MAX_LEN 2 -#define GPU_BATCH_VAO_STATIC_LEN 3 -#define GPU_BATCH_VAO_DYN_ALLOC_COUNT 16 +constexpr static int GPU_BATCH_VBO_MAX_LEN = 16; +constexpr static int GPU_BATCH_INST_VBO_MAX_LEN = 2; +constexpr static int GPU_BATCH_VAO_STATIC_LEN = 3; +constexpr static int GPU_BATCH_VAO_DYN_ALLOC_COUNT = 16; enum eGPUBatchFlag { /** Invalid default state. 
*/ diff --git a/source/blender/gpu/GPU_common.hh b/source/blender/gpu/GPU_common.hh index 0b1591d8deb..391f669ac53 100644 --- a/source/blender/gpu/GPU_common.hh +++ b/source/blender/gpu/GPU_common.hh @@ -11,20 +11,7 @@ #define PROGRAM_NO_OPTI 0 // #define GPU_NO_USE_PY_REFERENCES -#if defined(NDEBUG) -# define TRUST_NO_ONE 0 -#else -/* strict error checking, enabled for debug builds during early development */ -# define TRUST_NO_ONE 1 -#endif - #include "BLI_sys_types.h" -#include -#include - -#if TRUST_NO_ONE -# include -#endif /* GPU_INLINE */ #if defined(_MSC_VER) diff --git a/source/blender/gpu/GPU_common_types.hh b/source/blender/gpu/GPU_common_types.hh index b803b218e30..1a14100448e 100644 --- a/source/blender/gpu/GPU_common_types.hh +++ b/source/blender/gpu/GPU_common_types.hh @@ -191,7 +191,7 @@ struct SpecializationConstant { StringRefNull name; Value value; - SpecializationConstant() {} + SpecializationConstant() = default; SpecializationConstant(const char *name, uint32_t value) : type(Type::UINT), name(name) { diff --git a/source/blender/gpu/GPU_framebuffer.hh b/source/blender/gpu/GPU_framebuffer.hh index 5258e1c8744..28523149e15 100644 --- a/source/blender/gpu/GPU_framebuffer.hh +++ b/source/blender/gpu/GPU_framebuffer.hh @@ -38,7 +38,7 @@ enum eGPUFrameBufferBits { ENUM_OPERATORS(eGPUFrameBufferBits, GPU_STENCIL_BIT) /* Guaranteed by the spec and is never greater than 16 on any hardware or implementation. */ -#define GPU_MAX_VIEWPORTS 16 +constexpr static int GPU_MAX_VIEWPORTS = 16; struct GPUAttachment { GPUTexture *tex; @@ -80,7 +80,7 @@ GPUFrameBuffer *GPU_framebuffer_back_get(); * Create a #GPUFrameBuffer object. It is not configured and not bound to a specific context until * `GPU_framebuffer_bind()` is called. */ -void GPU_framebuffer_free(GPUFrameBuffer *framebuffer); +void GPU_framebuffer_free(GPUFrameBuffer *fb); #define GPU_FRAMEBUFFER_FREE_SAFE(fb) \ do { \ @@ -112,12 +112,12 @@ void GPU_backbuffer_bind(eGPUBackBuffer back_buffer_type); /** * Binds a #GPUFrameBuffer making it the active framebuffer for all geometry rendering. */ -void GPU_framebuffer_bind(GPUFrameBuffer *framebuffer); +void GPU_framebuffer_bind(GPUFrameBuffer *fb); /** * Same as `GPU_framebuffer_bind` but do not enable the SRGB transform. */ -void GPU_framebuffer_bind_no_srgb(GPUFrameBuffer *framebuffer); +void GPU_framebuffer_bind_no_srgb(GPUFrameBuffer *fb); /** * Binds back the active context's default frame-buffer. @@ -174,7 +174,7 @@ struct GPULoadStore { * }) * \endcode */ -void GPU_framebuffer_bind_loadstore(GPUFrameBuffer *framebuffer, +void GPU_framebuffer_bind_loadstore(GPUFrameBuffer *fb, const GPULoadStore *load_store_actions, uint load_store_actions_len); #define GPU_framebuffer_bind_ex(_fb, ...) \ @@ -204,7 +204,7 @@ void GPU_framebuffer_bind_loadstore(GPUFrameBuffer *framebuffer, * * \note Excess attachments will have no effect as long as they are GPU_ATTACHMENT_IGNORE. */ -void GPU_framebuffer_subpass_transition_array(GPUFrameBuffer *framebuffer, +void GPU_framebuffer_subpass_transition_array(GPUFrameBuffer *fb, const GPUAttachmentState *attachment_states, uint attachment_len); @@ -255,9 +255,7 @@ void GPU_framebuffer_subpass_transition_array(GPUFrameBuffer *framebuffer, * Setting #GPUAttachment.mip to -1 will leave the texture in this slot. * Setting #GPUAttachment.tex to nullptr will detach the texture in this slot. 
*/ -void GPU_framebuffer_config_array(GPUFrameBuffer *framebuffer, - const GPUAttachment *config, - int config_len); +void GPU_framebuffer_config_array(GPUFrameBuffer *fb, const GPUAttachment *config, int config_len); /** Empty bind point. */ #define GPU_ATTACHMENT_NONE \ @@ -311,10 +309,7 @@ void GPU_framebuffer_config_array(GPUFrameBuffer *framebuffer, * DEPRECATED: Prefer using multiple #GPUFrameBuffer with different configurations with * `GPU_framebuffer_config_array()`. */ -void GPU_framebuffer_texture_attach(GPUFrameBuffer *framebuffer, - GPUTexture *texture, - int slot, - int mip); +void GPU_framebuffer_texture_attach(GPUFrameBuffer *fb, GPUTexture *texture, int slot, int mip); /** * Attach a single layer of an array texture mip level to a #GPUFrameBuffer. @@ -326,7 +321,7 @@ void GPU_framebuffer_texture_attach(GPUFrameBuffer *framebuffer, * `GPU_framebuffer_config_array()`. */ void GPU_framebuffer_texture_layer_attach( - GPUFrameBuffer *framebuffer, GPUTexture *texture, int slot, int layer, int mip); + GPUFrameBuffer *fb, GPUTexture *texture, int slot, int layer, int mip); /** * Attach a single cube-face of an cube-map texture mip level to a #GPUFrameBuffer. @@ -338,7 +333,7 @@ void GPU_framebuffer_texture_layer_attach( * `GPU_framebuffer_config_array()`. */ void GPU_framebuffer_texture_cubeface_attach( - GPUFrameBuffer *framebuffer, GPUTexture *texture, int slot, int face, int mip); + GPUFrameBuffer *fb, GPUTexture *texture, int slot, int face, int mip); /** * Detach a texture from a #GPUFrameBuffer. The texture must be attached. @@ -346,7 +341,7 @@ void GPU_framebuffer_texture_cubeface_attach( * DEPRECATED: Prefer using multiple #GPUFrameBuffer with different configurations with * `GPU_framebuffer_config_array()`. */ -void GPU_framebuffer_texture_detach(GPUFrameBuffer *framebuffer, GPUTexture *texture); +void GPU_framebuffer_texture_detach(GPUFrameBuffer *fb, GPUTexture *texture); /** * Checks a framebuffer current configuration for errors. @@ -355,7 +350,7 @@ void GPU_framebuffer_texture_detach(GPUFrameBuffer *framebuffer, GPUTexture *tex * \a err_out is an error output buffer. * \return false if the framebuffer is invalid. */ -bool GPU_framebuffer_check_valid(GPUFrameBuffer *framebuffer, char err_out[256]); +bool GPU_framebuffer_check_valid(GPUFrameBuffer *fb, char err_out[256]); /** \} */ @@ -371,7 +366,7 @@ bool GPU_framebuffer_check_valid(GPUFrameBuffer *framebuffer, char err_out[256]) * Default size is used if the frame-buffer contains no attachments. * It needs to be re-specified each time an attachment is added. */ -void GPU_framebuffer_default_size(GPUFrameBuffer *framebuffer, int width, int height); +void GPU_framebuffer_default_size(GPUFrameBuffer *fb, int width, int height); /** \} */ @@ -388,8 +383,7 @@ void GPU_framebuffer_default_size(GPUFrameBuffer *framebuffer, int width, int he * \note Setting a singular viewport will only change the state of the first viewport. * \note Must be called after first bind. */ -void GPU_framebuffer_viewport_set( - GPUFrameBuffer *framebuffer, int x, int y, int width, int height); +void GPU_framebuffer_viewport_set(GPUFrameBuffer *fb, int x, int y, int width, int height); /** * Similar to `GPU_framebuffer_viewport_set()` but specify the bounds of all 16 viewports. @@ -406,13 +400,13 @@ void GPU_framebuffer_multi_viewports_set(GPUFrameBuffer *gpu_fb, * Return the viewport offset and size in a int quadruple: (x, y, width, height). * \note Viewport and scissor size is stored per frame-buffer. 
*/ -void GPU_framebuffer_viewport_get(GPUFrameBuffer *framebuffer, int r_viewport[4]); +void GPU_framebuffer_viewport_get(GPUFrameBuffer *fb, int r_viewport[4]); /** * Reset a frame-buffer viewport bounds to its attachment(s) size. * \note Viewport and scissor size is stored per frame-buffer. */ -void GPU_framebuffer_viewport_reset(GPUFrameBuffer *framebuffer); +void GPU_framebuffer_viewport_reset(GPUFrameBuffer *fb); /** \} */ @@ -432,7 +426,7 @@ void GPU_framebuffer_viewport_reset(GPUFrameBuffer *framebuffer); * \note `GPU_write_mask`, and stencil test do not affect this command. * \note Viewport and scissor regions affect this command but are not efficient nor recommended. */ -void GPU_framebuffer_clear(GPUFrameBuffer *framebuffer, +void GPU_framebuffer_clear(GPUFrameBuffer *fb, eGPUFrameBufferBits buffers, const float clear_col[4], float clear_depth, @@ -495,7 +489,7 @@ void GPU_framebuffer_clear_color_depth_stencil(GPUFrameBuffer *fb, * \note `GPU_write_mask`, and stencil test do not affect this command. * \note Viewport and scissor regions affect this command but are not efficient nor recommended. */ -void GPU_framebuffer_multi_clear(GPUFrameBuffer *framebuffer, const float (*clear_colors)[4]); +void GPU_framebuffer_multi_clear(GPUFrameBuffer *fb, const float (*clear_colors)[4]); /** * Clear all color attachment textures of the active frame-buffer with the given red, green, blue, @@ -520,7 +514,7 @@ void GPU_clear_depth(float depth); /** \name Debugging introspection API. * \{ */ -const char *GPU_framebuffer_get_name(GPUFrameBuffer *framebuffer); +const char *GPU_framebuffer_get_name(GPUFrameBuffer *fb); /** \} */ @@ -536,8 +530,8 @@ const char *GPU_framebuffer_get_name(GPUFrameBuffer *framebuffer); * Points to #BPyGPUFrameBuffer.fb */ #ifndef GPU_NO_USE_PY_REFERENCES -void **GPU_framebuffer_py_reference_get(GPUFrameBuffer *framebuffer); -void GPU_framebuffer_py_reference_set(GPUFrameBuffer *framebuffer, void **py_ref); +void **GPU_framebuffer_py_reference_get(GPUFrameBuffer *fb); +void GPU_framebuffer_py_reference_set(GPUFrameBuffer *fb, void **py_ref); #endif /** @@ -547,7 +541,7 @@ void GPU_framebuffer_py_reference_set(GPUFrameBuffer *framebuffer, void **py_ref */ /* TODO(fclem): This has nothing to do with the GPU module and should be move to the pyGPU module. */ -void GPU_framebuffer_push(GPUFrameBuffer *framebuffer); +void GPU_framebuffer_push(GPUFrameBuffer *fb); GPUFrameBuffer *GPU_framebuffer_pop(); uint GPU_framebuffer_stack_level_get(); @@ -562,14 +556,14 @@ uint GPU_framebuffer_stack_level_get(); * \note return false if no context is active. * \note this is undefined behavior if \a framebuffer is `nullptr`. * DEPRECATED: Kept only because of Python GPU API. */ -bool GPU_framebuffer_bound(GPUFrameBuffer *framebuffer); +bool GPU_framebuffer_bound(GPUFrameBuffer *fb); /** * Read a region of the framebuffer depth attachment and copy it to \a r_data . * The pixel data will be converted to \a data_format but it needs to be compatible with the * attachment type. DEPRECATED: Prefer using `GPU_texture_read()`. */ -void GPU_framebuffer_read_depth(GPUFrameBuffer *framebuffer, +void GPU_framebuffer_read_depth(GPUFrameBuffer *fb, int x, int y, int width, @@ -582,7 +576,7 @@ void GPU_framebuffer_read_depth(GPUFrameBuffer *framebuffer, * The pixel data will be converted to \a data_format but it needs to be compatible with the * attachment type. DEPRECATED: Prefer using `GPU_texture_read()`. 
*/ -void GPU_framebuffer_read_color(GPUFrameBuffer *framebuffer, +void GPU_framebuffer_read_color(GPUFrameBuffer *fb, int x, int y, int width, @@ -625,7 +619,7 @@ void GPU_framebuffer_blit(GPUFrameBuffer *fb_read, * DEPRECATED: Prefer using a compute shader with arbitrary imageLoad/Store for this purpose * as it is clearer and likely faster with optimizations. */ -void GPU_framebuffer_recursive_downsample(GPUFrameBuffer *framebuffer, +void GPU_framebuffer_recursive_downsample(GPUFrameBuffer *fb, int max_level, void (*per_level_callback)(void *user_data, int level), void *user_data); diff --git a/source/blender/gpu/GPU_immediate_util.hh b/source/blender/gpu/GPU_immediate_util.hh index 4c6ef1dd607..2e1ec709f40 100644 --- a/source/blender/gpu/GPU_immediate_util.hh +++ b/source/blender/gpu/GPU_immediate_util.hh @@ -10,6 +10,8 @@ #pragma once +#include "BLI_sys_types.h" + /* Draw 2D rectangles (replaces glRect functions) */ /* caller is responsible for vertex format & shader */ void immRectf(uint pos, float x1, float y1, float x2, float y2); diff --git a/source/blender/gpu/GPU_matrix.hh b/source/blender/gpu/GPU_matrix.hh index 37efe541991..2c23de61033 100644 --- a/source/blender/gpu/GPU_matrix.hh +++ b/source/blender/gpu/GPU_matrix.hh @@ -223,5 +223,5 @@ int GPU_matrix_stack_level_get_projection(); /* Not part of the GPU_matrix API, * however we need to check these limits in code that calls into these API's. */ -#define GPU_MATRIX_ORTHO_CLIP_NEAR_DEFAULT (-100) -#define GPU_MATRIX_ORTHO_CLIP_FAR_DEFAULT (100) +constexpr static int GPU_MATRIX_ORTHO_CLIP_NEAR_DEFAULT = -100; +constexpr static int GPU_MATRIX_ORTHO_CLIP_FAR_DEFAULT = 100; diff --git a/source/blender/gpu/GPU_platform.hh b/source/blender/gpu/GPU_platform.hh index 6843d62b74b..0b21a220a57 100644 --- a/source/blender/gpu/GPU_platform.hh +++ b/source/blender/gpu/GPU_platform.hh @@ -66,13 +66,13 @@ enum GPUArchitectureType { GPU_ARCHITECTURE_TBDR = 1, }; -typedef struct GPUDevice { +struct GPUDevice { std::string identifier; int index; uint32_t vendor_id; uint32_t device_id; std::string name; -} GPUDevice; +}; /* GPU Types */ /* TODO: Verify all use-cases of GPU_type_matches to determine which graphics API it should apply diff --git a/source/blender/gpu/GPU_platform_backend_enum.h b/source/blender/gpu/GPU_platform_backend_enum.h index 3c7bffdc39e..f7338b1ea30 100644 --- a/source/blender/gpu/GPU_platform_backend_enum.h +++ b/source/blender/gpu/GPU_platform_backend_enum.h @@ -8,10 +8,10 @@ #pragma once -typedef enum eGPUBackendType { +enum eGPUBackendType { GPU_BACKEND_NONE = 0, GPU_BACKEND_OPENGL = 1 << 0, GPU_BACKEND_METAL = 1 << 1, GPU_BACKEND_VULKAN = 1 << 3, GPU_BACKEND_ANY = 0xFFFFFFFFu -} eGPUBackendType; +}; diff --git a/source/blender/gpu/GPU_shader.hh b/source/blender/gpu/GPU_shader.hh index dc4a4767147..8f51c3d73d7 100644 --- a/source/blender/gpu/GPU_shader.hh +++ b/source/blender/gpu/GPU_shader.hh @@ -26,10 +26,10 @@ struct GPUShader; /* Hardware limit is 16. Position attribute is always needed so we reduce to 15. * This makes sure the GPUVertexFormat name buffer does not overflow. */ -#define GPU_MAX_ATTR 15 +constexpr static int GPU_MAX_ATTR = 15; /* Determined by the maximum uniform buffer size divided by chunk size. 
*/ -#define GPU_MAX_UNIFORM_ATTR 8 +constexpr static int GPU_MAX_UNIFORM_ATTR = 8; /* -------------------------------------------------------------------- */ /** \name Creation diff --git a/source/blender/gpu/GPU_texture.hh b/source/blender/gpu/GPU_texture.hh index 49a0e525ca5..ce20d5bce62 100644 --- a/source/blender/gpu/GPU_texture.hh +++ b/source/blender/gpu/GPU_texture.hh @@ -977,17 +977,17 @@ bool GPU_texture_has_integer_format(const GPUTexture *texture); /** * Return true if the texture format is a float type. */ -bool GPU_texture_has_float_format(const GPUTexture *tex); +bool GPU_texture_has_float_format(const GPUTexture *texture); /** * Return true if the texture format is an integer normalized type. */ -bool GPU_texture_has_normalized_format(const GPUTexture *tex); +bool GPU_texture_has_normalized_format(const GPUTexture *texture); /** * Return true if the texture format is a signed type. */ -bool GPU_texture_has_signed_format(const GPUTexture *tex); +bool GPU_texture_has_signed_format(const GPUTexture *texture); /** * Returns the pixel dimensions of a texture's mip-map level. diff --git a/source/blender/gpu/GPU_uniform_buffer.hh b/source/blender/gpu/GPU_uniform_buffer.hh index e2a773fbf0a..d5aa4f19a65 100644 --- a/source/blender/gpu/GPU_uniform_buffer.hh +++ b/source/blender/gpu/GPU_uniform_buffer.hh @@ -15,6 +15,8 @@ #pragma once +#include "BLI_sys_types.h" + struct ListBase; /** Opaque type hiding blender::gpu::UniformBuf. */ @@ -49,4 +51,4 @@ void GPU_uniformbuf_clear_to_zero(GPUUniformBuf *ubo); #define GPU_UBO_BLOCK_NAME "node_tree" #define GPU_ATTRIBUTE_UBO_BLOCK_NAME "unf_attrs" #define GPU_LAYER_ATTRIBUTE_UBO_BLOCK_NAME "drw_layer_attrs" -#define GPU_NODE_TREE_UBO_SLOT 0 +constexpr static int GPU_NODE_TREE_UBO_SLOT = 0; diff --git a/source/blender/gpu/GPU_vertex_format.hh b/source/blender/gpu/GPU_vertex_format.hh index a83222cf8ee..98864d48303 100644 --- a/source/blender/gpu/GPU_vertex_format.hh +++ b/source/blender/gpu/GPU_vertex_format.hh @@ -17,12 +17,12 @@ struct GPUShader; -#define GPU_VERT_ATTR_MAX_LEN 16 -#define GPU_VERT_ATTR_MAX_NAMES 6 -#define GPU_VERT_ATTR_NAMES_BUF_LEN 256 -#define GPU_VERT_FORMAT_MAX_NAMES 63 /* More than enough, actual max is ~30. */ +constexpr static int GPU_VERT_ATTR_MAX_LEN = 16; +constexpr static int GPU_VERT_ATTR_MAX_NAMES = 6; +constexpr static int GPU_VERT_ATTR_NAMES_BUF_LEN = 256; +constexpr static int GPU_VERT_FORMAT_MAX_NAMES = 63; /* More than enough, actual max is ~30. */ /* Computed as GPU_VERT_ATTR_NAMES_BUF_LEN / 30 (actual max format name). */ -#define GPU_MAX_SAFE_ATTR_NAME 12 +constexpr static int GPU_MAX_SAFE_ATTR_NAME = 12; enum GPUVertCompType { GPU_COMP_I8 = 0, @@ -162,32 +162,27 @@ struct GPUNormal { }; }; -/* OpenGL ES packs in a different order as desktop GL but component conversion is the same. - * Of the code here, only GPUPackedNormal needs to change. */ - -#define SIGNED_INT_10_MAX 511 -#define SIGNED_INT_10_MIN -512 - BLI_INLINE int clampi(int x, int min_allowed, int max_allowed) { -#if TRUST_NO_ONE - assert(min_allowed <= max_allowed); -#endif + BLI_assert(min_allowed <= max_allowed); if (x < min_allowed) { return min_allowed; } - else if (x > max_allowed) { + if (x > max_allowed) { return max_allowed; } - else { - return x; - } + return x; } BLI_INLINE int gpu_convert_normalized_f32_to_i10(float x) { - int qx = x * 511.0f; - return clampi(qx, SIGNED_INT_10_MIN, SIGNED_INT_10_MAX); + /* OpenGL ES packs in a different order as desktop GL but component conversion is the same. 
+ * Of the code here, only GPUPackedNormal needs to change. */ + constexpr int signed_int_10_max = 511; + constexpr int signed_int_10_min = -512; + + int qx = x * signed_int_10_max; + return clampi(qx, signed_int_10_min, signed_int_10_max); } BLI_INLINE int gpu_convert_i16_to_i10(short x) diff --git a/source/blender/gpu/GPU_viewport.hh b/source/blender/gpu/GPU_viewport.hh index 92c1757fdc5..8ad7a2eb183 100644 --- a/source/blender/gpu/GPU_viewport.hh +++ b/source/blender/gpu/GPU_viewport.hh @@ -8,8 +8,6 @@ #pragma once -#include - #include "DNA_scene_types.h" #include "DNA_vec_types.h" diff --git a/source/blender/gpu/intern/gpu_backend.hh b/source/blender/gpu/intern/gpu_backend.hh index 0b1df0e46d9..b75fefbb9e9 100644 --- a/source/blender/gpu/intern/gpu_backend.hh +++ b/source/blender/gpu/intern/gpu_backend.hh @@ -12,8 +12,7 @@ #include "GPU_vertex_buffer.hh" -namespace blender { -namespace gpu { +namespace blender::gpu { class Context; @@ -64,5 +63,4 @@ class GPUBackend { virtual void render_step() = 0; }; -} // namespace gpu -} // namespace blender +} // namespace blender::gpu diff --git a/source/blender/gpu/intern/gpu_batch.cc b/source/blender/gpu/intern/gpu_batch.cc index 9c539603212..e3016d5d05f 100644 --- a/source/blender/gpu/intern/gpu_batch.cc +++ b/source/blender/gpu/intern/gpu_batch.cc @@ -25,8 +25,6 @@ #include "gpu_context_private.hh" #include "gpu_shader_private.hh" -#include "GPU_batch.hh" - #include using namespace blender::gpu; diff --git a/source/blender/gpu/intern/gpu_context.cc b/source/blender/gpu/intern/gpu_context.cc index 4bfd8e43ea1..3d9b6d8ab6a 100644 --- a/source/blender/gpu/intern/gpu_context.cc +++ b/source/blender/gpu/intern/gpu_context.cc @@ -279,7 +279,7 @@ bool GPU_backend_type_selection_is_overridden() bool GPU_backend_type_selection_detect() { blender::VectorSet backends_to_check; - if (GPU_backend_type_selection_is_overridden()) { + if (g_backend_type_override.has_value()) { backends_to_check.add(*g_backend_type_override); } #if defined(WITH_OPENGL_BACKEND) diff --git a/source/blender/gpu/intern/gpu_context_private.hh b/source/blender/gpu/intern/gpu_context_private.hh index 521bbedc571..ccbf00bec58 100644 --- a/source/blender/gpu/intern/gpu_context_private.hh +++ b/source/blender/gpu/intern/gpu_context_private.hh @@ -87,7 +87,7 @@ class Context { virtual void memory_statistics_get(int *r_total_mem, int *r_free_mem) = 0; - virtual void debug_group_begin(const char *, int){}; + virtual void debug_group_begin(const char * /*name*/, int /*index*/){}; virtual void debug_group_end(){}; /* Returns true if capture successfully started. */ diff --git a/source/blender/gpu/intern/gpu_drawlist_private.hh b/source/blender/gpu/intern/gpu_drawlist_private.hh index 60b21b46d60..17d2a909899 100644 --- a/source/blender/gpu/intern/gpu_drawlist_private.hh +++ b/source/blender/gpu/intern/gpu_drawlist_private.hh @@ -12,8 +12,7 @@ #include "GPU_drawlist.hh" -namespace blender { -namespace gpu { +namespace blender::gpu { /** * Implementation of Multi Draw Indirect. 
@@ -21,7 +20,7 @@ namespace gpu { */ class DrawList { public: - virtual ~DrawList(){}; + virtual ~DrawList() = default; virtual void append(Batch *batch, int i_first, int i_count) = 0; virtual void submit() = 0; @@ -41,5 +40,4 @@ static inline const DrawList *unwrap(const GPUDrawList *vert) return reinterpret_cast(vert); } -} // namespace gpu -} // namespace blender +} // namespace blender::gpu diff --git a/source/blender/gpu/intern/gpu_framebuffer.cc b/source/blender/gpu/intern/gpu_framebuffer.cc index 56dc392eac7..11407556291 100644 --- a/source/blender/gpu/intern/gpu_framebuffer.cc +++ b/source/blender/gpu/intern/gpu_framebuffer.cc @@ -277,59 +277,58 @@ GPUFrameBuffer *GPU_framebuffer_create(const char *name) return wrap(GPUBackend::get()->framebuffer_alloc(name)); } -void GPU_framebuffer_free(GPUFrameBuffer *gpu_fb) +void GPU_framebuffer_free(GPUFrameBuffer *fb) { - delete unwrap(gpu_fb); + delete unwrap(fb); } -const char *GPU_framebuffer_get_name(GPUFrameBuffer *gpu_fb) +const char *GPU_framebuffer_get_name(GPUFrameBuffer *fb) { - return unwrap(gpu_fb)->name_get(); + return unwrap(fb)->name_get(); } /* ---------- Binding ----------- */ -void GPU_framebuffer_bind(GPUFrameBuffer *gpu_fb) +void GPU_framebuffer_bind(GPUFrameBuffer *fb) { const bool enable_srgb = true; /* Disable custom loadstore and bind. */ - unwrap(gpu_fb)->set_use_explicit_loadstore(false); - unwrap(gpu_fb)->bind(enable_srgb); + unwrap(fb)->set_use_explicit_loadstore(false); + unwrap(fb)->bind(enable_srgb); } -void GPU_framebuffer_bind_loadstore(GPUFrameBuffer *gpu_fb, +void GPU_framebuffer_bind_loadstore(GPUFrameBuffer *fb, const GPULoadStore *load_store_actions, uint actions_len) { const bool enable_srgb = true; /* Bind with explicit loadstore state */ - unwrap(gpu_fb)->set_use_explicit_loadstore(true); - unwrap(gpu_fb)->bind(enable_srgb); + unwrap(fb)->set_use_explicit_loadstore(true); + unwrap(fb)->bind(enable_srgb); /* Update load store */ - FrameBuffer *fb = unwrap(gpu_fb); - fb->load_store_config_array(load_store_actions, actions_len); + unwrap(fb)->load_store_config_array(load_store_actions, actions_len); } -void GPU_framebuffer_subpass_transition_array(GPUFrameBuffer *gpu_fb, +void GPU_framebuffer_subpass_transition_array(GPUFrameBuffer *fb, const GPUAttachmentState *attachment_states, uint attachment_len) { - unwrap(gpu_fb)->subpass_transition( + unwrap(fb)->subpass_transition( attachment_states[0], Span(attachment_states + 1, attachment_len - 1)); } -void GPU_framebuffer_bind_no_srgb(GPUFrameBuffer *gpu_fb) +void GPU_framebuffer_bind_no_srgb(GPUFrameBuffer *fb) { const bool enable_srgb = false; - unwrap(gpu_fb)->bind(enable_srgb); + unwrap(fb)->bind(enable_srgb); } -void GPU_backbuffer_bind(eGPUBackBuffer buffer) +void GPU_backbuffer_bind(eGPUBackBuffer back_buffer_type) { Context *ctx = Context::get(); - if (buffer == GPU_BACKBUFFER_LEFT) { + if (back_buffer_type == GPU_BACKBUFFER_LEFT) { ctx->back_left->bind(false); } else { @@ -510,12 +509,12 @@ void GPU_framebuffer_clear_color_depth_stencil(GPUFrameBuffer *fb, fb, GPU_COLOR_BIT | GPU_DEPTH_BIT | GPU_STENCIL_BIT, clear_col, clear_depth, clear_stencil); } -void GPU_framebuffer_multi_clear(GPUFrameBuffer *gpu_fb, const float (*clear_cols)[4]) +void GPU_framebuffer_multi_clear(GPUFrameBuffer *fb, const float (*clear_colors)[4]) { - BLI_assert_msg(unwrap(gpu_fb)->get_use_explicit_loadstore() == false, + BLI_assert_msg(unwrap(fb)->get_use_explicit_loadstore() == false, "Using GPU_framebuffer_clear_* functions in conjunction with custom load-store " "state via 
GPU_framebuffer_bind_ex is invalid."); - unwrap(gpu_fb)->clear_multi(clear_cols); + unwrap(fb)->clear_multi(clear_colors); } void GPU_clear_color(float red, float green, float blue, float alpha) @@ -537,13 +536,13 @@ void GPU_clear_depth(float depth) } void GPU_framebuffer_read_depth( - GPUFrameBuffer *gpu_fb, int x, int y, int w, int h, eGPUDataFormat format, void *data) + GPUFrameBuffer *fb, int x, int y, int w, int h, eGPUDataFormat format, void *data) { int rect[4] = {x, y, w, h}; - unwrap(gpu_fb)->read(GPU_DEPTH_BIT, format, rect, 1, 1, data); + unwrap(fb)->read(GPU_DEPTH_BIT, format, rect, 1, 1, data); } -void GPU_framebuffer_read_color(GPUFrameBuffer *gpu_fb, +void GPU_framebuffer_read_color(GPUFrameBuffer *fb, int x, int y, int w, @@ -554,7 +553,7 @@ void GPU_framebuffer_read_color(GPUFrameBuffer *gpu_fb, void *data) { int rect[4] = {x, y, w, h}; - unwrap(gpu_fb)->read(GPU_COLOR_BIT, format, rect, channels, slot, data); + unwrap(fb)->read(GPU_COLOR_BIT, format, rect, channels, slot, data); } void GPU_frontbuffer_read_color( @@ -565,14 +564,14 @@ void GPU_frontbuffer_read_color( } /* TODO(fclem): port as texture operation. */ -void GPU_framebuffer_blit(GPUFrameBuffer *gpufb_read, +void GPU_framebuffer_blit(GPUFrameBuffer *gpu_fb_read, int read_slot, - GPUFrameBuffer *gpufb_write, + GPUFrameBuffer *gpu_fb_write, int write_slot, eGPUFrameBufferBits blit_buffers) { - FrameBuffer *fb_read = unwrap(gpufb_read); - FrameBuffer *fb_write = unwrap(gpufb_write); + FrameBuffer *fb_read = unwrap(gpu_fb_read); + FrameBuffer *fb_write = unwrap(gpu_fb_write); BLI_assert(blit_buffers != 0); FrameBuffer *prev_fb = Context::get()->active_fb; @@ -605,24 +604,24 @@ void GPU_framebuffer_blit(GPUFrameBuffer *gpufb_read, prev_fb->bind(true); } -void GPU_framebuffer_recursive_downsample(GPUFrameBuffer *gpu_fb, - int max_lvl, - void (*callback)(void *user_data, int level), +void GPU_framebuffer_recursive_downsample(GPUFrameBuffer *fb, + int max_level, + void (*per_level_callback)(void *user_data, int level), void *user_data) { - unwrap(gpu_fb)->recursive_downsample(max_lvl, callback, user_data); + unwrap(fb)->recursive_downsample(max_level, per_level_callback, user_data); } #ifndef GPU_NO_USE_PY_REFERENCES -void **GPU_framebuffer_py_reference_get(GPUFrameBuffer *gpu_fb) +void **GPU_framebuffer_py_reference_get(GPUFrameBuffer *fb) { - return unwrap(gpu_fb)->py_ref; + return unwrap(fb)->py_ref; } -void GPU_framebuffer_py_reference_set(GPUFrameBuffer *gpu_fb, void **py_ref) +void GPU_framebuffer_py_reference_set(GPUFrameBuffer *fb, void **py_ref) { - BLI_assert(py_ref == nullptr || unwrap(gpu_fb)->py_ref == nullptr); - unwrap(gpu_fb)->py_ref = py_ref; + BLI_assert(py_ref == nullptr || unwrap(fb)->py_ref == nullptr); + unwrap(fb)->py_ref = py_ref; } #endif @@ -671,9 +670,9 @@ uint GPU_framebuffer_stack_level_get() * Might be bound to multiple contexts. 
* \{ */ -#define MAX_CTX_FB_LEN 3 - struct GPUOffScreen { + constexpr static int MAX_CTX_FB_LEN = 3; + struct { Context *ctx; GPUFrameBuffer *fb; @@ -726,7 +725,7 @@ static GPUFrameBuffer *gpu_offscreen_fb_get(GPUOffScreen *ofs) GPUOffScreen *GPU_offscreen_create(int width, int height, - bool depth, + bool with_depth_buffer, eGPUTextureFormat format, eGPUTextureUsage usage, char err_out[256]) @@ -743,14 +742,14 @@ GPUOffScreen *GPU_offscreen_create(int width, ofs->color = GPU_texture_create_2d("ofs_color", width, height, 1, format, usage, nullptr); - if (depth) { + if (with_depth_buffer) { /* Format view flag is needed by Workbench Volumes to read the stencil view. */ eGPUTextureUsage depth_usage = usage | GPU_TEXTURE_USAGE_FORMAT_VIEW; ofs->depth = GPU_texture_create_2d( "ofs_depth", width, height, 1, GPU_DEPTH24_STENCIL8, depth_usage, nullptr); } - if ((depth && !ofs->depth) || !ofs->color) { + if ((with_depth_buffer && !ofs->depth) || !ofs->color) { const char error[] = "GPUTexture: Texture allocation failed."; if (err_out) { BLI_strncpy(err_out, error, 256); @@ -773,33 +772,33 @@ GPUOffScreen *GPU_offscreen_create(int width, return ofs; } -void GPU_offscreen_free(GPUOffScreen *ofs) +void GPU_offscreen_free(GPUOffScreen *offscreen) { - for (auto &framebuffer : ofs->framebuffers) { + for (auto &framebuffer : offscreen->framebuffers) { if (framebuffer.fb) { GPU_framebuffer_free(framebuffer.fb); } } - if (ofs->color) { - GPU_texture_free(ofs->color); + if (offscreen->color) { + GPU_texture_free(offscreen->color); } - if (ofs->depth) { - GPU_texture_free(ofs->depth); + if (offscreen->depth) { + GPU_texture_free(offscreen->depth); } - MEM_freeN(ofs); + MEM_freeN(offscreen); } -void GPU_offscreen_bind(GPUOffScreen *ofs, bool save) +void GPU_offscreen_bind(GPUOffScreen *offscreen, bool save) { if (save) { GPUFrameBuffer *fb = GPU_framebuffer_active_get(); GPU_framebuffer_push(fb); } - unwrap(gpu_offscreen_fb_get(ofs))->bind(false); + unwrap(gpu_offscreen_fb_get(offscreen))->bind(false); } -void GPU_offscreen_unbind(GPUOffScreen * /*ofs*/, bool restore) +void GPU_offscreen_unbind(GPUOffScreen * /*offscreen*/, bool restore) { GPUFrameBuffer *fb = nullptr; if (restore) { @@ -814,48 +813,48 @@ void GPU_offscreen_unbind(GPUOffScreen * /*ofs*/, bool restore) } } -void GPU_offscreen_draw_to_screen(GPUOffScreen *ofs, int x, int y) +void GPU_offscreen_draw_to_screen(GPUOffScreen *offscreen, int x, int y) { Context *ctx = Context::get(); - FrameBuffer *ofs_fb = unwrap(gpu_offscreen_fb_get(ofs)); + FrameBuffer *ofs_fb = unwrap(gpu_offscreen_fb_get(offscreen)); ofs_fb->blit_to(GPU_COLOR_BIT, 0, ctx->active_fb, 0, x, y); } void GPU_offscreen_read_color_region( - GPUOffScreen *ofs, eGPUDataFormat format, int x, int y, int w, int h, void *r_data) + GPUOffScreen *offscreen, eGPUDataFormat format, int x, int y, int w, int h, void *r_data) { BLI_assert(ELEM(format, GPU_DATA_UBYTE, GPU_DATA_FLOAT)); BLI_assert(x >= 0 && y >= 0 && w > 0 && h > 0); - BLI_assert(x + w <= GPU_texture_width(ofs->color)); - BLI_assert(y + h <= GPU_texture_height(ofs->color)); + BLI_assert(x + w <= GPU_texture_width(offscreen->color)); + BLI_assert(y + h <= GPU_texture_height(offscreen->color)); - GPUFrameBuffer *ofs_fb = gpu_offscreen_fb_get(ofs); + GPUFrameBuffer *ofs_fb = gpu_offscreen_fb_get(offscreen); GPU_framebuffer_read_color(ofs_fb, x, y, w, h, 4, 0, format, r_data); } -void GPU_offscreen_read_color(GPUOffScreen *ofs, eGPUDataFormat format, void *r_data) +void GPU_offscreen_read_color(GPUOffScreen *offscreen, eGPUDataFormat 
format, void *r_data) { BLI_assert(ELEM(format, GPU_DATA_UBYTE, GPU_DATA_FLOAT)); - const int w = GPU_texture_width(ofs->color); - const int h = GPU_texture_height(ofs->color); + const int w = GPU_texture_width(offscreen->color); + const int h = GPU_texture_height(offscreen->color); - GPU_offscreen_read_color_region(ofs, format, 0, 0, w, h, r_data); + GPU_offscreen_read_color_region(offscreen, format, 0, 0, w, h, r_data); } -int GPU_offscreen_width(const GPUOffScreen *ofs) +int GPU_offscreen_width(const GPUOffScreen *offscreen) { - return GPU_texture_width(ofs->color); + return GPU_texture_width(offscreen->color); } -int GPU_offscreen_height(const GPUOffScreen *ofs) +int GPU_offscreen_height(const GPUOffScreen *offscreen) { - return GPU_texture_height(ofs->color); + return GPU_texture_height(offscreen->color); } -GPUTexture *GPU_offscreen_color_texture(const GPUOffScreen *ofs) +GPUTexture *GPU_offscreen_color_texture(const GPUOffScreen *offscreen) { - return ofs->color; + return offscreen->color; } eGPUTextureFormat GPU_offscreen_format(const GPUOffScreen *offscreen) @@ -863,14 +862,14 @@ eGPUTextureFormat GPU_offscreen_format(const GPUOffScreen *offscreen) return GPU_texture_format(offscreen->color); } -void GPU_offscreen_viewport_data_get(GPUOffScreen *ofs, +void GPU_offscreen_viewport_data_get(GPUOffScreen *offscreen, GPUFrameBuffer **r_fb, GPUTexture **r_color, GPUTexture **r_depth) { - *r_fb = gpu_offscreen_fb_get(ofs); - *r_color = ofs->color; - *r_depth = ofs->depth; + *r_fb = gpu_offscreen_fb_get(offscreen); + *r_color = offscreen->color; + *r_depth = offscreen->depth; } /** \} */ diff --git a/source/blender/gpu/intern/gpu_framebuffer_private.hh b/source/blender/gpu/intern/gpu_framebuffer_private.hh index dee6d884029..418be68d34c 100644 --- a/source/blender/gpu/intern/gpu_framebuffer_private.hh +++ b/source/blender/gpu/intern/gpu_framebuffer_private.hh @@ -61,8 +61,7 @@ inline GPUAttachmentType &operator--(GPUAttachmentType &a) return a; } -namespace blender { -namespace gpu { +namespace blender::gpu { #ifndef NDEBUG # define DEBUG_NAME_LEN 64 @@ -89,8 +88,8 @@ class FrameBuffer { /* Flag specifying the current bind operation should use explicit load-store state. */ bool use_explicit_load_store_ = false; -#ifndef GPU_NO_USE_PY_REFERENCES public: +#ifndef GPU_NO_USE_PY_REFERENCES /** * Reference of a pointer that needs to be cleaned when deallocating the frame-buffer. * Points to #BPyGPUFrameBuffer.fb @@ -98,7 +97,6 @@ class FrameBuffer { void **py_ref = nullptr; #endif - public: FrameBuffer(const char *name); virtual ~FrameBuffer(); @@ -238,7 +236,7 @@ class FrameBuffer { return attachments_[GPU_FB_COLOR_ATTACHMENT0 + slot].tex; }; - inline const char *const name_get() const + inline const char *name_get() const { return name_; }; @@ -270,5 +268,4 @@ static inline const FrameBuffer *unwrap(const GPUFrameBuffer *vert) #undef DEBUG_NAME_LEN -} // namespace gpu -} // namespace blender +} // namespace blender::gpu diff --git a/source/blender/gpu/intern/gpu_immediate_private.hh b/source/blender/gpu/intern/gpu_immediate_private.hh index 2be19ff61f5..7f83fc450af 100644 --- a/source/blender/gpu/intern/gpu_immediate_private.hh +++ b/source/blender/gpu/intern/gpu_immediate_private.hh @@ -51,9 +51,8 @@ class Immediate { /** Uniform color: Kept here to update the wide-line shader just before #immBegin. 
*/ float uniform_color[4]; - public: - Immediate(){}; - virtual ~Immediate(){}; + Immediate() = default; + virtual ~Immediate() = default; virtual uchar *begin() = 0; virtual void end() = 0; diff --git a/source/blender/gpu/intern/gpu_immediate_util.cc b/source/blender/gpu/intern/gpu_immediate_util.cc index eb84ea0af20..90845be99a7 100644 --- a/source/blender/gpu/intern/gpu_immediate_util.cc +++ b/source/blender/gpu/intern/gpu_immediate_util.cc @@ -674,7 +674,7 @@ void imm_draw_cylinder_fill_3d( } /* Circle Drawing - Tables for Optimized Drawing Speed */ -#define CIRCLE_RESOL 32 +constexpr static int CIRCLE_RESOL = 32; static void circball_array_fill(const float verts[CIRCLE_RESOL][3], const float cent[3], diff --git a/source/blender/gpu/intern/gpu_index_buffer.cc b/source/blender/gpu/intern/gpu_index_buffer.cc index fdd3ae5db85..7cab56aae5f 100644 --- a/source/blender/gpu/intern/gpu_index_buffer.cc +++ b/source/blender/gpu/intern/gpu_index_buffer.cc @@ -76,9 +76,7 @@ void GPU_indexbuf_init(GPUIndexBufBuilder *builder, uint vertex_len) { int verts_per_prim = GPU_indexbuf_primitive_len(prim_type); -#if TRUST_NO_ONE - assert(verts_per_prim != -1); -#endif + BLI_assert(verts_per_prim != -1); GPU_indexbuf_init_ex(builder, prim_type, prim_len * uint(verts_per_prim), vertex_len); } @@ -110,11 +108,9 @@ void GPU_indexbuf_join(GPUIndexBufBuilder *builder_to, const GPUIndexBufBuilder void GPU_indexbuf_add_generic_vert(GPUIndexBufBuilder *builder, uint v) { -#if TRUST_NO_ONE - assert(builder->data != nullptr); - assert(builder->index_len < builder->max_index_len); - assert(v <= builder->max_allowed_index); -#endif + BLI_assert(builder->data != nullptr); + BLI_assert(builder->index_len < builder->max_index_len); + BLI_assert(v <= builder->max_allowed_index); builder->data[builder->index_len++] = v; builder->index_min = std::min(builder->index_min, v); builder->index_max = std::max(builder->index_max, v); @@ -122,38 +118,30 @@ void GPU_indexbuf_add_generic_vert(GPUIndexBufBuilder *builder, uint v) void GPU_indexbuf_add_primitive_restart(GPUIndexBufBuilder *builder) { -#if TRUST_NO_ONE - assert(builder->data != nullptr); - assert(builder->index_len < builder->max_index_len); -#endif + BLI_assert(builder->data != nullptr); + BLI_assert(builder->index_len < builder->max_index_len); builder->data[builder->index_len++] = builder->restart_index_value; builder->uses_restart_indices = true; } void GPU_indexbuf_add_point_vert(GPUIndexBufBuilder *builder, uint v) { -#if TRUST_NO_ONE - assert(builder->prim_type == GPU_PRIM_POINTS); -#endif + BLI_assert(builder->prim_type == GPU_PRIM_POINTS); GPU_indexbuf_add_generic_vert(builder, v); } void GPU_indexbuf_add_line_verts(GPUIndexBufBuilder *builder, uint v1, uint v2) { -#if TRUST_NO_ONE - assert(builder->prim_type == GPU_PRIM_LINES); - assert(v1 != v2); -#endif + BLI_assert(builder->prim_type == GPU_PRIM_LINES); + BLI_assert(v1 != v2); GPU_indexbuf_add_generic_vert(builder, v1); GPU_indexbuf_add_generic_vert(builder, v2); } void GPU_indexbuf_add_tri_verts(GPUIndexBufBuilder *builder, uint v1, uint v2, uint v3) { -#if TRUST_NO_ONE - assert(builder->prim_type == GPU_PRIM_TRIS); - assert(v1 != v2 && v2 != v3 && v3 != v1); -#endif + BLI_assert(builder->prim_type == GPU_PRIM_TRIS); + BLI_assert(v1 != v2 && v2 != v3 && v3 != v1); GPU_indexbuf_add_generic_vert(builder, v1); GPU_indexbuf_add_generic_vert(builder, v2); GPU_indexbuf_add_generic_vert(builder, v3); @@ -162,10 +150,8 @@ void GPU_indexbuf_add_tri_verts(GPUIndexBufBuilder *builder, uint v1, uint v2, u void 
GPU_indexbuf_add_line_adj_verts( GPUIndexBufBuilder *builder, uint v1, uint v2, uint v3, uint v4) { -#if TRUST_NO_ONE - assert(builder->prim_type == GPU_PRIM_LINES_ADJ); - assert(v2 != v3); /* only the line need diff indices */ -#endif + BLI_assert(builder->prim_type == GPU_PRIM_LINES_ADJ); + BLI_assert(v2 != v3); /* only the line need diff indices */ GPU_indexbuf_add_generic_vert(builder, v1); GPU_indexbuf_add_generic_vert(builder, v2); GPU_indexbuf_add_generic_vert(builder, v3); diff --git a/source/blender/gpu/intern/gpu_matrix.cc b/source/blender/gpu/intern/gpu_matrix.cc index 5883b732fdf..4c2326b3a6b 100644 --- a/source/blender/gpu/intern/gpu_matrix.cc +++ b/source/blender/gpu/intern/gpu_matrix.cc @@ -22,7 +22,7 @@ using namespace blender::gpu; -#define MATRIX_STACK_DEPTH 32 +constexpr static int MATRIX_STACK_DEPTH = 32; using Mat4 = float[4][4]; using Mat3 = float[3][3]; diff --git a/source/blender/gpu/intern/gpu_platform_private.hh b/source/blender/gpu/intern/gpu_platform_private.hh index 68c67ad2fd9..cd05886a617 100644 --- a/source/blender/gpu/intern/gpu_platform_private.hh +++ b/source/blender/gpu/intern/gpu_platform_private.hh @@ -30,7 +30,6 @@ class GPUPlatformGlobal { GPUArchitectureType architecture_type = GPU_ARCHITECTURE_IMR; Vector devices; - public: void init(eGPUDeviceType gpu_device, eGPUOSType os_type, eGPUDriverType driver_type, diff --git a/source/blender/gpu/intern/gpu_query.hh b/source/blender/gpu/intern/gpu_query.hh index 76bb86fa97b..75f4b1a9fc2 100644 --- a/source/blender/gpu/intern/gpu_query.hh +++ b/source/blender/gpu/intern/gpu_query.hh @@ -20,7 +20,7 @@ enum GPUQueryType { class QueryPool { public: - virtual ~QueryPool(){}; + virtual ~QueryPool() = default; /** * Will start and end the query at this index inside the pool. The pool will resize diff --git a/source/blender/gpu/intern/gpu_select_next.cc b/source/blender/gpu/intern/gpu_select_next.cc index 21b0845b84d..9e3f1279656 100644 --- a/source/blender/gpu/intern/gpu_select_next.cc +++ b/source/blender/gpu/intern/gpu_select_next.cc @@ -12,8 +12,6 @@ #include "BLI_rect.h" #include "BLI_span.hh" -#include "GPU_select.hh" - #include "gpu_select_private.hh" struct GPUSelectNextState { diff --git a/source/blender/gpu/intern/gpu_select_pick.cc b/source/blender/gpu/intern/gpu_select_pick.cc index 6401085c58e..4e74b58d9e0 100644 --- a/source/blender/gpu/intern/gpu_select_pick.cc +++ b/source/blender/gpu/intern/gpu_select_pick.cc @@ -52,7 +52,7 @@ struct SubRectStride { }; /** We may want to change back to float if `uint` isn't well supported. */ -typedef uint depth_t; +using depth_t = uint; /** * Calculate values needed for looping over a sub-region (smaller buffer within a larger buffer). diff --git a/source/blender/gpu/intern/gpu_select_private.hh b/source/blender/gpu/intern/gpu_select_private.hh index b1576d391b4..4125f7e398e 100644 --- a/source/blender/gpu/intern/gpu_select_private.hh +++ b/source/blender/gpu/intern/gpu_select_private.hh @@ -10,6 +10,8 @@ #pragma once +#include "GPU_select.hh" + /* gpu_select_pick */ void gpu_select_pick_begin(GPUSelectBuffer *buffer, const rcti *input, eGPUSelectMode mode); @@ -41,6 +43,6 @@ uint gpu_select_next_end(); /* Return a single offset since picking uses squared viewport. 
*/ int gpu_select_next_get_pick_area_center(); eGPUSelectMode gpu_select_next_get_mode(); -void gpu_select_next_set_result(GPUSelectResult *buffer, uint buffer_len); +void gpu_select_next_set_result(GPUSelectResult *hit_buf, uint hit_len); #define SELECT_ID_NONE ((uint)0xffffffff) diff --git a/source/blender/gpu/intern/gpu_shader_interface.hh b/source/blender/gpu/intern/gpu_shader_interface.hh index f228d74406c..4ea3a70f600 100644 --- a/source/blender/gpu/intern/gpu_shader_interface.hh +++ b/source/blender/gpu/intern/gpu_shader_interface.hh @@ -77,7 +77,6 @@ class ShaderInterface { */ uint8_t attr_types_[GPU_VERT_ATTR_MAX_LEN]; - public: ShaderInterface(); virtual ~ShaderInterface(); diff --git a/source/blender/gpu/intern/gpu_shader_private.hh b/source/blender/gpu/intern/gpu_shader_private.hh index f0c3d96684a..778a2cadae1 100644 --- a/source/blender/gpu/intern/gpu_shader_private.hh +++ b/source/blender/gpu/intern/gpu_shader_private.hh @@ -20,8 +20,7 @@ #include #include -namespace blender { -namespace gpu { +namespace blender::gpu { class GPULogParser; class Context; @@ -123,7 +122,7 @@ class Shader { virtual bool get_uses_ssbo_vertex_fetch() const = 0; virtual int get_ssbo_vertex_fetch_output_num_verts() const = 0; - inline const char *const name_get() const + inline const char *name_get() const { return name; } @@ -174,7 +173,7 @@ class ShaderCompiler { }; public: - virtual ~ShaderCompiler(){}; + virtual ~ShaderCompiler() = default; Shader *compile(const shader::ShaderCreateInfo &info, bool is_batch_compilation); @@ -209,11 +208,11 @@ class ShaderCompilerGeneric : public ShaderCompiler { Map batches; public: - virtual ~ShaderCompilerGeneric() override; + ~ShaderCompilerGeneric() override; - virtual BatchHandle batch_compile(Span &infos) override; - virtual bool batch_is_ready(BatchHandle handle) override; - virtual Vector batch_finalize(BatchHandle &handle) override; + BatchHandle batch_compile(Span &infos) override; + bool batch_is_ready(BatchHandle handle) override; + Vector batch_finalize(BatchHandle &handle) override; }; enum class Severity { @@ -259,8 +258,7 @@ class GPULogParser { void printf_begin(Context *ctx); void printf_end(Context *ctx); -} // namespace gpu -} // namespace blender +} // namespace blender::gpu /* XXX do not use it. Special hack to use OCIO with batch API. */ GPUShader *immGetShader(); diff --git a/source/blender/gpu/intern/gpu_state_private.hh b/source/blender/gpu/intern/gpu_state_private.hh index a1a3b028199..e045cdf10ce 100644 --- a/source/blender/gpu/intern/gpu_state_private.hh +++ b/source/blender/gpu/intern/gpu_state_private.hh @@ -16,8 +16,7 @@ #include -namespace blender { -namespace gpu { +namespace blender::gpu { /* Encapsulate all pipeline state that we need to track. * Try to keep small to reduce validation time. 
*/ @@ -105,7 +104,7 @@ BLI_STATIC_ASSERT(sizeof(GPUStateMutable) == sizeof(GPUStateMutable::data), inline bool operator==(const GPUStateMutable &a, const GPUStateMutable &b) { - return memcmp(&a, &b, sizeof(GPUStateMutable)) == 0; + return a.data[0] == b.data[0] && a.data[1] == b.data[1] && a.data[2] == b.data[2]; } inline bool operator!=(const GPUStateMutable &a, const GPUStateMutable &b) @@ -141,9 +140,8 @@ class StateManager { GPUStateMutable mutable_state; bool use_bgl = false; - public: StateManager(); - virtual ~StateManager(){}; + virtual ~StateManager() = default; virtual void apply_state() = 0; virtual void force_state() = 0; @@ -169,8 +167,8 @@ class Fence { bool signalled_ = false; public: - Fence(){}; - virtual ~Fence(){}; + Fence() = default; + virtual ~Fence() = default; virtual void signal() = 0; virtual void wait() = 0; @@ -190,5 +188,4 @@ static inline const Fence *unwrap(const GPUFence *pixbuf) return reinterpret_cast(pixbuf); } -} // namespace gpu -} // namespace blender +} // namespace blender::gpu diff --git a/source/blender/gpu/intern/gpu_storage_buffer.cc b/source/blender/gpu/intern/gpu_storage_buffer.cc index de345fe5470..489ec7a2cfa 100644 --- a/source/blender/gpu/intern/gpu_storage_buffer.cc +++ b/source/blender/gpu/intern/gpu_storage_buffer.cc @@ -15,10 +15,9 @@ #include "gpu_backend.hh" #include "GPU_material.hh" +#include "GPU_storage_buffer.hh" #include "GPU_vertex_buffer.hh" /* For GPUUsageType. */ -#include "GPU_storage_buffer.hh" -#include "GPU_vertex_buffer.hh" #include "gpu_context_private.hh" #include "gpu_storage_buffer_private.hh" diff --git a/source/blender/gpu/intern/gpu_storage_buffer_private.hh b/source/blender/gpu/intern/gpu_storage_buffer_private.hh index fe0406ae3ef..a2cf616bf04 100644 --- a/source/blender/gpu/intern/gpu_storage_buffer_private.hh +++ b/source/blender/gpu/intern/gpu_storage_buffer_private.hh @@ -13,8 +13,7 @@ struct GPUStorageBuf; -namespace blender { -namespace gpu { +namespace blender::gpu { class VertBuf; @@ -67,5 +66,4 @@ static inline const StorageBuf *unwrap(const GPUStorageBuf *storage_buf) #undef DEBUG_NAME_LEN -} // namespace gpu -} // namespace blender +} // namespace blender::gpu diff --git a/source/blender/gpu/intern/gpu_texture.cc b/source/blender/gpu/intern/gpu_texture.cc index 95b546a38f4..df14b555e6a 100644 --- a/source/blender/gpu/intern/gpu_texture.cc +++ b/source/blender/gpu/intern/gpu_texture.cc @@ -260,9 +260,9 @@ static inline GPUTexture *gpu_texture_create(const char *name, const eGPUTextureType type, int mip_len, eGPUTextureFormat tex_format, - eGPUDataFormat data_format, eGPUTextureUsage usage, - const void *pixels) + const void *pixels, + eGPUDataFormat data_format = GPU_DATA_FLOAT) { BLI_assert(mip_len > 0); Texture *tex = GPUBackend::get()->texture_alloc(name); @@ -300,100 +300,97 @@ static inline GPUTexture *gpu_texture_create(const char *name, } GPUTexture *GPU_texture_create_1d(const char *name, - int w, + int width, int mip_len, eGPUTextureFormat format, eGPUTextureUsage usage, const float *data) { - return gpu_texture_create( - name, w, 0, 0, GPU_TEXTURE_1D, mip_len, format, GPU_DATA_FLOAT, usage, data); + return gpu_texture_create(name, width, 0, 0, GPU_TEXTURE_1D, mip_len, format, usage, data); } GPUTexture *GPU_texture_create_1d_array(const char *name, - int w, - int h, + int width, + int layer_len, int mip_len, eGPUTextureFormat format, eGPUTextureUsage usage, const float *data) { return gpu_texture_create( - name, w, h, 0, GPU_TEXTURE_1D_ARRAY, mip_len, format, GPU_DATA_FLOAT, usage, data); + 
name, width, layer_len, 0, GPU_TEXTURE_1D_ARRAY, mip_len, format, usage, data); } GPUTexture *GPU_texture_create_2d(const char *name, - int w, - int h, + int width, + int height, int mip_len, eGPUTextureFormat format, eGPUTextureUsage usage, const float *data) { - return gpu_texture_create( - name, w, h, 0, GPU_TEXTURE_2D, mip_len, format, GPU_DATA_FLOAT, usage, data); + return gpu_texture_create(name, width, height, 0, GPU_TEXTURE_2D, mip_len, format, usage, data); } GPUTexture *GPU_texture_create_2d_array(const char *name, - int w, - int h, - int d, + int width, + int height, + int layer_len, int mip_len, eGPUTextureFormat format, eGPUTextureUsage usage, const float *data) { return gpu_texture_create( - name, w, h, d, GPU_TEXTURE_2D_ARRAY, mip_len, format, GPU_DATA_FLOAT, usage, data); + name, width, height, layer_len, GPU_TEXTURE_2D_ARRAY, mip_len, format, usage, data); } GPUTexture *GPU_texture_create_3d(const char *name, - int w, - int h, - int d, + int width, + int height, + int depth, int mip_len, eGPUTextureFormat texture_format, eGPUTextureUsage usage, const void *data) { return gpu_texture_create( - name, w, h, d, GPU_TEXTURE_3D, mip_len, texture_format, GPU_DATA_FLOAT, usage, data); + name, width, height, depth, GPU_TEXTURE_3D, mip_len, texture_format, usage, data); } GPUTexture *GPU_texture_create_cube(const char *name, - int w, + int width, int mip_len, eGPUTextureFormat format, eGPUTextureUsage usage, const float *data) { - return gpu_texture_create( - name, w, w, 0, GPU_TEXTURE_CUBE, mip_len, format, GPU_DATA_FLOAT, usage, data); + return gpu_texture_create(name, width, width, 0, GPU_TEXTURE_CUBE, mip_len, format, usage, data); } GPUTexture *GPU_texture_create_cube_array(const char *name, - int w, - int d, + int width, + int layer_len, int mip_len, eGPUTextureFormat format, eGPUTextureUsage usage, const float *data) { return gpu_texture_create( - name, w, w, d, GPU_TEXTURE_CUBE_ARRAY, mip_len, format, GPU_DATA_FLOAT, usage, data); + name, width, width, layer_len, GPU_TEXTURE_CUBE_ARRAY, mip_len, format, usage, data); } GPUTexture *GPU_texture_create_compressed_2d(const char *name, - int w, - int h, - int miplen, + int width, + int height, + int mip_len, eGPUTextureFormat tex_format, eGPUTextureUsage usage, const void *data) { Texture *tex = GPUBackend::get()->texture_alloc(name); tex->usage_set(usage); - bool success = tex->init_2D(w, h, 0, miplen, tex_format); + bool success = tex->init_2D(width, height, 0, mip_len, tex_format); if (!success) { delete tex; @@ -401,7 +398,7 @@ GPUTexture *GPU_texture_create_compressed_2d(const char *name, } if (data) { size_t ofs = 0; - for (int mip = 0; mip < miplen; mip++) { + for (int mip = 0; mip < mip_len; mip++) { int extent[3], offset[3] = {0, 0, 0}; tex->mip_size_get(mip, extent); @@ -445,21 +442,13 @@ GPUTexture *GPU_texture_create_error(int dimension, bool is_array) type = (dimension == 2) ? (is_array ? GPU_TEXTURE_2D_ARRAY : GPU_TEXTURE_2D) : type; type = (dimension == 1) ? (is_array ? 
GPU_TEXTURE_1D_ARRAY : GPU_TEXTURE_1D) : type;
-  return gpu_texture_create("invalid_tex",
-                            w,
-                            h,
-                            d,
-                            type,
-                            1,
-                            GPU_RGBA8,
-                            GPU_DATA_FLOAT,
-                            GPU_TEXTURE_USAGE_GENERAL,
-                            pixel);
+  return gpu_texture_create(
+      "invalid_tex", w, h, d, type, 1, GPU_RGBA8, GPU_TEXTURE_USAGE_GENERAL, pixel);
 }
 
 GPUTexture *GPU_texture_create_view(const char *name,
-                                    GPUTexture *src,
-                                    eGPUTextureFormat format,
+                                    GPUTexture *source_texture,
+                                    eGPUTextureFormat view_format,
                                     int mip_start,
                                     int mip_len,
                                     int layer_start,
@@ -469,17 +458,18 @@ GPUTexture *GPU_texture_create_view(const char *name,
 {
   BLI_assert(mip_len > 0);
   BLI_assert(layer_len > 0);
-  BLI_assert_msg(use_stencil == false || (GPU_texture_usage(src) & GPU_TEXTURE_USAGE_FORMAT_VIEW),
+  BLI_assert_msg(use_stencil == false ||
+                     (GPU_texture_usage(source_texture) & GPU_TEXTURE_USAGE_FORMAT_VIEW),
                  "Source texture of TextureView must have GPU_TEXTURE_USAGE_FORMAT_VIEW usage "
                  "flag if view texture uses stencil texturing.");
-  BLI_assert_msg((format == GPU_texture_format(src)) ||
-                     (GPU_texture_usage(src) & GPU_TEXTURE_USAGE_FORMAT_VIEW),
+  BLI_assert_msg((view_format == GPU_texture_format(source_texture)) ||
+                     (GPU_texture_usage(source_texture) & GPU_TEXTURE_USAGE_FORMAT_VIEW),
                  "Source texture of TextureView must have GPU_TEXTURE_USAGE_FORMAT_VIEW usage "
                  "flag if view texture format is different.");
   Texture *view = GPUBackend::get()->texture_alloc(name);
-  view->init_view(src,
-                  format,
-                  unwrap(src)->type_get(),
+  view->init_view(source_texture,
+                  view_format,
+                  unwrap(source_texture)->type_get(),
                   mip_start,
                   mip_len,
                   layer_start,
@@ -498,15 +488,14 @@ eGPUTextureUsage GPU_texture_usage(const GPUTexture *texture_)
 
 /* ------ Update ------ */
 
-void GPU_texture_update_mipmap(GPUTexture *tex_,
-                               int miplvl,
+void GPU_texture_update_mipmap(GPUTexture *texture,
+                               int mip_level,
                                eGPUDataFormat data_format,
                                const void *pixels)
 {
-  Texture *tex = reinterpret_cast<Texture *>(tex_);
   int extent[3] = {1, 1, 1}, offset[3] = {0, 0, 0};
-  tex->mip_size_get(miplvl, extent);
-  reinterpret_cast<Texture *>(tex)->update_sub(miplvl, offset, extent, data_format, pixels);
+  unwrap(texture)->mip_size_get(mip_level, extent);
+  unwrap(texture)->update_sub(mip_level, offset, extent, data_format, pixels);
 }
 
 void GPU_texture_update_sub(GPUTexture *tex,
@@ -521,12 +510,12 @@ void GPU_texture_update_sub(GPUTexture *tex,
 {
   int offset[3] = {offset_x, offset_y, offset_z};
   int extent[3] = {width, height, depth};
-  reinterpret_cast<Texture *>(tex)->update_sub(0, offset, extent, data_format, pixels);
+  unwrap(tex)->update_sub(0, offset, extent, data_format, pixels);
 }
 
-void GPU_texture_update_sub_from_pixel_buffer(GPUTexture *tex,
+void GPU_texture_update_sub_from_pixel_buffer(GPUTexture *texture,
                                               eGPUDataFormat data_format,
-                                              GPUPixelBuffer *pix_buf,
+                                              GPUPixelBuffer *pixel_buf,
                                               int offset_x,
                                               int offset_y,
                                               int offset_z,
@@ -536,28 +525,27 @@ void GPU_texture_update_sub_from_pixel_buffer(GPUTexture *tex,
 {
   int offset[3] = {offset_x, offset_y, offset_z};
   int extent[3] = {width, height, depth};
-  reinterpret_cast<Texture *>(tex)->update_sub(offset, extent, data_format, pix_buf);
+  unwrap(texture)->update_sub(offset, extent, data_format, pixel_buf);
 }
 
-void *GPU_texture_read(GPUTexture *tex_, eGPUDataFormat data_format, int miplvl)
+void *GPU_texture_read(GPUTexture *texture, eGPUDataFormat data_format, int mip_level)
 {
-  Texture *tex = reinterpret_cast<Texture *>(tex_);
   BLI_assert_msg(
-      GPU_texture_usage(tex_) & GPU_TEXTURE_USAGE_HOST_READ,
+      GPU_texture_usage(texture) & GPU_TEXTURE_USAGE_HOST_READ,
       "The host-read usage flag must be specified up-front. Only textures which require data "
-      "reads should be flagged, allowing the backend to make certain optimisations.");
-  return tex->read(miplvl, data_format);
+      "reads should be flagged, allowing the backend to make certain optimizations.");
+  return unwrap(texture)->read(mip_level, data_format);
 }
 
 void GPU_texture_clear(GPUTexture *tex, eGPUDataFormat data_format, const void *data)
 {
   BLI_assert(data != nullptr); /* Do not accept nullptr as parameter. */
-  reinterpret_cast<Texture *>(tex)->clear(data_format, data);
+  unwrap(tex)->clear(data_format, data);
 }
 
 void GPU_texture_update(GPUTexture *tex, eGPUDataFormat data_format, const void *data)
 {
-  reinterpret_cast<Texture *>(tex)->update(data_format, data);
+  unwrap(tex)->update(data_format, data);
 }
 
 void GPU_unpack_row_length_set(uint len)
@@ -567,22 +555,22 @@ void GPU_unpack_row_length_set(uint len)
 
 /* ------ Binding ------ */
 
-void GPU_texture_bind_ex(GPUTexture *tex_, GPUSamplerState state, int unit)
+void GPU_texture_bind_ex(GPUTexture *texture, GPUSamplerState state, int unit)
 {
-  Texture *tex = reinterpret_cast<Texture *>(tex_);
+  Texture *tex = unwrap(texture);
   state = (state.type == GPU_SAMPLER_STATE_TYPE_INTERNAL) ? tex->sampler_state : state;
   Context::get()->state_manager->texture_bind(tex, state, unit);
 }
 
-void GPU_texture_bind(GPUTexture *tex_, int unit)
+void GPU_texture_bind(GPUTexture *texture, int unit)
 {
-  Texture *tex = reinterpret_cast<Texture *>(tex_);
+  Texture *tex = unwrap(texture);
   Context::get()->state_manager->texture_bind(tex, tex->sampler_state, unit);
 }
 
-void GPU_texture_unbind(GPUTexture *tex_)
+void GPU_texture_unbind(GPUTexture *texture)
 {
-  Texture *tex = reinterpret_cast<Texture *>(tex_);
+  Texture *tex = unwrap(texture);
   Context::get()->state_manager->texture_unbind(tex);
 }
 
@@ -608,19 +596,19 @@ void GPU_texture_image_unbind_all()
 
 void GPU_texture_update_mipmap_chain(GPUTexture *tex)
 {
-  reinterpret_cast<Texture *>(tex)->generate_mipmap();
+  unwrap(tex)->generate_mipmap();
 }
 
 void GPU_texture_copy(GPUTexture *dst_, GPUTexture *src_)
 {
-  Texture *src = reinterpret_cast<Texture *>(src_);
-  Texture *dst = reinterpret_cast<Texture *>(dst_);
+  Texture *src = unwrap(src_);
+  Texture *dst = unwrap(dst_);
   src->copy_to(dst);
 }
 
-void GPU_texture_compare_mode(GPUTexture *tex_, bool use_compare)
+void GPU_texture_compare_mode(GPUTexture *texture, bool use_compare)
 {
-  Texture *tex = reinterpret_cast<Texture *>(tex_);
+  Texture *tex = unwrap(texture);
   /* Only depth formats does support compare mode. */
   BLI_assert(!(use_compare) || (tex->format_flag_get() & GPU_FORMAT_DEPTH));
@@ -629,18 +617,18 @@ void GPU_texture_compare_mode(GPUTexture *tex_, bool use_compare)
   tex->sampler_state.custom_type = GPU_SAMPLER_CUSTOM_COMPARE;
 }
 
-void GPU_texture_filter_mode(GPUTexture *tex_, bool use_filter)
+void GPU_texture_filter_mode(GPUTexture *texture, bool use_filter)
 {
-  Texture *tex = reinterpret_cast<Texture *>(tex_);
+  Texture *tex = unwrap(texture);
   /* Stencil and integer format does not support filtering. */
   BLI_assert(!(use_filter) ||
              !(tex->format_flag_get() & (GPU_FORMAT_STENCIL | GPU_FORMAT_INTEGER)));
   tex->sampler_state.set_filtering_flag_from_test(GPU_SAMPLER_FILTERING_LINEAR, use_filter);
 }
 
-void GPU_texture_mipmap_mode(GPUTexture *tex_, bool use_mipmap, bool use_filter)
+void GPU_texture_mipmap_mode(GPUTexture *texture, bool use_mipmap, bool use_filter)
 {
-  Texture *tex = reinterpret_cast<Texture *>(tex_);
+  Texture *tex = unwrap(texture);
   /* Stencil and integer format does not support filtering. */
   BLI_assert(!(use_filter || use_mipmap) ||
              !(tex->format_flag_get() & (GPU_FORMAT_STENCIL | GPU_FORMAT_INTEGER)));
@@ -648,42 +636,39 @@ void GPU_texture_mipmap_mode(GPUTexture *tex_, bool use_mipmap, bool use_filter)
   tex->sampler_state.set_filtering_flag_from_test(GPU_SAMPLER_FILTERING_LINEAR, use_filter);
 }
 
-void GPU_texture_anisotropic_filter(GPUTexture *tex_, bool use_aniso)
+void GPU_texture_anisotropic_filter(GPUTexture *texture, bool use_aniso)
 {
-  Texture *tex = reinterpret_cast<Texture *>(tex_);
+  Texture *tex = unwrap(texture);
   /* Stencil and integer format does not support filtering. */
   BLI_assert(!(use_aniso) ||
              !(tex->format_flag_get() & (GPU_FORMAT_STENCIL | GPU_FORMAT_INTEGER)));
   tex->sampler_state.set_filtering_flag_from_test(GPU_SAMPLER_FILTERING_ANISOTROPIC, use_aniso);
 }
 
-void GPU_texture_extend_mode_x(GPUTexture *tex_, GPUSamplerExtendMode extend_mode)
+void GPU_texture_extend_mode_x(GPUTexture *texture, GPUSamplerExtendMode extend_mode)
 {
-  Texture *tex = reinterpret_cast<Texture *>(tex_);
-  tex->sampler_state.extend_x = extend_mode;
+  unwrap(texture)->sampler_state.extend_x = extend_mode;
 }
 
-void GPU_texture_extend_mode_y(GPUTexture *tex_, GPUSamplerExtendMode extend_mode)
+void GPU_texture_extend_mode_y(GPUTexture *texture, GPUSamplerExtendMode extend_mode)
 {
-  Texture *tex = reinterpret_cast<Texture *>(tex_);
-  tex->sampler_state.extend_yz = extend_mode;
+  unwrap(texture)->sampler_state.extend_yz = extend_mode;
 }
 
-void GPU_texture_extend_mode(GPUTexture *tex_, GPUSamplerExtendMode extend_mode)
+void GPU_texture_extend_mode(GPUTexture *texture, GPUSamplerExtendMode extend_mode)
 {
-  Texture *tex = reinterpret_cast<Texture *>(tex_);
-  tex->sampler_state.extend_x = extend_mode;
-  tex->sampler_state.extend_yz = extend_mode;
+  unwrap(texture)->sampler_state.extend_x = extend_mode;
+  unwrap(texture)->sampler_state.extend_yz = extend_mode;
 }
 
-void GPU_texture_swizzle_set(GPUTexture *tex, const char swizzle[4])
+void GPU_texture_swizzle_set(GPUTexture *texture, const char swizzle[4])
 {
-  reinterpret_cast<Texture *>(tex)->swizzle_set(swizzle);
+  unwrap(texture)->swizzle_set(swizzle);
 }
 
-void GPU_texture_free(GPUTexture *tex_)
+void GPU_texture_free(GPUTexture *texture)
 {
-  Texture *tex = reinterpret_cast<Texture *>(tex_);
+  Texture *tex = unwrap(texture);
   tex->refcount--;
 
   if (tex->refcount < 0) {
@@ -695,14 +680,14 @@ void GPU_texture_free(GPUTexture *tex_)
   }
 }
 
-void GPU_texture_ref(GPUTexture *tex)
+void GPU_texture_ref(GPUTexture *texture)
 {
-  reinterpret_cast<Texture *>(tex)->refcount++;
+  unwrap(texture)->refcount++;
 }
 
-int GPU_texture_dimensions(const GPUTexture *tex_)
+int GPU_texture_dimensions(const GPUTexture *texture)
 {
-  eGPUTextureType type = reinterpret_cast<const Texture *>(tex_)->type_get();
+  eGPUTextureType type = unwrap(texture)->type_get();
   if (type & GPU_TEXTURE_1D) {
     return 1;
   }
@@ -719,51 +704,50 @@ int GPU_texture_dimensions(const GPUTexture *tex_)
   return 1;
 }
 
-int GPU_texture_width(const GPUTexture *tex)
+int GPU_texture_width(const GPUTexture *texture)
 {
-  return reinterpret_cast<const Texture *>(tex)->width_get();
+  return unwrap(texture)->width_get();
 }
 
-int GPU_texture_height(const GPUTexture *tex)
+int GPU_texture_height(const GPUTexture *texture)
 {
-  return reinterpret_cast<const Texture *>(tex)->height_get();
+  return unwrap(texture)->height_get();
 }
 
-int GPU_texture_depth(const GPUTexture *tex)
+int GPU_texture_depth(const GPUTexture *texture)
 {
-  return reinterpret_cast<const Texture *>(tex)->depth_get();
+  return unwrap(texture)->depth_get();
 }
 
-int GPU_texture_layer_count(const GPUTexture *tex)
+int GPU_texture_layer_count(const GPUTexture *texture)
 {
-  return reinterpret_cast<const Texture *>(tex)->layer_count();
+  return unwrap(texture)->layer_count();
 }
 
-int GPU_texture_mip_count(const GPUTexture *tex)
+int GPU_texture_mip_count(const GPUTexture *texture)
 {
-  return reinterpret_cast<const Texture *>(tex)->mip_count();
+  return unwrap(texture)->mip_count();
 }
 
-int GPU_texture_original_width(const GPUTexture *tex)
+int GPU_texture_original_width(const GPUTexture *texture)
 {
-  return reinterpret_cast<const Texture *>(tex)->src_w;
+  return unwrap(texture)->src_w;
 }
 
-int GPU_texture_original_height(const GPUTexture *tex)
+int GPU_texture_original_height(const GPUTexture *texture)
 {
-  return reinterpret_cast<const Texture *>(tex)->src_h;
+  return unwrap(texture)->src_h;
 }
 
-void GPU_texture_original_size_set(GPUTexture *tex_, int w, int h)
+void GPU_texture_original_size_set(GPUTexture *texture, int w, int h)
 {
-  Texture *tex = reinterpret_cast<Texture *>(tex_);
-  tex->src_w = w;
-  tex->src_h = h;
+  unwrap(texture)->src_w = w;
+  unwrap(texture)->src_h = h;
 }
 
-eGPUTextureFormat GPU_texture_format(const GPUTexture *tex)
+eGPUTextureFormat GPU_texture_format(const GPUTexture *texture)
 {
-  return reinterpret_cast<const Texture *>(tex)->format_get();
+  return unwrap(texture)->format_get();
 }
 
 const char *GPU_texture_format_name(eGPUTextureFormat texture_format)
@@ -909,69 +893,68 @@ const char *GPU_texture_format_name(eGPUTextureFormat texture_format)
   return "";
 }
 
-bool GPU_texture_has_depth_format(const GPUTexture *tex)
+bool GPU_texture_has_depth_format(const GPUTexture *texture)
 {
-  return (reinterpret_cast<const Texture *>(tex)->format_flag_get() & GPU_FORMAT_DEPTH) != 0;
+  return (unwrap(texture)->format_flag_get() & GPU_FORMAT_DEPTH) != 0;
 }
 
-bool GPU_texture_has_stencil_format(const GPUTexture *tex)
+bool GPU_texture_has_stencil_format(const GPUTexture *texture)
 {
-  return (reinterpret_cast<const Texture *>(tex)->format_flag_get() & GPU_FORMAT_STENCIL) != 0;
+  return (unwrap(texture)->format_flag_get() & GPU_FORMAT_STENCIL) != 0;
 }
 
-bool GPU_texture_has_integer_format(const GPUTexture *tex)
+bool GPU_texture_has_integer_format(const GPUTexture *texture)
 {
-  return (reinterpret_cast<const Texture *>(tex)->format_flag_get() & GPU_FORMAT_INTEGER) != 0;
+  return (unwrap(texture)->format_flag_get() & GPU_FORMAT_INTEGER) != 0;
 }
 
-bool GPU_texture_has_float_format(const GPUTexture *tex)
+bool GPU_texture_has_float_format(const GPUTexture *texture)
 {
-  return (reinterpret_cast<const Texture *>(tex)->format_flag_get() & GPU_FORMAT_FLOAT) != 0;
+  return (unwrap(texture)->format_flag_get() & GPU_FORMAT_FLOAT) != 0;
 }
 
-bool GPU_texture_has_normalized_format(const GPUTexture *tex)
+bool GPU_texture_has_normalized_format(const GPUTexture *texture)
 {
-  return (reinterpret_cast<const Texture *>(tex)->format_flag_get() &
-          GPU_FORMAT_NORMALIZED_INTEGER) != 0;
+  return (unwrap(texture)->format_flag_get() & GPU_FORMAT_NORMALIZED_INTEGER) != 0;
 }
 
-bool GPU_texture_has_signed_format(const GPUTexture *tex)
+bool GPU_texture_has_signed_format(const GPUTexture *texture)
 {
-  return (reinterpret_cast<const Texture *>(tex)->format_flag_get() & GPU_FORMAT_SIGNED) != 0;
+  return (unwrap(texture)->format_flag_get() & GPU_FORMAT_SIGNED) != 0;
 }
 
-bool GPU_texture_is_cube(const GPUTexture *tex)
+bool GPU_texture_is_cube(const GPUTexture *texture)
 {
-  return (reinterpret_cast<const Texture *>(tex)->type_get() & GPU_TEXTURE_CUBE) != 0;
+  return (unwrap(texture)->type_get() & GPU_TEXTURE_CUBE) != 0;
 }
 
-bool GPU_texture_is_array(const GPUTexture *tex)
+bool GPU_texture_is_array(const GPUTexture *texture)
 {
-  return (reinterpret_cast<const Texture *>(tex)->type_get() & GPU_TEXTURE_ARRAY) != 0;
+  return (unwrap(texture)->type_get() & GPU_TEXTURE_ARRAY) != 0;
 }
 
 #ifndef GPU_NO_USE_PY_REFERENCES
 
-void **GPU_texture_py_reference_get(GPUTexture *tex)
+void **GPU_texture_py_reference_get(GPUTexture *texture)
 {
-  return unwrap(tex)->py_ref;
+  return unwrap(texture)->py_ref;
 }
 
-void GPU_texture_py_reference_set(GPUTexture *tex, void **py_ref)
+void GPU_texture_py_reference_set(GPUTexture *texture, void **py_ref)
 {
-  BLI_assert(py_ref == nullptr || unwrap(tex)->py_ref == nullptr);
-  unwrap(tex)->py_ref = py_ref;
+  BLI_assert(py_ref == nullptr || unwrap(texture)->py_ref == nullptr);
+  unwrap(texture)->py_ref = py_ref;
 }
 
 #endif
 
 /* TODO: remove. */
-int GPU_texture_opengl_bindcode(const GPUTexture *tex)
+int GPU_texture_opengl_bindcode(const GPUTexture *texture)
 {
-  return reinterpret_cast<const Texture *>(tex)->gl_bindcode_get();
+  return unwrap(texture)->gl_bindcode_get();
 }
 
-void GPU_texture_get_mipmap_size(GPUTexture *tex, int lvl, int *r_size)
+void GPU_texture_get_mipmap_size(GPUTexture *texture, int mip_level, int *r_size)
 {
-  return reinterpret_cast<Texture *>(tex)->mip_size_get(lvl, r_size);
+  return unwrap(texture)->mip_size_get(mip_level, r_size);
 }
 
 /** \} */
 
@@ -995,30 +978,30 @@ GPUPixelBuffer *GPU_pixel_buffer_create(size_t size)
   return wrap(pixbuf);
 }
 
-void GPU_pixel_buffer_free(GPUPixelBuffer *pix_buf)
+void GPU_pixel_buffer_free(GPUPixelBuffer *pixel_buf)
 {
-  PixelBuffer *handle = unwrap(pix_buf);
+  PixelBuffer *handle = unwrap(pixel_buf);
   delete handle;
 }
 
-void *GPU_pixel_buffer_map(GPUPixelBuffer *pix_buf)
+void *GPU_pixel_buffer_map(GPUPixelBuffer *pixel_buf)
 {
-  return reinterpret_cast<PixelBuffer *>(pix_buf)->map();
+  return unwrap(pixel_buf)->map();
 }
 
-void GPU_pixel_buffer_unmap(GPUPixelBuffer *pix_buf)
+void GPU_pixel_buffer_unmap(GPUPixelBuffer *pixel_buf)
 {
-  reinterpret_cast<PixelBuffer *>(pix_buf)->unmap();
+  unwrap(pixel_buf)->unmap();
 }
 
-size_t GPU_pixel_buffer_size(GPUPixelBuffer *pix_buf)
+size_t GPU_pixel_buffer_size(GPUPixelBuffer *pixel_buf)
 {
-  return reinterpret_cast<PixelBuffer *>(pix_buf)->get_size();
+  return unwrap(pixel_buf)->get_size();
 }
 
-int64_t GPU_pixel_buffer_get_native_handle(GPUPixelBuffer *pix_buf)
+int64_t GPU_pixel_buffer_get_native_handle(GPUPixelBuffer *pixel_buf)
 {
-  return reinterpret_cast<PixelBuffer *>(pix_buf)->get_native_handle();
+  return unwrap(pixel_buf)->get_native_handle();
 }
 
 /** \} */
diff --git a/source/blender/gpu/intern/gpu_texture_private.hh b/source/blender/gpu/intern/gpu_texture_private.hh
index a2f65aaafa6..223bc2780ed 100644
--- a/source/blender/gpu/intern/gpu_texture_private.hh
+++ b/source/blender/gpu/intern/gpu_texture_private.hh
@@ -14,8 +14,7 @@
 
 #include "gpu_framebuffer_private.hh"
 
-namespace blender {
-namespace gpu {
+namespace blender::gpu {
 
 enum eGPUTextureFormatFlag {
   /* The format has a depth component and can be used as depth attachment. */
@@ -341,7 +340,7 @@ class PixelBuffer {
  public:
   PixelBuffer(size_t size) : size_(size){};
-  virtual ~PixelBuffer(){};
+  virtual ~PixelBuffer() = default;
 
   virtual void *map() = 0;
   virtual void unmap() = 0;
@@ -1170,5 +1169,4 @@ static inline eGPUTextureFormat to_texture_format(const GPUVertFormat *format)
   return GPU_DEPTH_COMPONENT24;
 }
 
-}  // namespace gpu
-}  // namespace blender
+}  // namespace blender::gpu
diff --git a/source/blender/gpu/intern/gpu_uniform_buffer_private.hh b/source/blender/gpu/intern/gpu_uniform_buffer_private.hh
index 721bd9b8fd3..b2618f1c444 100644
--- a/source/blender/gpu/intern/gpu_uniform_buffer_private.hh
+++ b/source/blender/gpu/intern/gpu_uniform_buffer_private.hh
@@ -12,8 +12,7 @@
 
 struct GPUUniformBuf;
 
-namespace blender {
-namespace gpu {
+namespace blender::gpu {
 
 #ifndef NDEBUG
 # define DEBUG_NAME_LEN 64
@@ -69,5 +68,4 @@ static inline const UniformBuf *unwrap(const GPUUniformBuf *vert)
 
 #undef DEBUG_NAME_LEN
 
-}  // namespace gpu
-}  // namespace blender
+}  // namespace blender::gpu
diff --git a/source/blender/gpu/intern/gpu_vertex_buffer.cc b/source/blender/gpu/intern/gpu_vertex_buffer.cc
index 74f3b8334bf..c5f2cc1c1b2 100644
--- a/source/blender/gpu/intern/gpu_vertex_buffer.cc
+++ b/source/blender/gpu/intern/gpu_vertex_buffer.cc
@@ -17,8 +17,6 @@
 
 #include "gpu_context_private.hh" /* TODO: remove. */
 
-#include "GPU_vertex_buffer.hh"
-
 #include <cstring>
 
 /* -------------------------------------------------------------------- */
diff --git a/source/blender/gpu/intern/gpu_vertex_format.cc b/source/blender/gpu/intern/gpu_vertex_format.cc
index 9e8f7d8f003..7ca41332787 100644
--- a/source/blender/gpu/intern/gpu_vertex_format.cc
+++ b/source/blender/gpu/intern/gpu_vertex_format.cc
@@ -9,6 +9,7 @@
  */
 
 #include "GPU_vertex_format.hh"
+#include "BLI_assert.h"
 #include "GPU_capabilities.hh"
 
 #include "gpu_shader_create_info.hh"
@@ -33,7 +34,7 @@ using namespace blender::gpu::shader;
 
 void GPU_vertformat_clear(GPUVertFormat *format)
 {
-#if TRUST_NO_ONE
+#ifndef NDEBUG
   memset(format, 0, sizeof(GPUVertFormat));
 #else
   format->attr_len = 0;
@@ -56,9 +57,7 @@ void GPU_vertformat_copy(GPUVertFormat *dest, const GPUVertFormat &src)
 
 static uint comp_size(GPUVertCompType type)
 {
-#if TRUST_NO_ONE
-  assert(type <= GPU_COMP_F32); /* other types have irregular sizes (not bytes) */
-#endif
+  BLI_assert(type <= GPU_COMP_F32); /* other types have irregular sizes (not bytes) */
   const uint sizes[] = {1, 1, 2, 2, 4, 4, 4};
   return sizes[type];
 }
@@ -89,9 +88,7 @@ static uint attr_align(const GPUVertAttr *a, uint minimum_stride)
 
 uint vertex_buffer_size(const GPUVertFormat *format, uint vertex_len)
 {
-#if TRUST_NO_ONE
-  assert(format->packed && format->stride > 0);
-#endif
+  BLI_assert(format->packed && format->stride > 0);
   return format->stride * vertex_len;
 }
@@ -112,12 +109,9 @@ static uchar copy_attr_name(GPUVertFormat *format, const char *name)
       break;
     }
   }
-#if TRUST_NO_ONE
-  assert(terminated);
-  assert(format->name_offset <= GPU_VERT_ATTR_NAMES_BUF_LEN);
-#else
-  (void)terminated;
-#endif
+  BLI_assert(terminated);
+  BLI_assert(format->name_offset <= GPU_VERT_ATTR_NAMES_BUF_LEN);
+  UNUSED_VARS_NDEBUG(terminated);
   return name_offset;
 }
@@ -127,33 +121,33 @@ uint GPU_vertformat_attr_add(GPUVertFormat *format,
                              uint comp_len,
                              GPUVertFetchMode fetch_mode)
 {
-#if TRUST_NO_ONE
-  assert(format->name_len < GPU_VERT_FORMAT_MAX_NAMES); /* there's room for more */
-  assert(format->attr_len < GPU_VERT_ATTR_MAX_LEN);     /* there's room for more */
-  assert(!format->packed);                              /* packed means frozen/locked */
-  assert((comp_len >= 1 && comp_len <= 4) || comp_len == 8 || comp_len == 12 || comp_len == 16);
+  BLI_assert(format->name_len < GPU_VERT_FORMAT_MAX_NAMES); /* there's room for more */
+  BLI_assert(format->attr_len < GPU_VERT_ATTR_MAX_LEN);     /* there's room for more */
+  BLI_assert(!format->packed);                              /* packed means frozen/locked */
+  BLI_assert((comp_len >= 1 && comp_len <= 4) || comp_len == 8 || comp_len == 12 ||
+             comp_len == 16);
 
   switch (comp_type) {
     case GPU_COMP_F32:
       /* float type can only kept as float */
-      assert(fetch_mode == GPU_FETCH_FLOAT);
+      BLI_assert(fetch_mode == GPU_FETCH_FLOAT);
       break;
     case GPU_COMP_I10:
       /* 10_10_10 format intended for normals (XYZ) or colors (RGB)
        * extra component packed.w can be manually set to { -2, -1, 0, 1 } */
-      assert(ELEM(comp_len, 3, 4));
+      BLI_assert(ELEM(comp_len, 3, 4));
       /* Not strictly required, may relax later. */
-      assert(fetch_mode == GPU_FETCH_INT_TO_FLOAT_UNIT);
+      BLI_assert(fetch_mode == GPU_FETCH_INT_TO_FLOAT_UNIT);
       break;
     default:
      /* integer types can be kept as int or converted/normalized to float */
-      assert(fetch_mode != GPU_FETCH_FLOAT);
+      BLI_assert(fetch_mode != GPU_FETCH_FLOAT);
      /* only support float matrices (see Batch_update_program_bindings) */
-      assert(!ELEM(comp_len, 8, 12, 16));
+      BLI_assert(!ELEM(comp_len, 8, 12, 16));
   }
-#endif
+
   format->name_len++; /* Multi-name support. */
   const uint attr_id = format->attr_len++;
@@ -174,10 +168,8 @@ uint GPU_vertformat_attr_add(GPUVertFormat *format,
 void GPU_vertformat_alias_add(GPUVertFormat *format, const char *alias)
 {
   GPUVertAttr *attr = &format->attrs[format->attr_len - 1];
-#if TRUST_NO_ONE
-  assert(format->name_len < GPU_VERT_FORMAT_MAX_NAMES); /* there's room for more */
-  assert(attr->name_len < GPU_VERT_ATTR_MAX_NAMES);
-#endif
+  BLI_assert(format->name_len < GPU_VERT_FORMAT_MAX_NAMES); /* there's room for more */
+  BLI_assert(attr->name_len < GPU_VERT_ATTR_MAX_NAMES);
   format->name_len++; /* Multi-name support. */
   attr->names[attr->name_len++] = copy_attr_name(format, alias);
 }
diff --git a/source/blender/gpu/intern/gpu_vertex_format_private.hh b/source/blender/gpu/intern/gpu_vertex_format_private.hh
index f081a856bbf..722dea63574 100644
--- a/source/blender/gpu/intern/gpu_vertex_format_private.hh
+++ b/source/blender/gpu/intern/gpu_vertex_format_private.hh
@@ -10,6 +10,8 @@
 
 #pragma once
 
+#include "GPU_vertex_buffer.hh"
+
 struct GPUVertFormat;
 
 void VertexFormat_pack(GPUVertFormat *format);
diff --git a/source/blender/gpu/metal/mtl_batch.mm b/source/blender/gpu/metal/mtl_batch.mm
index c316ba71f2e..5d527af7dc7 100644
--- a/source/blender/gpu/metal/mtl_batch.mm
+++ b/source/blender/gpu/metal/mtl_batch.mm
@@ -741,10 +741,7 @@ void MTLBatch::prepare_vertex_descriptor_and_bindings(MTLVertBuf **buffers, int
 
 void MTLBatch::draw_advanced(int v_first, int v_count, int i_first, int i_count)
 {
-
-#if TRUST_NO_ONE
   BLI_assert(v_count > 0 && i_count > 0);
-#endif
 
   /* Setup RenderPipelineState for batch. */
   MTLContext *ctx = MTLContext::get();
diff --git a/source/blender/gpu/metal/mtl_texture.mm b/source/blender/gpu/metal/mtl_texture.mm
index 10d141952f9..86aa0ececce 100644
--- a/source/blender/gpu/metal/mtl_texture.mm
+++ b/source/blender/gpu/metal/mtl_texture.mm
@@ -504,11 +504,9 @@ void gpu::MTLTexture::update_sub(
   this->ensure_baked();
 
   /* Safety checks. */
-#if TRUST_NO_ONE
   BLI_assert(mip >= mip_min_ && mip <= mip_max_);
   BLI_assert(mip < texture_.mipmapLevelCount);
   BLI_assert(texture_.mipmapLevelCount >= mip_max_);
-#endif
 
   /* DEPTH FLAG - Depth formats cannot use direct BLIT - pass off to their own routine which will
    * do a depth-only render. */
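
For readers scanning the texture and pixel-buffer hunks above: the patch replaces ad-hoc `reinterpret_cast` calls with the `wrap`/`unwrap` helpers that each GPU type already declares in its private header (the `unwrap(const GPUUniformBuf *)` overload is visible as context in gpu_uniform_buffer_private.hh above). The following is a minimal, self-contained sketch of that opaque-handle pattern, not the actual Blender declarations; the `GPUExampleBuf`/`ExampleBuf` names are hypothetical, and the real helpers for `Texture` and `PixelBuffer` live in gpu_texture_private.hh.

/* Illustrative sketch only: the public C handle stays an opaque struct, while the
 * private C++ class is reached through inline wrap()/unwrap() helpers instead of
 * repeating reinterpret_cast at every call site. */
struct GPUExampleBuf; /* Opaque handle exposed by the C-style GPU_* API. */

namespace blender::gpu {

class ExampleBuf {
 public:
  int width = 0;
};

/* Centralizing the casts keeps const-correctness and leaves only one place to change
 * if the handle/implementation mapping ever changes. */
static inline ExampleBuf *unwrap(GPUExampleBuf *handle)
{
  return reinterpret_cast<ExampleBuf *>(handle);
}
static inline const ExampleBuf *unwrap(const GPUExampleBuf *handle)
{
  return reinterpret_cast<const ExampleBuf *>(handle);
}
static inline GPUExampleBuf *wrap(ExampleBuf *buf)
{
  return reinterpret_cast<GPUExampleBuf *>(buf);
}

}  // namespace blender::gpu

/* A C-style accessor then reads like the patched functions above. */
int GPU_examplebuf_width(const GPUExampleBuf *buf)
{
  return blender::gpu::unwrap(buf)->width;
}

Because both const and non-const overloads exist, `unwrap(texture)` can stand in for both `reinterpret_cast<Texture *>` and `reinterpret_cast<const Texture *>` call sites without any extra casting at the callers.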