Cleanup: GPU: Fix clang tidy warnings

Removes some other things like:
- `TRUST_NO_ONE` which was the same as `#ifndef NDEBUG`.
- Replaces `reinterpret_cast` with `unwrap`.

Pull Request: https://projects.blender.org/blender/blender/pulls/129631
This commit is contained in:
Clément Foucault
2024-10-31 15:18:29 +01:00
committed by Clément Foucault
parent 61505200e9
commit 324517fd78
42 changed files with 350 additions and 441 deletions

View File

@@ -29,10 +29,10 @@
struct GPUShader;
#define GPU_BATCH_VBO_MAX_LEN 16
#define GPU_BATCH_INST_VBO_MAX_LEN 2
#define GPU_BATCH_VAO_STATIC_LEN 3
#define GPU_BATCH_VAO_DYN_ALLOC_COUNT 16
constexpr static int GPU_BATCH_VBO_MAX_LEN = 16;
constexpr static int GPU_BATCH_INST_VBO_MAX_LEN = 2;
constexpr static int GPU_BATCH_VAO_STATIC_LEN = 3;
constexpr static int GPU_BATCH_VAO_DYN_ALLOC_COUNT = 16;
enum eGPUBatchFlag {
/** Invalid default state. */

View File

@@ -11,20 +11,7 @@
#define PROGRAM_NO_OPTI 0
// #define GPU_NO_USE_PY_REFERENCES
#if defined(NDEBUG)
# define TRUST_NO_ONE 0
#else
/* strict error checking, enabled for debug builds during early development */
# define TRUST_NO_ONE 1
#endif
#include "BLI_sys_types.h"
#include <stdbool.h>
#include <stdint.h>
#if TRUST_NO_ONE
# include <assert.h>
#endif
/* GPU_INLINE */
#if defined(_MSC_VER)

View File

@@ -191,7 +191,7 @@ struct SpecializationConstant {
StringRefNull name;
Value value;
SpecializationConstant() {}
SpecializationConstant() = default;
SpecializationConstant(const char *name, uint32_t value) : type(Type::UINT), name(name)
{

View File

@@ -38,7 +38,7 @@ enum eGPUFrameBufferBits {
ENUM_OPERATORS(eGPUFrameBufferBits, GPU_STENCIL_BIT)
/* Guaranteed by the spec and is never greater than 16 on any hardware or implementation. */
#define GPU_MAX_VIEWPORTS 16
constexpr static int GPU_MAX_VIEWPORTS = 16;
struct GPUAttachment {
GPUTexture *tex;
@@ -80,7 +80,7 @@ GPUFrameBuffer *GPU_framebuffer_back_get();
* Create a #GPUFrameBuffer object. It is not configured and not bound to a specific context until
* `GPU_framebuffer_bind()` is called.
*/
void GPU_framebuffer_free(GPUFrameBuffer *framebuffer);
void GPU_framebuffer_free(GPUFrameBuffer *fb);
#define GPU_FRAMEBUFFER_FREE_SAFE(fb) \
do { \
@@ -112,12 +112,12 @@ void GPU_backbuffer_bind(eGPUBackBuffer back_buffer_type);
/**
* Binds a #GPUFrameBuffer making it the active framebuffer for all geometry rendering.
*/
void GPU_framebuffer_bind(GPUFrameBuffer *framebuffer);
void GPU_framebuffer_bind(GPUFrameBuffer *fb);
/**
* Same as `GPU_framebuffer_bind` but do not enable the SRGB transform.
*/
void GPU_framebuffer_bind_no_srgb(GPUFrameBuffer *framebuffer);
void GPU_framebuffer_bind_no_srgb(GPUFrameBuffer *fb);
/**
* Binds back the active context's default frame-buffer.
@@ -174,7 +174,7 @@ struct GPULoadStore {
* })
* \endcode
*/
void GPU_framebuffer_bind_loadstore(GPUFrameBuffer *framebuffer,
void GPU_framebuffer_bind_loadstore(GPUFrameBuffer *fb,
const GPULoadStore *load_store_actions,
uint load_store_actions_len);
#define GPU_framebuffer_bind_ex(_fb, ...) \
@@ -204,7 +204,7 @@ void GPU_framebuffer_bind_loadstore(GPUFrameBuffer *framebuffer,
*
* \note Excess attachments will have no effect as long as they are GPU_ATTACHMENT_IGNORE.
*/
void GPU_framebuffer_subpass_transition_array(GPUFrameBuffer *framebuffer,
void GPU_framebuffer_subpass_transition_array(GPUFrameBuffer *fb,
const GPUAttachmentState *attachment_states,
uint attachment_len);
@@ -255,9 +255,7 @@ void GPU_framebuffer_subpass_transition_array(GPUFrameBuffer *framebuffer,
* Setting #GPUAttachment.mip to -1 will leave the texture in this slot.
* Setting #GPUAttachment.tex to nullptr will detach the texture in this slot.
*/
void GPU_framebuffer_config_array(GPUFrameBuffer *framebuffer,
const GPUAttachment *config,
int config_len);
void GPU_framebuffer_config_array(GPUFrameBuffer *fb, const GPUAttachment *config, int config_len);
/** Empty bind point. */
#define GPU_ATTACHMENT_NONE \
@@ -311,10 +309,7 @@ void GPU_framebuffer_config_array(GPUFrameBuffer *framebuffer,
* DEPRECATED: Prefer using multiple #GPUFrameBuffer with different configurations with
* `GPU_framebuffer_config_array()`.
*/
void GPU_framebuffer_texture_attach(GPUFrameBuffer *framebuffer,
GPUTexture *texture,
int slot,
int mip);
void GPU_framebuffer_texture_attach(GPUFrameBuffer *fb, GPUTexture *texture, int slot, int mip);
/**
* Attach a single layer of an array texture mip level to a #GPUFrameBuffer.
@@ -326,7 +321,7 @@ void GPU_framebuffer_texture_attach(GPUFrameBuffer *framebuffer,
* `GPU_framebuffer_config_array()`.
*/
void GPU_framebuffer_texture_layer_attach(
GPUFrameBuffer *framebuffer, GPUTexture *texture, int slot, int layer, int mip);
GPUFrameBuffer *fb, GPUTexture *texture, int slot, int layer, int mip);
/**
* Attach a single cube-face of an cube-map texture mip level to a #GPUFrameBuffer.
@@ -338,7 +333,7 @@ void GPU_framebuffer_texture_layer_attach(
* `GPU_framebuffer_config_array()`.
*/
void GPU_framebuffer_texture_cubeface_attach(
GPUFrameBuffer *framebuffer, GPUTexture *texture, int slot, int face, int mip);
GPUFrameBuffer *fb, GPUTexture *texture, int slot, int face, int mip);
/**
* Detach a texture from a #GPUFrameBuffer. The texture must be attached.
@@ -346,7 +341,7 @@ void GPU_framebuffer_texture_cubeface_attach(
* DEPRECATED: Prefer using multiple #GPUFrameBuffer with different configurations with
* `GPU_framebuffer_config_array()`.
*/
void GPU_framebuffer_texture_detach(GPUFrameBuffer *framebuffer, GPUTexture *texture);
void GPU_framebuffer_texture_detach(GPUFrameBuffer *fb, GPUTexture *texture);
/**
* Checks a framebuffer current configuration for errors.
@@ -355,7 +350,7 @@ void GPU_framebuffer_texture_detach(GPUFrameBuffer *framebuffer, GPUTexture *tex
* \a err_out is an error output buffer.
* \return false if the framebuffer is invalid.
*/
bool GPU_framebuffer_check_valid(GPUFrameBuffer *framebuffer, char err_out[256]);
bool GPU_framebuffer_check_valid(GPUFrameBuffer *fb, char err_out[256]);
/** \} */
@@ -371,7 +366,7 @@ bool GPU_framebuffer_check_valid(GPUFrameBuffer *framebuffer, char err_out[256])
* Default size is used if the frame-buffer contains no attachments.
* It needs to be re-specified each time an attachment is added.
*/
void GPU_framebuffer_default_size(GPUFrameBuffer *framebuffer, int width, int height);
void GPU_framebuffer_default_size(GPUFrameBuffer *fb, int width, int height);
/** \} */
@@ -388,8 +383,7 @@ void GPU_framebuffer_default_size(GPUFrameBuffer *framebuffer, int width, int he
* \note Setting a singular viewport will only change the state of the first viewport.
* \note Must be called after first bind.
*/
void GPU_framebuffer_viewport_set(
GPUFrameBuffer *framebuffer, int x, int y, int width, int height);
void GPU_framebuffer_viewport_set(GPUFrameBuffer *fb, int x, int y, int width, int height);
/**
* Similar to `GPU_framebuffer_viewport_set()` but specify the bounds of all 16 viewports.
@@ -406,13 +400,13 @@ void GPU_framebuffer_multi_viewports_set(GPUFrameBuffer *gpu_fb,
* Return the viewport offset and size in a int quadruple: (x, y, width, height).
* \note Viewport and scissor size is stored per frame-buffer.
*/
void GPU_framebuffer_viewport_get(GPUFrameBuffer *framebuffer, int r_viewport[4]);
void GPU_framebuffer_viewport_get(GPUFrameBuffer *fb, int r_viewport[4]);
/**
* Reset a frame-buffer viewport bounds to its attachment(s) size.
* \note Viewport and scissor size is stored per frame-buffer.
*/
void GPU_framebuffer_viewport_reset(GPUFrameBuffer *framebuffer);
void GPU_framebuffer_viewport_reset(GPUFrameBuffer *fb);
/** \} */
@@ -432,7 +426,7 @@ void GPU_framebuffer_viewport_reset(GPUFrameBuffer *framebuffer);
* \note `GPU_write_mask`, and stencil test do not affect this command.
* \note Viewport and scissor regions affect this command but are not efficient nor recommended.
*/
void GPU_framebuffer_clear(GPUFrameBuffer *framebuffer,
void GPU_framebuffer_clear(GPUFrameBuffer *fb,
eGPUFrameBufferBits buffers,
const float clear_col[4],
float clear_depth,
@@ -495,7 +489,7 @@ void GPU_framebuffer_clear_color_depth_stencil(GPUFrameBuffer *fb,
* \note `GPU_write_mask`, and stencil test do not affect this command.
* \note Viewport and scissor regions affect this command but are not efficient nor recommended.
*/
void GPU_framebuffer_multi_clear(GPUFrameBuffer *framebuffer, const float (*clear_colors)[4]);
void GPU_framebuffer_multi_clear(GPUFrameBuffer *fb, const float (*clear_colors)[4]);
/**
* Clear all color attachment textures of the active frame-buffer with the given red, green, blue,
@@ -520,7 +514,7 @@ void GPU_clear_depth(float depth);
/** \name Debugging introspection API.
* \{ */
const char *GPU_framebuffer_get_name(GPUFrameBuffer *framebuffer);
const char *GPU_framebuffer_get_name(GPUFrameBuffer *fb);
/** \} */
@@ -536,8 +530,8 @@ const char *GPU_framebuffer_get_name(GPUFrameBuffer *framebuffer);
* Points to #BPyGPUFrameBuffer.fb
*/
#ifndef GPU_NO_USE_PY_REFERENCES
void **GPU_framebuffer_py_reference_get(GPUFrameBuffer *framebuffer);
void GPU_framebuffer_py_reference_set(GPUFrameBuffer *framebuffer, void **py_ref);
void **GPU_framebuffer_py_reference_get(GPUFrameBuffer *fb);
void GPU_framebuffer_py_reference_set(GPUFrameBuffer *fb, void **py_ref);
#endif
/**
@@ -547,7 +541,7 @@ void GPU_framebuffer_py_reference_set(GPUFrameBuffer *framebuffer, void **py_ref
*/
/* TODO(fclem): This has nothing to do with the GPU module and should be move to the pyGPU module.
*/
void GPU_framebuffer_push(GPUFrameBuffer *framebuffer);
void GPU_framebuffer_push(GPUFrameBuffer *fb);
GPUFrameBuffer *GPU_framebuffer_pop();
uint GPU_framebuffer_stack_level_get();
@@ -562,14 +556,14 @@ uint GPU_framebuffer_stack_level_get();
* \note return false if no context is active.
* \note this is undefined behavior if \a framebuffer is `nullptr`.
* DEPRECATED: Kept only because of Python GPU API. */
bool GPU_framebuffer_bound(GPUFrameBuffer *framebuffer);
bool GPU_framebuffer_bound(GPUFrameBuffer *fb);
/**
* Read a region of the framebuffer depth attachment and copy it to \a r_data .
* The pixel data will be converted to \a data_format but it needs to be compatible with the
* attachment type. DEPRECATED: Prefer using `GPU_texture_read()`.
*/
void GPU_framebuffer_read_depth(GPUFrameBuffer *framebuffer,
void GPU_framebuffer_read_depth(GPUFrameBuffer *fb,
int x,
int y,
int width,
@@ -582,7 +576,7 @@ void GPU_framebuffer_read_depth(GPUFrameBuffer *framebuffer,
* The pixel data will be converted to \a data_format but it needs to be compatible with the
* attachment type. DEPRECATED: Prefer using `GPU_texture_read()`.
*/
void GPU_framebuffer_read_color(GPUFrameBuffer *framebuffer,
void GPU_framebuffer_read_color(GPUFrameBuffer *fb,
int x,
int y,
int width,
@@ -625,7 +619,7 @@ void GPU_framebuffer_blit(GPUFrameBuffer *fb_read,
* DEPRECATED: Prefer using a compute shader with arbitrary imageLoad/Store for this purpose
* as it is clearer and likely faster with optimizations.
*/
void GPU_framebuffer_recursive_downsample(GPUFrameBuffer *framebuffer,
void GPU_framebuffer_recursive_downsample(GPUFrameBuffer *fb,
int max_level,
void (*per_level_callback)(void *user_data, int level),
void *user_data);

View File

@@ -10,6 +10,8 @@
#pragma once
#include "BLI_sys_types.h"
/* Draw 2D rectangles (replaces glRect functions) */
/* caller is responsible for vertex format & shader */
void immRectf(uint pos, float x1, float y1, float x2, float y2);

View File

@@ -223,5 +223,5 @@ int GPU_matrix_stack_level_get_projection();
/* Not part of the GPU_matrix API,
* however we need to check these limits in code that calls into these API's. */
#define GPU_MATRIX_ORTHO_CLIP_NEAR_DEFAULT (-100)
#define GPU_MATRIX_ORTHO_CLIP_FAR_DEFAULT (100)
constexpr static int GPU_MATRIX_ORTHO_CLIP_NEAR_DEFAULT = -100;
constexpr static int GPU_MATRIX_ORTHO_CLIP_FAR_DEFAULT = 100;

View File

@@ -66,13 +66,13 @@ enum GPUArchitectureType {
GPU_ARCHITECTURE_TBDR = 1,
};
/* Description of one GPU device as enumerated by the backend.
 * Post-change form: plain C++ `struct` (the `typedef struct ... } GPUDevice;`
 * C idiom is unnecessary in C++). */
struct GPUDevice {
  /* Unique identifier string for this device. */
  std::string identifier;
  /* Index of the device in the enumeration order. */
  int index;
  /* PCI vendor identifier. */
  uint32_t vendor_id;
  /* PCI device identifier. */
  uint32_t device_id;
  /* Human readable device name. */
  std::string name;
};
/* GPU Types */
/* TODO: Verify all use-cases of GPU_type_matches to determine which graphics API it should apply

View File

@@ -8,10 +8,10 @@
#pragma once
/* Available GPU backends, usable as a bit-mask (see GPU_BACKEND_ANY).
 * Post-change form: plain C++ `enum` without the redundant C `typedef`. */
enum eGPUBackendType {
  GPU_BACKEND_NONE = 0,
  GPU_BACKEND_OPENGL = 1 << 0,
  GPU_BACKEND_METAL = 1 << 1,
  GPU_BACKEND_VULKAN = 1 << 3,
  GPU_BACKEND_ANY = 0xFFFFFFFFu
};

View File

@@ -26,10 +26,10 @@ struct GPUShader;
/* Hardware limit is 16. Position attribute is always needed so we reduce to 15.
* This makes sure the GPUVertexFormat name buffer does not overflow. */
#define GPU_MAX_ATTR 15
constexpr static int GPU_MAX_ATTR = 15;
/* Determined by the maximum uniform buffer size divided by chunk size. */
#define GPU_MAX_UNIFORM_ATTR 8
constexpr static int GPU_MAX_UNIFORM_ATTR = 8;
/* -------------------------------------------------------------------- */
/** \name Creation

View File

@@ -977,17 +977,17 @@ bool GPU_texture_has_integer_format(const GPUTexture *texture);
/**
* Return true if the texture format is a float type.
*/
bool GPU_texture_has_float_format(const GPUTexture *tex);
bool GPU_texture_has_float_format(const GPUTexture *texture);
/**
* Return true if the texture format is an integer normalized type.
*/
bool GPU_texture_has_normalized_format(const GPUTexture *tex);
bool GPU_texture_has_normalized_format(const GPUTexture *texture);
/**
* Return true if the texture format is a signed type.
*/
bool GPU_texture_has_signed_format(const GPUTexture *tex);
bool GPU_texture_has_signed_format(const GPUTexture *texture);
/**
* Returns the pixel dimensions of a texture's mip-map level.

View File

@@ -15,6 +15,8 @@
#pragma once
#include "BLI_sys_types.h"
struct ListBase;
/** Opaque type hiding blender::gpu::UniformBuf. */
@@ -49,4 +51,4 @@ void GPU_uniformbuf_clear_to_zero(GPUUniformBuf *ubo);
#define GPU_UBO_BLOCK_NAME "node_tree"
#define GPU_ATTRIBUTE_UBO_BLOCK_NAME "unf_attrs"
#define GPU_LAYER_ATTRIBUTE_UBO_BLOCK_NAME "drw_layer_attrs"
#define GPU_NODE_TREE_UBO_SLOT 0
constexpr static int GPU_NODE_TREE_UBO_SLOT = 0;

View File

@@ -17,12 +17,12 @@
struct GPUShader;
#define GPU_VERT_ATTR_MAX_LEN 16
#define GPU_VERT_ATTR_MAX_NAMES 6
#define GPU_VERT_ATTR_NAMES_BUF_LEN 256
#define GPU_VERT_FORMAT_MAX_NAMES 63 /* More than enough, actual max is ~30. */
constexpr static int GPU_VERT_ATTR_MAX_LEN = 16;
constexpr static int GPU_VERT_ATTR_MAX_NAMES = 6;
constexpr static int GPU_VERT_ATTR_NAMES_BUF_LEN = 256;
constexpr static int GPU_VERT_FORMAT_MAX_NAMES = 63; /* More than enough, actual max is ~30. */
/* Computed as GPU_VERT_ATTR_NAMES_BUF_LEN / 30 (actual max format name). */
#define GPU_MAX_SAFE_ATTR_NAME 12
constexpr static int GPU_MAX_SAFE_ATTR_NAME = 12;
enum GPUVertCompType {
GPU_COMP_I8 = 0,
@@ -162,32 +162,27 @@ struct GPUNormal {
};
};
/* OpenGL ES packs in a different order as desktop GL but component conversion is the same.
* Of the code here, only GPUPackedNormal needs to change. */
#define SIGNED_INT_10_MAX 511
#define SIGNED_INT_10_MIN -512
/* Clamp `x` into the inclusive range [min_allowed, max_allowed].
 * Asserts (debug builds only, via BLI_assert) that the range is well formed. */
BLI_INLINE int clampi(int x, int min_allowed, int max_allowed)
{
  BLI_assert(min_allowed <= max_allowed);
  if (x < min_allowed) {
    return min_allowed;
  }
  /* No `else` needed: previous branch returns (clang-tidy readability-else-after-return). */
  if (x > max_allowed) {
    return max_allowed;
  }
  return x;
}
/* Quantize a normalized float in [-1, 1] to a signed 10-bit integer,
 * clamped to the representable range [-512, 511]. */
BLI_INLINE int gpu_convert_normalized_f32_to_i10(float x)
{
  /* OpenGL ES packs in a different order as desktop GL but component conversion is the same.
   * Of the code here, only GPUPackedNormal needs to change. */
  constexpr int signed_int_10_max = 511;
  constexpr int signed_int_10_min = -512;

  int qx = x * signed_int_10_max;
  return clampi(qx, signed_int_10_min, signed_int_10_max);
}
BLI_INLINE int gpu_convert_i16_to_i10(short x)

View File

@@ -8,8 +8,6 @@
#pragma once
#include <stdbool.h>
#include "DNA_scene_types.h"
#include "DNA_vec_types.h"

View File

@@ -12,8 +12,7 @@
#include "GPU_vertex_buffer.hh"
namespace blender {
namespace gpu {
namespace blender::gpu {
class Context;
@@ -64,5 +63,4 @@ class GPUBackend {
virtual void render_step() = 0;
};
} // namespace gpu
} // namespace blender
} // namespace blender::gpu

View File

@@ -25,8 +25,6 @@
#include "gpu_context_private.hh"
#include "gpu_shader_private.hh"
#include "GPU_batch.hh"
#include <cstring>
using namespace blender::gpu;

View File

@@ -279,7 +279,7 @@ bool GPU_backend_type_selection_is_overridden()
bool GPU_backend_type_selection_detect()
{
blender::VectorSet<eGPUBackendType> backends_to_check;
if (GPU_backend_type_selection_is_overridden()) {
if (g_backend_type_override.has_value()) {
backends_to_check.add(*g_backend_type_override);
}
#if defined(WITH_OPENGL_BACKEND)

View File

@@ -87,7 +87,7 @@ class Context {
virtual void memory_statistics_get(int *r_total_mem, int *r_free_mem) = 0;
virtual void debug_group_begin(const char *, int){};
virtual void debug_group_begin(const char * /*name*/, int /*index*/){};
virtual void debug_group_end(){};
/* Returns true if capture successfully started. */

View File

@@ -12,8 +12,7 @@
#include "GPU_drawlist.hh"
namespace blender {
namespace gpu {
namespace blender::gpu {
/**
* Implementation of Multi Draw Indirect.
@@ -21,7 +20,7 @@ namespace gpu {
*/
class DrawList {
public:
virtual ~DrawList(){};
virtual ~DrawList() = default;
virtual void append(Batch *batch, int i_first, int i_count) = 0;
virtual void submit() = 0;
@@ -41,5 +40,4 @@ static inline const DrawList *unwrap(const GPUDrawList *vert)
return reinterpret_cast<const DrawList *>(vert);
}
} // namespace gpu
} // namespace blender
} // namespace blender::gpu

View File

@@ -277,59 +277,58 @@ GPUFrameBuffer *GPU_framebuffer_create(const char *name)
return wrap(GPUBackend::get()->framebuffer_alloc(name));
}
void GPU_framebuffer_free(GPUFrameBuffer *gpu_fb)
void GPU_framebuffer_free(GPUFrameBuffer *fb)
{
delete unwrap(gpu_fb);
delete unwrap(fb);
}
const char *GPU_framebuffer_get_name(GPUFrameBuffer *gpu_fb)
const char *GPU_framebuffer_get_name(GPUFrameBuffer *fb)
{
return unwrap(gpu_fb)->name_get();
return unwrap(fb)->name_get();
}
/* ---------- Binding ----------- */
void GPU_framebuffer_bind(GPUFrameBuffer *gpu_fb)
void GPU_framebuffer_bind(GPUFrameBuffer *fb)
{
const bool enable_srgb = true;
/* Disable custom loadstore and bind. */
unwrap(gpu_fb)->set_use_explicit_loadstore(false);
unwrap(gpu_fb)->bind(enable_srgb);
unwrap(fb)->set_use_explicit_loadstore(false);
unwrap(fb)->bind(enable_srgb);
}
void GPU_framebuffer_bind_loadstore(GPUFrameBuffer *gpu_fb,
void GPU_framebuffer_bind_loadstore(GPUFrameBuffer *fb,
const GPULoadStore *load_store_actions,
uint actions_len)
{
const bool enable_srgb = true;
/* Bind with explicit loadstore state */
unwrap(gpu_fb)->set_use_explicit_loadstore(true);
unwrap(gpu_fb)->bind(enable_srgb);
unwrap(fb)->set_use_explicit_loadstore(true);
unwrap(fb)->bind(enable_srgb);
/* Update load store */
FrameBuffer *fb = unwrap(gpu_fb);
fb->load_store_config_array(load_store_actions, actions_len);
unwrap(fb)->load_store_config_array(load_store_actions, actions_len);
}
void GPU_framebuffer_subpass_transition_array(GPUFrameBuffer *gpu_fb,
void GPU_framebuffer_subpass_transition_array(GPUFrameBuffer *fb,
const GPUAttachmentState *attachment_states,
uint attachment_len)
{
unwrap(gpu_fb)->subpass_transition(
unwrap(fb)->subpass_transition(
attachment_states[0], Span<GPUAttachmentState>(attachment_states + 1, attachment_len - 1));
}
void GPU_framebuffer_bind_no_srgb(GPUFrameBuffer *gpu_fb)
void GPU_framebuffer_bind_no_srgb(GPUFrameBuffer *fb)
{
const bool enable_srgb = false;
unwrap(gpu_fb)->bind(enable_srgb);
unwrap(fb)->bind(enable_srgb);
}
void GPU_backbuffer_bind(eGPUBackBuffer buffer)
void GPU_backbuffer_bind(eGPUBackBuffer back_buffer_type)
{
Context *ctx = Context::get();
if (buffer == GPU_BACKBUFFER_LEFT) {
if (back_buffer_type == GPU_BACKBUFFER_LEFT) {
ctx->back_left->bind(false);
}
else {
@@ -510,12 +509,12 @@ void GPU_framebuffer_clear_color_depth_stencil(GPUFrameBuffer *fb,
fb, GPU_COLOR_BIT | GPU_DEPTH_BIT | GPU_STENCIL_BIT, clear_col, clear_depth, clear_stencil);
}
void GPU_framebuffer_multi_clear(GPUFrameBuffer *gpu_fb, const float (*clear_cols)[4])
void GPU_framebuffer_multi_clear(GPUFrameBuffer *fb, const float (*clear_colors)[4])
{
BLI_assert_msg(unwrap(gpu_fb)->get_use_explicit_loadstore() == false,
BLI_assert_msg(unwrap(fb)->get_use_explicit_loadstore() == false,
"Using GPU_framebuffer_clear_* functions in conjunction with custom load-store "
"state via GPU_framebuffer_bind_ex is invalid.");
unwrap(gpu_fb)->clear_multi(clear_cols);
unwrap(fb)->clear_multi(clear_colors);
}
void GPU_clear_color(float red, float green, float blue, float alpha)
@@ -537,13 +536,13 @@ void GPU_clear_depth(float depth)
}
void GPU_framebuffer_read_depth(
GPUFrameBuffer *gpu_fb, int x, int y, int w, int h, eGPUDataFormat format, void *data)
GPUFrameBuffer *fb, int x, int y, int w, int h, eGPUDataFormat format, void *data)
{
int rect[4] = {x, y, w, h};
unwrap(gpu_fb)->read(GPU_DEPTH_BIT, format, rect, 1, 1, data);
unwrap(fb)->read(GPU_DEPTH_BIT, format, rect, 1, 1, data);
}
void GPU_framebuffer_read_color(GPUFrameBuffer *gpu_fb,
void GPU_framebuffer_read_color(GPUFrameBuffer *fb,
int x,
int y,
int w,
@@ -554,7 +553,7 @@ void GPU_framebuffer_read_color(GPUFrameBuffer *gpu_fb,
void *data)
{
int rect[4] = {x, y, w, h};
unwrap(gpu_fb)->read(GPU_COLOR_BIT, format, rect, channels, slot, data);
unwrap(fb)->read(GPU_COLOR_BIT, format, rect, channels, slot, data);
}
void GPU_frontbuffer_read_color(
@@ -565,14 +564,14 @@ void GPU_frontbuffer_read_color(
}
/* TODO(fclem): port as texture operation. */
void GPU_framebuffer_blit(GPUFrameBuffer *gpufb_read,
void GPU_framebuffer_blit(GPUFrameBuffer *gpu_fb_read,
int read_slot,
GPUFrameBuffer *gpufb_write,
GPUFrameBuffer *gpu_fb_write,
int write_slot,
eGPUFrameBufferBits blit_buffers)
{
FrameBuffer *fb_read = unwrap(gpufb_read);
FrameBuffer *fb_write = unwrap(gpufb_write);
FrameBuffer *fb_read = unwrap(gpu_fb_read);
FrameBuffer *fb_write = unwrap(gpu_fb_write);
BLI_assert(blit_buffers != 0);
FrameBuffer *prev_fb = Context::get()->active_fb;
@@ -605,24 +604,24 @@ void GPU_framebuffer_blit(GPUFrameBuffer *gpufb_read,
prev_fb->bind(true);
}
void GPU_framebuffer_recursive_downsample(GPUFrameBuffer *gpu_fb,
int max_lvl,
void (*callback)(void *user_data, int level),
void GPU_framebuffer_recursive_downsample(GPUFrameBuffer *fb,
int max_level,
void (*per_level_callback)(void *user_data, int level),
void *user_data)
{
unwrap(gpu_fb)->recursive_downsample(max_lvl, callback, user_data);
unwrap(fb)->recursive_downsample(max_level, per_level_callback, user_data);
}
#ifndef GPU_NO_USE_PY_REFERENCES
void **GPU_framebuffer_py_reference_get(GPUFrameBuffer *gpu_fb)
void **GPU_framebuffer_py_reference_get(GPUFrameBuffer *fb)
{
return unwrap(gpu_fb)->py_ref;
return unwrap(fb)->py_ref;
}
void GPU_framebuffer_py_reference_set(GPUFrameBuffer *gpu_fb, void **py_ref)
void GPU_framebuffer_py_reference_set(GPUFrameBuffer *fb, void **py_ref)
{
BLI_assert(py_ref == nullptr || unwrap(gpu_fb)->py_ref == nullptr);
unwrap(gpu_fb)->py_ref = py_ref;
BLI_assert(py_ref == nullptr || unwrap(fb)->py_ref == nullptr);
unwrap(fb)->py_ref = py_ref;
}
#endif
@@ -671,9 +670,9 @@ uint GPU_framebuffer_stack_level_get()
* Might be bound to multiple contexts.
* \{ */
#define MAX_CTX_FB_LEN 3
struct GPUOffScreen {
constexpr static int MAX_CTX_FB_LEN = 3;
struct {
Context *ctx;
GPUFrameBuffer *fb;
@@ -726,7 +725,7 @@ static GPUFrameBuffer *gpu_offscreen_fb_get(GPUOffScreen *ofs)
GPUOffScreen *GPU_offscreen_create(int width,
int height,
bool depth,
bool with_depth_buffer,
eGPUTextureFormat format,
eGPUTextureUsage usage,
char err_out[256])
@@ -743,14 +742,14 @@ GPUOffScreen *GPU_offscreen_create(int width,
ofs->color = GPU_texture_create_2d("ofs_color", width, height, 1, format, usage, nullptr);
if (depth) {
if (with_depth_buffer) {
/* Format view flag is needed by Workbench Volumes to read the stencil view. */
eGPUTextureUsage depth_usage = usage | GPU_TEXTURE_USAGE_FORMAT_VIEW;
ofs->depth = GPU_texture_create_2d(
"ofs_depth", width, height, 1, GPU_DEPTH24_STENCIL8, depth_usage, nullptr);
}
if ((depth && !ofs->depth) || !ofs->color) {
if ((with_depth_buffer && !ofs->depth) || !ofs->color) {
const char error[] = "GPUTexture: Texture allocation failed.";
if (err_out) {
BLI_strncpy(err_out, error, 256);
@@ -773,33 +772,33 @@ GPUOffScreen *GPU_offscreen_create(int width,
return ofs;
}
void GPU_offscreen_free(GPUOffScreen *ofs)
void GPU_offscreen_free(GPUOffScreen *offscreen)
{
for (auto &framebuffer : ofs->framebuffers) {
for (auto &framebuffer : offscreen->framebuffers) {
if (framebuffer.fb) {
GPU_framebuffer_free(framebuffer.fb);
}
}
if (ofs->color) {
GPU_texture_free(ofs->color);
if (offscreen->color) {
GPU_texture_free(offscreen->color);
}
if (ofs->depth) {
GPU_texture_free(ofs->depth);
if (offscreen->depth) {
GPU_texture_free(offscreen->depth);
}
MEM_freeN(ofs);
MEM_freeN(offscreen);
}
void GPU_offscreen_bind(GPUOffScreen *ofs, bool save)
void GPU_offscreen_bind(GPUOffScreen *offscreen, bool save)
{
if (save) {
GPUFrameBuffer *fb = GPU_framebuffer_active_get();
GPU_framebuffer_push(fb);
}
unwrap(gpu_offscreen_fb_get(ofs))->bind(false);
unwrap(gpu_offscreen_fb_get(offscreen))->bind(false);
}
void GPU_offscreen_unbind(GPUOffScreen * /*ofs*/, bool restore)
void GPU_offscreen_unbind(GPUOffScreen * /*offscreen*/, bool restore)
{
GPUFrameBuffer *fb = nullptr;
if (restore) {
@@ -814,48 +813,48 @@ void GPU_offscreen_unbind(GPUOffScreen * /*ofs*/, bool restore)
}
}
void GPU_offscreen_draw_to_screen(GPUOffScreen *ofs, int x, int y)
void GPU_offscreen_draw_to_screen(GPUOffScreen *offscreen, int x, int y)
{
Context *ctx = Context::get();
FrameBuffer *ofs_fb = unwrap(gpu_offscreen_fb_get(ofs));
FrameBuffer *ofs_fb = unwrap(gpu_offscreen_fb_get(offscreen));
ofs_fb->blit_to(GPU_COLOR_BIT, 0, ctx->active_fb, 0, x, y);
}
void GPU_offscreen_read_color_region(
GPUOffScreen *ofs, eGPUDataFormat format, int x, int y, int w, int h, void *r_data)
GPUOffScreen *offscreen, eGPUDataFormat format, int x, int y, int w, int h, void *r_data)
{
BLI_assert(ELEM(format, GPU_DATA_UBYTE, GPU_DATA_FLOAT));
BLI_assert(x >= 0 && y >= 0 && w > 0 && h > 0);
BLI_assert(x + w <= GPU_texture_width(ofs->color));
BLI_assert(y + h <= GPU_texture_height(ofs->color));
BLI_assert(x + w <= GPU_texture_width(offscreen->color));
BLI_assert(y + h <= GPU_texture_height(offscreen->color));
GPUFrameBuffer *ofs_fb = gpu_offscreen_fb_get(ofs);
GPUFrameBuffer *ofs_fb = gpu_offscreen_fb_get(offscreen);
GPU_framebuffer_read_color(ofs_fb, x, y, w, h, 4, 0, format, r_data);
}
void GPU_offscreen_read_color(GPUOffScreen *ofs, eGPUDataFormat format, void *r_data)
void GPU_offscreen_read_color(GPUOffScreen *offscreen, eGPUDataFormat format, void *r_data)
{
BLI_assert(ELEM(format, GPU_DATA_UBYTE, GPU_DATA_FLOAT));
const int w = GPU_texture_width(ofs->color);
const int h = GPU_texture_height(ofs->color);
const int w = GPU_texture_width(offscreen->color);
const int h = GPU_texture_height(offscreen->color);
GPU_offscreen_read_color_region(ofs, format, 0, 0, w, h, r_data);
GPU_offscreen_read_color_region(offscreen, format, 0, 0, w, h, r_data);
}
int GPU_offscreen_width(const GPUOffScreen *ofs)
int GPU_offscreen_width(const GPUOffScreen *offscreen)
{
return GPU_texture_width(ofs->color);
return GPU_texture_width(offscreen->color);
}
int GPU_offscreen_height(const GPUOffScreen *ofs)
int GPU_offscreen_height(const GPUOffScreen *offscreen)
{
return GPU_texture_height(ofs->color);
return GPU_texture_height(offscreen->color);
}
/* Return the off-screen color texture (non-owning). */
GPUTexture *GPU_offscreen_color_texture(const GPUOffScreen *offscreen)
{
  return offscreen->color;
}
eGPUTextureFormat GPU_offscreen_format(const GPUOffScreen *offscreen)
@@ -863,14 +862,14 @@ eGPUTextureFormat GPU_offscreen_format(const GPUOffScreen *offscreen)
return GPU_texture_format(offscreen->color);
}
void GPU_offscreen_viewport_data_get(GPUOffScreen *ofs,
void GPU_offscreen_viewport_data_get(GPUOffScreen *offscreen,
GPUFrameBuffer **r_fb,
GPUTexture **r_color,
GPUTexture **r_depth)
{
*r_fb = gpu_offscreen_fb_get(ofs);
*r_color = ofs->color;
*r_depth = ofs->depth;
*r_fb = gpu_offscreen_fb_get(offscreen);
*r_color = offscreen->color;
*r_depth = offscreen->depth;
}
/** \} */

View File

@@ -61,8 +61,7 @@ inline GPUAttachmentType &operator--(GPUAttachmentType &a)
return a;
}
namespace blender {
namespace gpu {
namespace blender::gpu {
#ifndef NDEBUG
# define DEBUG_NAME_LEN 64
@@ -89,8 +88,8 @@ class FrameBuffer {
/* Flag specifying the current bind operation should use explicit load-store state. */
bool use_explicit_load_store_ = false;
#ifndef GPU_NO_USE_PY_REFERENCES
public:
#ifndef GPU_NO_USE_PY_REFERENCES
/**
* Reference of a pointer that needs to be cleaned when deallocating the frame-buffer.
* Points to #BPyGPUFrameBuffer.fb
@@ -98,7 +97,6 @@ class FrameBuffer {
void **py_ref = nullptr;
#endif
public:
FrameBuffer(const char *name);
virtual ~FrameBuffer();
@@ -238,7 +236,7 @@ class FrameBuffer {
return attachments_[GPU_FB_COLOR_ATTACHMENT0 + slot].tex;
};
inline const char *const name_get() const
inline const char *name_get() const
{
return name_;
};
@@ -270,5 +268,4 @@ static inline const FrameBuffer *unwrap(const GPUFrameBuffer *vert)
#undef DEBUG_NAME_LEN
} // namespace gpu
} // namespace blender
} // namespace blender::gpu

View File

@@ -51,9 +51,8 @@ class Immediate {
/** Uniform color: Kept here to update the wide-line shader just before #immBegin. */
float uniform_color[4];
public:
Immediate(){};
virtual ~Immediate(){};
Immediate() = default;
virtual ~Immediate() = default;
virtual uchar *begin() = 0;
virtual void end() = 0;

View File

@@ -674,7 +674,7 @@ void imm_draw_cylinder_fill_3d(
}
/* Circle Drawing - Tables for Optimized Drawing Speed */
#define CIRCLE_RESOL 32
constexpr static int CIRCLE_RESOL = 32;
static void circball_array_fill(const float verts[CIRCLE_RESOL][3],
const float cent[3],

View File

@@ -76,9 +76,7 @@ void GPU_indexbuf_init(GPUIndexBufBuilder *builder,
uint vertex_len)
{
int verts_per_prim = GPU_indexbuf_primitive_len(prim_type);
#if TRUST_NO_ONE
assert(verts_per_prim != -1);
#endif
BLI_assert(verts_per_prim != -1);
GPU_indexbuf_init_ex(builder, prim_type, prim_len * uint(verts_per_prim), vertex_len);
}
@@ -110,11 +108,9 @@ void GPU_indexbuf_join(GPUIndexBufBuilder *builder_to, const GPUIndexBufBuilder
void GPU_indexbuf_add_generic_vert(GPUIndexBufBuilder *builder, uint v)
{
#if TRUST_NO_ONE
assert(builder->data != nullptr);
assert(builder->index_len < builder->max_index_len);
assert(v <= builder->max_allowed_index);
#endif
BLI_assert(builder->data != nullptr);
BLI_assert(builder->index_len < builder->max_index_len);
BLI_assert(v <= builder->max_allowed_index);
builder->data[builder->index_len++] = v;
builder->index_min = std::min(builder->index_min, v);
builder->index_max = std::max(builder->index_max, v);
@@ -122,38 +118,30 @@ void GPU_indexbuf_add_generic_vert(GPUIndexBufBuilder *builder, uint v)
void GPU_indexbuf_add_primitive_restart(GPUIndexBufBuilder *builder)
{
#if TRUST_NO_ONE
assert(builder->data != nullptr);
assert(builder->index_len < builder->max_index_len);
#endif
BLI_assert(builder->data != nullptr);
BLI_assert(builder->index_len < builder->max_index_len);
builder->data[builder->index_len++] = builder->restart_index_value;
builder->uses_restart_indices = true;
}
void GPU_indexbuf_add_point_vert(GPUIndexBufBuilder *builder, uint v)
{
#if TRUST_NO_ONE
assert(builder->prim_type == GPU_PRIM_POINTS);
#endif
BLI_assert(builder->prim_type == GPU_PRIM_POINTS);
GPU_indexbuf_add_generic_vert(builder, v);
}
void GPU_indexbuf_add_line_verts(GPUIndexBufBuilder *builder, uint v1, uint v2)
{
#if TRUST_NO_ONE
assert(builder->prim_type == GPU_PRIM_LINES);
assert(v1 != v2);
#endif
BLI_assert(builder->prim_type == GPU_PRIM_LINES);
BLI_assert(v1 != v2);
GPU_indexbuf_add_generic_vert(builder, v1);
GPU_indexbuf_add_generic_vert(builder, v2);
}
void GPU_indexbuf_add_tri_verts(GPUIndexBufBuilder *builder, uint v1, uint v2, uint v3)
{
#if TRUST_NO_ONE
assert(builder->prim_type == GPU_PRIM_TRIS);
assert(v1 != v2 && v2 != v3 && v3 != v1);
#endif
BLI_assert(builder->prim_type == GPU_PRIM_TRIS);
BLI_assert(v1 != v2 && v2 != v3 && v3 != v1);
GPU_indexbuf_add_generic_vert(builder, v1);
GPU_indexbuf_add_generic_vert(builder, v2);
GPU_indexbuf_add_generic_vert(builder, v3);
@@ -162,10 +150,8 @@ void GPU_indexbuf_add_tri_verts(GPUIndexBufBuilder *builder, uint v1, uint v2, u
void GPU_indexbuf_add_line_adj_verts(
GPUIndexBufBuilder *builder, uint v1, uint v2, uint v3, uint v4)
{
#if TRUST_NO_ONE
assert(builder->prim_type == GPU_PRIM_LINES_ADJ);
assert(v2 != v3); /* only the line need diff indices */
#endif
BLI_assert(builder->prim_type == GPU_PRIM_LINES_ADJ);
BLI_assert(v2 != v3); /* only the line need diff indices */
GPU_indexbuf_add_generic_vert(builder, v1);
GPU_indexbuf_add_generic_vert(builder, v2);
GPU_indexbuf_add_generic_vert(builder, v3);

View File

@@ -22,7 +22,7 @@
using namespace blender::gpu;
#define MATRIX_STACK_DEPTH 32
constexpr static int MATRIX_STACK_DEPTH = 32;
using Mat4 = float[4][4];
using Mat3 = float[3][3];

View File

@@ -30,7 +30,6 @@ class GPUPlatformGlobal {
GPUArchitectureType architecture_type = GPU_ARCHITECTURE_IMR;
Vector<GPUDevice> devices;
public:
void init(eGPUDeviceType gpu_device,
eGPUOSType os_type,
eGPUDriverType driver_type,

View File

@@ -20,7 +20,7 @@ enum GPUQueryType {
class QueryPool {
public:
virtual ~QueryPool(){};
virtual ~QueryPool() = default;
/**
* Will start and end the query at this index inside the pool. The pool will resize

View File

@@ -12,8 +12,6 @@
#include "BLI_rect.h"
#include "BLI_span.hh"
#include "GPU_select.hh"
#include "gpu_select_private.hh"
struct GPUSelectNextState {

View File

@@ -52,7 +52,7 @@ struct SubRectStride {
};
/** We may want to change back to float if `uint` isn't well supported. */
typedef uint depth_t;
using depth_t = uint;
/**
* Calculate values needed for looping over a sub-region (smaller buffer within a larger buffer).

View File

@@ -10,6 +10,8 @@
#pragma once
#include "GPU_select.hh"
/* gpu_select_pick */
void gpu_select_pick_begin(GPUSelectBuffer *buffer, const rcti *input, eGPUSelectMode mode);
@@ -41,6 +43,6 @@ uint gpu_select_next_end();
/* Return a single offset since picking uses squared viewport. */
int gpu_select_next_get_pick_area_center();
eGPUSelectMode gpu_select_next_get_mode();
void gpu_select_next_set_result(GPUSelectResult *buffer, uint buffer_len);
void gpu_select_next_set_result(GPUSelectResult *hit_buf, uint hit_len);
#define SELECT_ID_NONE ((uint)0xffffffff)

View File

@@ -77,7 +77,6 @@ class ShaderInterface {
*/
uint8_t attr_types_[GPU_VERT_ATTR_MAX_LEN];
public:
ShaderInterface();
virtual ~ShaderInterface();

View File

@@ -20,8 +20,7 @@
#include <mutex>
#include <string>
namespace blender {
namespace gpu {
namespace blender::gpu {
class GPULogParser;
class Context;
@@ -123,7 +122,7 @@ class Shader {
virtual bool get_uses_ssbo_vertex_fetch() const = 0;
virtual int get_ssbo_vertex_fetch_output_num_verts() const = 0;
inline const char *const name_get() const
inline const char *name_get() const
{
return name;
}
@@ -174,7 +173,7 @@ class ShaderCompiler {
};
public:
virtual ~ShaderCompiler(){};
virtual ~ShaderCompiler() = default;
Shader *compile(const shader::ShaderCreateInfo &info, bool is_batch_compilation);
@@ -209,11 +208,11 @@ class ShaderCompilerGeneric : public ShaderCompiler {
Map<BatchHandle, Batch> batches;
public:
virtual ~ShaderCompilerGeneric() override;
~ShaderCompilerGeneric() override;
virtual BatchHandle batch_compile(Span<const shader::ShaderCreateInfo *> &infos) override;
virtual bool batch_is_ready(BatchHandle handle) override;
virtual Vector<Shader *> batch_finalize(BatchHandle &handle) override;
BatchHandle batch_compile(Span<const shader::ShaderCreateInfo *> &infos) override;
bool batch_is_ready(BatchHandle handle) override;
Vector<Shader *> batch_finalize(BatchHandle &handle) override;
};
enum class Severity {
@@ -259,8 +258,7 @@ class GPULogParser {
void printf_begin(Context *ctx);
void printf_end(Context *ctx);
} // namespace gpu
} // namespace blender
} // namespace blender::gpu
/* XXX do not use it. Special hack to use OCIO with batch API. */
GPUShader *immGetShader();

View File

@@ -16,8 +16,7 @@
#include <cstring>
namespace blender {
namespace gpu {
namespace blender::gpu {
/* Encapsulate all pipeline state that we need to track.
* Try to keep small to reduce validation time. */
@@ -105,7 +104,7 @@ BLI_STATIC_ASSERT(sizeof(GPUStateMutable) == sizeof(GPUStateMutable::data),
inline bool operator==(const GPUStateMutable &a, const GPUStateMutable &b)
{
return memcmp(&a, &b, sizeof(GPUStateMutable)) == 0;
return a.data[0] == b.data[0] && a.data[1] == b.data[1] && a.data[2] == b.data[2];
}
inline bool operator!=(const GPUStateMutable &a, const GPUStateMutable &b)
@@ -141,9 +140,8 @@ class StateManager {
GPUStateMutable mutable_state;
bool use_bgl = false;
public:
StateManager();
virtual ~StateManager(){};
virtual ~StateManager() = default;
virtual void apply_state() = 0;
virtual void force_state() = 0;
@@ -169,8 +167,8 @@ class Fence {
bool signalled_ = false;
public:
Fence(){};
virtual ~Fence(){};
Fence() = default;
virtual ~Fence() = default;
virtual void signal() = 0;
virtual void wait() = 0;
@@ -190,5 +188,4 @@ static inline const Fence *unwrap(const GPUFence *pixbuf)
return reinterpret_cast<const Fence *>(pixbuf);
}
} // namespace gpu
} // namespace blender
} // namespace blender::gpu

View File

@@ -15,10 +15,9 @@
#include "gpu_backend.hh"
#include "GPU_material.hh"
#include "GPU_storage_buffer.hh"
#include "GPU_vertex_buffer.hh" /* For GPUUsageType. */
#include "GPU_storage_buffer.hh"
#include "GPU_vertex_buffer.hh"
#include "gpu_context_private.hh"
#include "gpu_storage_buffer_private.hh"

View File

@@ -13,8 +13,7 @@
struct GPUStorageBuf;
namespace blender {
namespace gpu {
namespace blender::gpu {
class VertBuf;
@@ -67,5 +66,4 @@ static inline const StorageBuf *unwrap(const GPUStorageBuf *storage_buf)
#undef DEBUG_NAME_LEN
} // namespace gpu
} // namespace blender
} // namespace blender::gpu

View File

@@ -260,9 +260,9 @@ static inline GPUTexture *gpu_texture_create(const char *name,
const eGPUTextureType type,
int mip_len,
eGPUTextureFormat tex_format,
eGPUDataFormat data_format,
eGPUTextureUsage usage,
const void *pixels)
const void *pixels,
eGPUDataFormat data_format = GPU_DATA_FLOAT)
{
BLI_assert(mip_len > 0);
Texture *tex = GPUBackend::get()->texture_alloc(name);
@@ -300,100 +300,97 @@ static inline GPUTexture *gpu_texture_create(const char *name,
}
GPUTexture *GPU_texture_create_1d(const char *name,
int w,
int width,
int mip_len,
eGPUTextureFormat format,
eGPUTextureUsage usage,
const float *data)
{
return gpu_texture_create(
name, w, 0, 0, GPU_TEXTURE_1D, mip_len, format, GPU_DATA_FLOAT, usage, data);
return gpu_texture_create(name, width, 0, 0, GPU_TEXTURE_1D, mip_len, format, usage, data);
}
GPUTexture *GPU_texture_create_1d_array(const char *name,
int w,
int h,
int width,
int layer_len,
int mip_len,
eGPUTextureFormat format,
eGPUTextureUsage usage,
const float *data)
{
return gpu_texture_create(
name, w, h, 0, GPU_TEXTURE_1D_ARRAY, mip_len, format, GPU_DATA_FLOAT, usage, data);
name, width, layer_len, 0, GPU_TEXTURE_1D_ARRAY, mip_len, format, usage, data);
}
GPUTexture *GPU_texture_create_2d(const char *name,
int w,
int h,
int width,
int height,
int mip_len,
eGPUTextureFormat format,
eGPUTextureUsage usage,
const float *data)
{
return gpu_texture_create(
name, w, h, 0, GPU_TEXTURE_2D, mip_len, format, GPU_DATA_FLOAT, usage, data);
return gpu_texture_create(name, width, height, 0, GPU_TEXTURE_2D, mip_len, format, usage, data);
}
GPUTexture *GPU_texture_create_2d_array(const char *name,
int w,
int h,
int d,
int width,
int height,
int layer_len,
int mip_len,
eGPUTextureFormat format,
eGPUTextureUsage usage,
const float *data)
{
return gpu_texture_create(
name, w, h, d, GPU_TEXTURE_2D_ARRAY, mip_len, format, GPU_DATA_FLOAT, usage, data);
name, width, height, layer_len, GPU_TEXTURE_2D_ARRAY, mip_len, format, usage, data);
}
GPUTexture *GPU_texture_create_3d(const char *name,
int w,
int h,
int d,
int width,
int height,
int depth,
int mip_len,
eGPUTextureFormat texture_format,
eGPUTextureUsage usage,
const void *data)
{
return gpu_texture_create(
name, w, h, d, GPU_TEXTURE_3D, mip_len, texture_format, GPU_DATA_FLOAT, usage, data);
name, width, height, depth, GPU_TEXTURE_3D, mip_len, texture_format, usage, data);
}
GPUTexture *GPU_texture_create_cube(const char *name,
int w,
int width,
int mip_len,
eGPUTextureFormat format,
eGPUTextureUsage usage,
const float *data)
{
return gpu_texture_create(
name, w, w, 0, GPU_TEXTURE_CUBE, mip_len, format, GPU_DATA_FLOAT, usage, data);
return gpu_texture_create(name, width, width, 0, GPU_TEXTURE_CUBE, mip_len, format, usage, data);
}
GPUTexture *GPU_texture_create_cube_array(const char *name,
int w,
int d,
int width,
int layer_len,
int mip_len,
eGPUTextureFormat format,
eGPUTextureUsage usage,
const float *data)
{
return gpu_texture_create(
name, w, w, d, GPU_TEXTURE_CUBE_ARRAY, mip_len, format, GPU_DATA_FLOAT, usage, data);
name, width, width, layer_len, GPU_TEXTURE_CUBE_ARRAY, mip_len, format, usage, data);
}
GPUTexture *GPU_texture_create_compressed_2d(const char *name,
int w,
int h,
int miplen,
int width,
int height,
int mip_len,
eGPUTextureFormat tex_format,
eGPUTextureUsage usage,
const void *data)
{
Texture *tex = GPUBackend::get()->texture_alloc(name);
tex->usage_set(usage);
bool success = tex->init_2D(w, h, 0, miplen, tex_format);
bool success = tex->init_2D(width, height, 0, mip_len, tex_format);
if (!success) {
delete tex;
@@ -401,7 +398,7 @@ GPUTexture *GPU_texture_create_compressed_2d(const char *name,
}
if (data) {
size_t ofs = 0;
for (int mip = 0; mip < miplen; mip++) {
for (int mip = 0; mip < mip_len; mip++) {
int extent[3], offset[3] = {0, 0, 0};
tex->mip_size_get(mip, extent);
@@ -445,21 +442,13 @@ GPUTexture *GPU_texture_create_error(int dimension, bool is_array)
type = (dimension == 2) ? (is_array ? GPU_TEXTURE_2D_ARRAY : GPU_TEXTURE_2D) : type;
type = (dimension == 1) ? (is_array ? GPU_TEXTURE_1D_ARRAY : GPU_TEXTURE_1D) : type;
return gpu_texture_create("invalid_tex",
w,
h,
d,
type,
1,
GPU_RGBA8,
GPU_DATA_FLOAT,
GPU_TEXTURE_USAGE_GENERAL,
pixel);
return gpu_texture_create(
"invalid_tex", w, h, d, type, 1, GPU_RGBA8, GPU_TEXTURE_USAGE_GENERAL, pixel);
}
GPUTexture *GPU_texture_create_view(const char *name,
GPUTexture *src,
eGPUTextureFormat format,
GPUTexture *source_texture,
eGPUTextureFormat view_format,
int mip_start,
int mip_len,
int layer_start,
@@ -469,17 +458,18 @@ GPUTexture *GPU_texture_create_view(const char *name,
{
BLI_assert(mip_len > 0);
BLI_assert(layer_len > 0);
BLI_assert_msg(use_stencil == false || (GPU_texture_usage(src) & GPU_TEXTURE_USAGE_FORMAT_VIEW),
BLI_assert_msg(use_stencil == false ||
(GPU_texture_usage(source_texture) & GPU_TEXTURE_USAGE_FORMAT_VIEW),
"Source texture of TextureView must have GPU_TEXTURE_USAGE_FORMAT_VIEW usage "
"flag if view texture uses stencil texturing.");
BLI_assert_msg((format == GPU_texture_format(src)) ||
(GPU_texture_usage(src) & GPU_TEXTURE_USAGE_FORMAT_VIEW),
BLI_assert_msg((view_format == GPU_texture_format(source_texture)) ||
(GPU_texture_usage(source_texture) & GPU_TEXTURE_USAGE_FORMAT_VIEW),
"Source texture of TextureView must have GPU_TEXTURE_USAGE_FORMAT_VIEW usage "
"flag if view texture format is different.");
Texture *view = GPUBackend::get()->texture_alloc(name);
view->init_view(src,
format,
unwrap(src)->type_get(),
view->init_view(source_texture,
view_format,
unwrap(source_texture)->type_get(),
mip_start,
mip_len,
layer_start,
@@ -498,15 +488,14 @@ eGPUTextureUsage GPU_texture_usage(const GPUTexture *texture_)
/* ------ Update ------ */
void GPU_texture_update_mipmap(GPUTexture *tex_,
int miplvl,
void GPU_texture_update_mipmap(GPUTexture *texture,
int mip_level,
eGPUDataFormat data_format,
const void *pixels)
{
Texture *tex = reinterpret_cast<Texture *>(tex_);
int extent[3] = {1, 1, 1}, offset[3] = {0, 0, 0};
tex->mip_size_get(miplvl, extent);
reinterpret_cast<Texture *>(tex)->update_sub(miplvl, offset, extent, data_format, pixels);
unwrap(texture)->mip_size_get(mip_level, extent);
unwrap(texture)->update_sub(mip_level, offset, extent, data_format, pixels);
}
void GPU_texture_update_sub(GPUTexture *tex,
@@ -521,12 +510,12 @@ void GPU_texture_update_sub(GPUTexture *tex,
{
int offset[3] = {offset_x, offset_y, offset_z};
int extent[3] = {width, height, depth};
reinterpret_cast<Texture *>(tex)->update_sub(0, offset, extent, data_format, pixels);
unwrap(tex)->update_sub(0, offset, extent, data_format, pixels);
}
void GPU_texture_update_sub_from_pixel_buffer(GPUTexture *tex,
void GPU_texture_update_sub_from_pixel_buffer(GPUTexture *texture,
eGPUDataFormat data_format,
GPUPixelBuffer *pix_buf,
GPUPixelBuffer *pixel_buf,
int offset_x,
int offset_y,
int offset_z,
@@ -536,28 +525,27 @@ void GPU_texture_update_sub_from_pixel_buffer(GPUTexture *tex,
{
int offset[3] = {offset_x, offset_y, offset_z};
int extent[3] = {width, height, depth};
reinterpret_cast<Texture *>(tex)->update_sub(offset, extent, data_format, pix_buf);
unwrap(texture)->update_sub(offset, extent, data_format, pixel_buf);
}
void *GPU_texture_read(GPUTexture *tex_, eGPUDataFormat data_format, int miplvl)
void *GPU_texture_read(GPUTexture *texture, eGPUDataFormat data_format, int mip_level)
{
Texture *tex = reinterpret_cast<Texture *>(tex_);
BLI_assert_msg(
GPU_texture_usage(tex_) & GPU_TEXTURE_USAGE_HOST_READ,
GPU_texture_usage(texture) & GPU_TEXTURE_USAGE_HOST_READ,
"The host-read usage flag must be specified up-front. Only textures which require data "
"reads should be flagged, allowing the backend to make certain optimisations.");
return tex->read(miplvl, data_format);
"reads should be flagged, allowing the backend to make certain optimizations.");
return unwrap(texture)->read(mip_level, data_format);
}
void GPU_texture_clear(GPUTexture *tex, eGPUDataFormat data_format, const void *data)
{
BLI_assert(data != nullptr); /* Do not accept nullptr as parameter. */
reinterpret_cast<Texture *>(tex)->clear(data_format, data);
unwrap(tex)->clear(data_format, data);
}
void GPU_texture_update(GPUTexture *tex, eGPUDataFormat data_format, const void *data)
{
reinterpret_cast<Texture *>(tex)->update(data_format, data);
unwrap(tex)->update(data_format, data);
}
void GPU_unpack_row_length_set(uint len)
@@ -567,22 +555,22 @@ void GPU_unpack_row_length_set(uint len)
/* ------ Binding ------ */
void GPU_texture_bind_ex(GPUTexture *tex_, GPUSamplerState state, int unit)
void GPU_texture_bind_ex(GPUTexture *texture, GPUSamplerState state, int unit)
{
Texture *tex = reinterpret_cast<Texture *>(tex_);
Texture *tex = unwrap(texture);
state = (state.type == GPU_SAMPLER_STATE_TYPE_INTERNAL) ? tex->sampler_state : state;
Context::get()->state_manager->texture_bind(tex, state, unit);
}
void GPU_texture_bind(GPUTexture *tex_, int unit)
void GPU_texture_bind(GPUTexture *texture, int unit)
{
Texture *tex = reinterpret_cast<Texture *>(tex_);
Texture *tex = unwrap(texture);
Context::get()->state_manager->texture_bind(tex, tex->sampler_state, unit);
}
void GPU_texture_unbind(GPUTexture *tex_)
void GPU_texture_unbind(GPUTexture *texture)
{
Texture *tex = reinterpret_cast<Texture *>(tex_);
Texture *tex = unwrap(texture);
Context::get()->state_manager->texture_unbind(tex);
}
@@ -608,19 +596,19 @@ void GPU_texture_image_unbind_all()
void GPU_texture_update_mipmap_chain(GPUTexture *tex)
{
reinterpret_cast<Texture *>(tex)->generate_mipmap();
unwrap(tex)->generate_mipmap();
}
void GPU_texture_copy(GPUTexture *dst_, GPUTexture *src_)
{
Texture *src = reinterpret_cast<Texture *>(src_);
Texture *dst = reinterpret_cast<Texture *>(dst_);
Texture *src = unwrap(src_);
Texture *dst = unwrap(dst_);
src->copy_to(dst);
}
void GPU_texture_compare_mode(GPUTexture *tex_, bool use_compare)
void GPU_texture_compare_mode(GPUTexture *texture, bool use_compare)
{
Texture *tex = reinterpret_cast<Texture *>(tex_);
Texture *tex = unwrap(texture);
/* Only depth formats does support compare mode. */
BLI_assert(!(use_compare) || (tex->format_flag_get() & GPU_FORMAT_DEPTH));
@@ -629,18 +617,18 @@ void GPU_texture_compare_mode(GPUTexture *tex_, bool use_compare)
tex->sampler_state.custom_type = GPU_SAMPLER_CUSTOM_COMPARE;
}
void GPU_texture_filter_mode(GPUTexture *tex_, bool use_filter)
void GPU_texture_filter_mode(GPUTexture *texture, bool use_filter)
{
Texture *tex = reinterpret_cast<Texture *>(tex_);
Texture *tex = unwrap(texture);
/* Stencil and integer format does not support filtering. */
BLI_assert(!(use_filter) ||
!(tex->format_flag_get() & (GPU_FORMAT_STENCIL | GPU_FORMAT_INTEGER)));
tex->sampler_state.set_filtering_flag_from_test(GPU_SAMPLER_FILTERING_LINEAR, use_filter);
}
void GPU_texture_mipmap_mode(GPUTexture *tex_, bool use_mipmap, bool use_filter)
void GPU_texture_mipmap_mode(GPUTexture *texture, bool use_mipmap, bool use_filter)
{
Texture *tex = reinterpret_cast<Texture *>(tex_);
Texture *tex = unwrap(texture);
/* Stencil and integer format does not support filtering. */
BLI_assert(!(use_filter || use_mipmap) ||
!(tex->format_flag_get() & (GPU_FORMAT_STENCIL | GPU_FORMAT_INTEGER)));
@@ -648,42 +636,39 @@ void GPU_texture_mipmap_mode(GPUTexture *tex_, bool use_mipmap, bool use_filter)
tex->sampler_state.set_filtering_flag_from_test(GPU_SAMPLER_FILTERING_LINEAR, use_filter);
}
void GPU_texture_anisotropic_filter(GPUTexture *tex_, bool use_aniso)
void GPU_texture_anisotropic_filter(GPUTexture *texture, bool use_aniso)
{
Texture *tex = reinterpret_cast<Texture *>(tex_);
Texture *tex = unwrap(texture);
/* Stencil and integer format does not support filtering. */
BLI_assert(!(use_aniso) ||
!(tex->format_flag_get() & (GPU_FORMAT_STENCIL | GPU_FORMAT_INTEGER)));
tex->sampler_state.set_filtering_flag_from_test(GPU_SAMPLER_FILTERING_ANISOTROPIC, use_aniso);
}
void GPU_texture_extend_mode_x(GPUTexture *tex_, GPUSamplerExtendMode extend_mode)
void GPU_texture_extend_mode_x(GPUTexture *texture, GPUSamplerExtendMode extend_mode)
{
Texture *tex = reinterpret_cast<Texture *>(tex_);
tex->sampler_state.extend_x = extend_mode;
unwrap(texture)->sampler_state.extend_x = extend_mode;
}
void GPU_texture_extend_mode_y(GPUTexture *tex_, GPUSamplerExtendMode extend_mode)
void GPU_texture_extend_mode_y(GPUTexture *texture, GPUSamplerExtendMode extend_mode)
{
Texture *tex = reinterpret_cast<Texture *>(tex_);
tex->sampler_state.extend_yz = extend_mode;
unwrap(texture)->sampler_state.extend_yz = extend_mode;
}
void GPU_texture_extend_mode(GPUTexture *tex_, GPUSamplerExtendMode extend_mode)
void GPU_texture_extend_mode(GPUTexture *texture, GPUSamplerExtendMode extend_mode)
{
Texture *tex = reinterpret_cast<Texture *>(tex_);
tex->sampler_state.extend_x = extend_mode;
tex->sampler_state.extend_yz = extend_mode;
unwrap(texture)->sampler_state.extend_x = extend_mode;
unwrap(texture)->sampler_state.extend_yz = extend_mode;
}
void GPU_texture_swizzle_set(GPUTexture *tex, const char swizzle[4])
void GPU_texture_swizzle_set(GPUTexture *texture, const char swizzle[4])
{
reinterpret_cast<Texture *>(tex)->swizzle_set(swizzle);
unwrap(texture)->swizzle_set(swizzle);
}
void GPU_texture_free(GPUTexture *tex_)
void GPU_texture_free(GPUTexture *texture)
{
Texture *tex = reinterpret_cast<Texture *>(tex_);
Texture *tex = unwrap(texture);
tex->refcount--;
if (tex->refcount < 0) {
@@ -695,14 +680,14 @@ void GPU_texture_free(GPUTexture *tex_)
}
}
void GPU_texture_ref(GPUTexture *tex)
void GPU_texture_ref(GPUTexture *texture)
{
reinterpret_cast<Texture *>(tex)->refcount++;
unwrap(texture)->refcount++;
}
int GPU_texture_dimensions(const GPUTexture *tex_)
int GPU_texture_dimensions(const GPUTexture *texture)
{
eGPUTextureType type = reinterpret_cast<const Texture *>(tex_)->type_get();
eGPUTextureType type = unwrap(texture)->type_get();
if (type & GPU_TEXTURE_1D) {
return 1;
}
@@ -719,51 +704,50 @@ int GPU_texture_dimensions(const GPUTexture *tex_)
return 1;
}
int GPU_texture_width(const GPUTexture *tex)
int GPU_texture_width(const GPUTexture *texture)
{
return reinterpret_cast<const Texture *>(tex)->width_get();
return unwrap(texture)->width_get();
}
int GPU_texture_height(const GPUTexture *tex)
int GPU_texture_height(const GPUTexture *texture)
{
return reinterpret_cast<const Texture *>(tex)->height_get();
return unwrap(texture)->height_get();
}
int GPU_texture_depth(const GPUTexture *tex)
int GPU_texture_depth(const GPUTexture *texture)
{
return reinterpret_cast<const Texture *>(tex)->depth_get();
return unwrap(texture)->depth_get();
}
int GPU_texture_layer_count(const GPUTexture *tex)
int GPU_texture_layer_count(const GPUTexture *texture)
{
return reinterpret_cast<const Texture *>(tex)->layer_count();
return unwrap(texture)->layer_count();
}
int GPU_texture_mip_count(const GPUTexture *tex)
int GPU_texture_mip_count(const GPUTexture *texture)
{
return reinterpret_cast<const Texture *>(tex)->mip_count();
return unwrap(texture)->mip_count();
}
int GPU_texture_original_width(const GPUTexture *tex)
int GPU_texture_original_width(const GPUTexture *texture)
{
return reinterpret_cast<const Texture *>(tex)->src_w;
return unwrap(texture)->src_w;
}
int GPU_texture_original_height(const GPUTexture *tex)
int GPU_texture_original_height(const GPUTexture *texture)
{
return reinterpret_cast<const Texture *>(tex)->src_h;
return unwrap(texture)->src_h;
}
void GPU_texture_original_size_set(GPUTexture *tex_, int w, int h)
void GPU_texture_original_size_set(GPUTexture *texture, int w, int h)
{
Texture *tex = reinterpret_cast<Texture *>(tex_);
tex->src_w = w;
tex->src_h = h;
unwrap(texture)->src_w = w;
unwrap(texture)->src_h = h;
}
eGPUTextureFormat GPU_texture_format(const GPUTexture *tex)
eGPUTextureFormat GPU_texture_format(const GPUTexture *texture)
{
return reinterpret_cast<const Texture *>(tex)->format_get();
return unwrap(texture)->format_get();
}
const char *GPU_texture_format_name(eGPUTextureFormat texture_format)
@@ -909,69 +893,68 @@ const char *GPU_texture_format_name(eGPUTextureFormat texture_format)
return "";
}
bool GPU_texture_has_depth_format(const GPUTexture *tex)
bool GPU_texture_has_depth_format(const GPUTexture *texture)
{
return (reinterpret_cast<const Texture *>(tex)->format_flag_get() & GPU_FORMAT_DEPTH) != 0;
return (unwrap(texture)->format_flag_get() & GPU_FORMAT_DEPTH) != 0;
}
bool GPU_texture_has_stencil_format(const GPUTexture *tex)
bool GPU_texture_has_stencil_format(const GPUTexture *texture)
{
return (reinterpret_cast<const Texture *>(tex)->format_flag_get() & GPU_FORMAT_STENCIL) != 0;
return (unwrap(texture)->format_flag_get() & GPU_FORMAT_STENCIL) != 0;
}
bool GPU_texture_has_integer_format(const GPUTexture *tex)
bool GPU_texture_has_integer_format(const GPUTexture *texture)
{
return (reinterpret_cast<const Texture *>(tex)->format_flag_get() & GPU_FORMAT_INTEGER) != 0;
return (unwrap(texture)->format_flag_get() & GPU_FORMAT_INTEGER) != 0;
}
bool GPU_texture_has_float_format(const GPUTexture *tex)
bool GPU_texture_has_float_format(const GPUTexture *texture)
{
return (reinterpret_cast<const Texture *>(tex)->format_flag_get() & GPU_FORMAT_FLOAT) != 0;
return (unwrap(texture)->format_flag_get() & GPU_FORMAT_FLOAT) != 0;
}
bool GPU_texture_has_normalized_format(const GPUTexture *tex)
bool GPU_texture_has_normalized_format(const GPUTexture *texture)
{
return (reinterpret_cast<const Texture *>(tex)->format_flag_get() &
GPU_FORMAT_NORMALIZED_INTEGER) != 0;
return (unwrap(texture)->format_flag_get() & GPU_FORMAT_NORMALIZED_INTEGER) != 0;
}
bool GPU_texture_has_signed_format(const GPUTexture *tex)
bool GPU_texture_has_signed_format(const GPUTexture *texture)
{
return (reinterpret_cast<const Texture *>(tex)->format_flag_get() & GPU_FORMAT_SIGNED) != 0;
return (unwrap(texture)->format_flag_get() & GPU_FORMAT_SIGNED) != 0;
}
bool GPU_texture_is_cube(const GPUTexture *tex)
bool GPU_texture_is_cube(const GPUTexture *texture)
{
return (reinterpret_cast<const Texture *>(tex)->type_get() & GPU_TEXTURE_CUBE) != 0;
return (unwrap(texture)->type_get() & GPU_TEXTURE_CUBE) != 0;
}
bool GPU_texture_is_array(const GPUTexture *tex)
bool GPU_texture_is_array(const GPUTexture *texture)
{
return (reinterpret_cast<const Texture *>(tex)->type_get() & GPU_TEXTURE_ARRAY) != 0;
return (unwrap(texture)->type_get() & GPU_TEXTURE_ARRAY) != 0;
}
#ifndef GPU_NO_USE_PY_REFERENCES
void **GPU_texture_py_reference_get(GPUTexture *tex)
void **GPU_texture_py_reference_get(GPUTexture *texture)
{
return unwrap(tex)->py_ref;
return unwrap(texture)->py_ref;
}
void GPU_texture_py_reference_set(GPUTexture *tex, void **py_ref)
void GPU_texture_py_reference_set(GPUTexture *texture, void **py_ref)
{
BLI_assert(py_ref == nullptr || unwrap(tex)->py_ref == nullptr);
unwrap(tex)->py_ref = py_ref;
BLI_assert(py_ref == nullptr || unwrap(texture)->py_ref == nullptr);
unwrap(texture)->py_ref = py_ref;
}
#endif
/* TODO: remove. */
int GPU_texture_opengl_bindcode(const GPUTexture *tex)
int GPU_texture_opengl_bindcode(const GPUTexture *texture)
{
return reinterpret_cast<const Texture *>(tex)->gl_bindcode_get();
return unwrap(texture)->gl_bindcode_get();
}
void GPU_texture_get_mipmap_size(GPUTexture *tex, int lvl, int *r_size)
void GPU_texture_get_mipmap_size(GPUTexture *texture, int mip_level, int *r_size)
{
return reinterpret_cast<Texture *>(tex)->mip_size_get(lvl, r_size);
return unwrap(texture)->mip_size_get(mip_level, r_size);
}
/** \} */
@@ -995,30 +978,30 @@ GPUPixelBuffer *GPU_pixel_buffer_create(size_t size)
return wrap(pixbuf);
}
void GPU_pixel_buffer_free(GPUPixelBuffer *pix_buf)
void GPU_pixel_buffer_free(GPUPixelBuffer *pixel_buf)
{
PixelBuffer *handle = unwrap(pix_buf);
PixelBuffer *handle = unwrap(pixel_buf);
delete handle;
}
void *GPU_pixel_buffer_map(GPUPixelBuffer *pix_buf)
void *GPU_pixel_buffer_map(GPUPixelBuffer *pixel_buf)
{
return reinterpret_cast<PixelBuffer *>(pix_buf)->map();
return unwrap(pixel_buf)->map();
}
void GPU_pixel_buffer_unmap(GPUPixelBuffer *pix_buf)
void GPU_pixel_buffer_unmap(GPUPixelBuffer *pixel_buf)
{
reinterpret_cast<PixelBuffer *>(pix_buf)->unmap();
unwrap(pixel_buf)->unmap();
}
size_t GPU_pixel_buffer_size(GPUPixelBuffer *pix_buf)
size_t GPU_pixel_buffer_size(GPUPixelBuffer *pixel_buf)
{
return reinterpret_cast<PixelBuffer *>(pix_buf)->get_size();
return unwrap(pixel_buf)->get_size();
}
int64_t GPU_pixel_buffer_get_native_handle(GPUPixelBuffer *pix_buf)
int64_t GPU_pixel_buffer_get_native_handle(GPUPixelBuffer *pixel_buf)
{
return reinterpret_cast<PixelBuffer *>(pix_buf)->get_native_handle();
return unwrap(pixel_buf)->get_native_handle();
}
/** \} */

View File

@@ -14,8 +14,7 @@
#include "gpu_framebuffer_private.hh"
namespace blender {
namespace gpu {
namespace blender::gpu {
enum eGPUTextureFormatFlag {
/* The format has a depth component and can be used as depth attachment. */
@@ -341,7 +340,7 @@ class PixelBuffer {
public:
PixelBuffer(size_t size) : size_(size){};
virtual ~PixelBuffer(){};
virtual ~PixelBuffer() = default;
virtual void *map() = 0;
virtual void unmap() = 0;
@@ -1170,5 +1169,4 @@ static inline eGPUTextureFormat to_texture_format(const GPUVertFormat *format)
return GPU_DEPTH_COMPONENT24;
}
} // namespace gpu
} // namespace blender
} // namespace blender::gpu

View File

@@ -12,8 +12,7 @@
struct GPUUniformBuf;
namespace blender {
namespace gpu {
namespace blender::gpu {
#ifndef NDEBUG
# define DEBUG_NAME_LEN 64
@@ -69,5 +68,4 @@ static inline const UniformBuf *unwrap(const GPUUniformBuf *vert)
#undef DEBUG_NAME_LEN
} // namespace gpu
} // namespace blender
} // namespace blender::gpu

View File

@@ -17,8 +17,6 @@
#include "gpu_context_private.hh" /* TODO: remove. */
#include "GPU_vertex_buffer.hh"
#include <cstring>
/* -------------------------------------------------------------------- */

View File

@@ -9,6 +9,7 @@
*/
#include "GPU_vertex_format.hh"
#include "BLI_assert.h"
#include "GPU_capabilities.hh"
#include "gpu_shader_create_info.hh"
@@ -33,7 +34,7 @@ using namespace blender::gpu::shader;
void GPU_vertformat_clear(GPUVertFormat *format)
{
#if TRUST_NO_ONE
#ifndef NDEBUG
memset(format, 0, sizeof(GPUVertFormat));
#else
format->attr_len = 0;
@@ -56,9 +57,7 @@ void GPU_vertformat_copy(GPUVertFormat *dest, const GPUVertFormat &src)
static uint comp_size(GPUVertCompType type)
{
#if TRUST_NO_ONE
assert(type <= GPU_COMP_F32); /* other types have irregular sizes (not bytes) */
#endif
BLI_assert(type <= GPU_COMP_F32); /* other types have irregular sizes (not bytes) */
const uint sizes[] = {1, 1, 2, 2, 4, 4, 4};
return sizes[type];
}
@@ -89,9 +88,7 @@ static uint attr_align(const GPUVertAttr *a, uint minimum_stride)
uint vertex_buffer_size(const GPUVertFormat *format, uint vertex_len)
{
#if TRUST_NO_ONE
assert(format->packed && format->stride > 0);
#endif
BLI_assert(format->packed && format->stride > 0);
return format->stride * vertex_len;
}
@@ -112,12 +109,9 @@ static uchar copy_attr_name(GPUVertFormat *format, const char *name)
break;
}
}
#if TRUST_NO_ONE
assert(terminated);
assert(format->name_offset <= GPU_VERT_ATTR_NAMES_BUF_LEN);
#else
(void)terminated;
#endif
BLI_assert(terminated);
BLI_assert(format->name_offset <= GPU_VERT_ATTR_NAMES_BUF_LEN);
UNUSED_VARS_NDEBUG(terminated);
return name_offset;
}
@@ -127,33 +121,33 @@ uint GPU_vertformat_attr_add(GPUVertFormat *format,
uint comp_len,
GPUVertFetchMode fetch_mode)
{
#if TRUST_NO_ONE
assert(format->name_len < GPU_VERT_FORMAT_MAX_NAMES); /* there's room for more */
assert(format->attr_len < GPU_VERT_ATTR_MAX_LEN); /* there's room for more */
assert(!format->packed); /* packed means frozen/locked */
assert((comp_len >= 1 && comp_len <= 4) || comp_len == 8 || comp_len == 12 || comp_len == 16);
BLI_assert(format->name_len < GPU_VERT_FORMAT_MAX_NAMES); /* there's room for more */
BLI_assert(format->attr_len < GPU_VERT_ATTR_MAX_LEN); /* there's room for more */
BLI_assert(!format->packed); /* packed means frozen/locked */
BLI_assert((comp_len >= 1 && comp_len <= 4) || comp_len == 8 || comp_len == 12 ||
comp_len == 16);
switch (comp_type) {
case GPU_COMP_F32:
/* float type can only kept as float */
assert(fetch_mode == GPU_FETCH_FLOAT);
BLI_assert(fetch_mode == GPU_FETCH_FLOAT);
break;
case GPU_COMP_I10:
/* 10_10_10 format intended for normals (XYZ) or colors (RGB)
* extra component packed.w can be manually set to { -2, -1, 0, 1 } */
assert(ELEM(comp_len, 3, 4));
BLI_assert(ELEM(comp_len, 3, 4));
/* Not strictly required, may relax later. */
assert(fetch_mode == GPU_FETCH_INT_TO_FLOAT_UNIT);
BLI_assert(fetch_mode == GPU_FETCH_INT_TO_FLOAT_UNIT);
break;
default:
/* integer types can be kept as int or converted/normalized to float */
assert(fetch_mode != GPU_FETCH_FLOAT);
BLI_assert(fetch_mode != GPU_FETCH_FLOAT);
/* only support float matrices (see Batch_update_program_bindings) */
assert(!ELEM(comp_len, 8, 12, 16));
BLI_assert(!ELEM(comp_len, 8, 12, 16));
}
#endif
format->name_len++; /* Multi-name support. */
const uint attr_id = format->attr_len++;
@@ -174,10 +168,8 @@ uint GPU_vertformat_attr_add(GPUVertFormat *format,
void GPU_vertformat_alias_add(GPUVertFormat *format, const char *alias)
{
GPUVertAttr *attr = &format->attrs[format->attr_len - 1];
#if TRUST_NO_ONE
assert(format->name_len < GPU_VERT_FORMAT_MAX_NAMES); /* there's room for more */
assert(attr->name_len < GPU_VERT_ATTR_MAX_NAMES);
#endif
BLI_assert(format->name_len < GPU_VERT_FORMAT_MAX_NAMES); /* there's room for more */
BLI_assert(attr->name_len < GPU_VERT_ATTR_MAX_NAMES);
format->name_len++; /* Multi-name support. */
attr->names[attr->name_len++] = copy_attr_name(format, alias);
}

View File

@@ -10,6 +10,8 @@
#pragma once
#include "GPU_vertex_buffer.hh"
struct GPUVertFormat;
void VertexFormat_pack(GPUVertFormat *format);

View File

@@ -741,10 +741,7 @@ void MTLBatch::prepare_vertex_descriptor_and_bindings(MTLVertBuf **buffers, int
void MTLBatch::draw_advanced(int v_first, int v_count, int i_first, int i_count)
{
#if TRUST_NO_ONE
BLI_assert(v_count > 0 && i_count > 0);
#endif
/* Setup RenderPipelineState for batch. */
MTLContext *ctx = MTLContext::get();

View File

@@ -504,11 +504,9 @@ void gpu::MTLTexture::update_sub(
this->ensure_baked();
/* Safety checks. */
#if TRUST_NO_ONE
BLI_assert(mip >= mip_min_ && mip <= mip_max_);
BLI_assert(mip < texture_.mipmapLevelCount);
BLI_assert(texture_.mipmapLevelCount >= mip_max_);
#endif
/* DEPTH FLAG - Depth formats cannot use direct BLIT - pass off to their own routine which will
* do a depth-only render. */