GPUFrameBuffer: GL backend isolation

This is related to the Vulkan port T68990.

This is a full cleanup of the Framebuffer module and a separation
of the OpenGL-related functions.

There are some changes in how the default framebuffers are handled.
The default framebuffers are now individually wrapped inside special
GLFrameBuffers. This makes it easier to keep track of the currently
bound framebuffer state and allows some specificity for operations on
these framebuffers.
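
For example, code that used to assert "no framebuffer bound" now compares
against the wrapped default instead of NULL (a minimal sketch using the API
touched by this commit; my_fb is a hypothetical offscreen framebuffer):

/* Previously: BLI_assert(GPU_framebuffer_active_get() == NULL); */
BLI_assert(GPU_framebuffer_active_get() == GPU_framebuffer_back_get());

GPU_framebuffer_bind(my_fb); /* Hypothetical offscreen framebuffer. */
/* ... draw ... */
GPU_framebuffer_restore();   /* Rebinds the wrapped back_left framebuffer. */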

Another change is dropping the optimisation of only configuring the
changed attachments during framebuffer updates. It does not give any
benefit and adds some complexity to the code. It might be brought back
if this has a performance impact on some systems.

This also adds support for naming framebuffers, but it is currently
unused.
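
When GPU debugging is enabled, the GL backend turns that name into a GL
object label; roughly, as condensed from the new GLFrameBuffer code below:

glGenFramebuffers(1, &fbo_id_);
if ((G.debug & G_DEBUG_GPU) && (GLEW_VERSION_4_3 || GLEW_KHR_debug)) {
  char label[64];
  SNPRINTF(label, "FrameBuffer-%s", name_);
  /* Binding before setting the label is needed on some drivers. */
  glBindFramebuffer(GL_FRAMEBUFFER, fbo_id_);
  glObjectLabel(GL_FRAMEBUFFER, fbo_id_, -1, label);
}
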
Clément Foucault
2020-08-29 01:13:54 +02:00
parent 0850afb34e
commit f3a65a1b4a
19 changed files with 1248 additions and 833 deletions

View File

@@ -1462,7 +1462,7 @@ void DRW_draw_render_loop_ex(struct Depsgraph *depsgraph,
DRW_hair_init();
/* No framebuffer allowed before drawing. */
BLI_assert(GPU_framebuffer_active_get() == NULL);
BLI_assert(GPU_framebuffer_active_get() == GPU_framebuffer_back_get());
/* Init engines */
drw_engines_init();

View File

@@ -970,19 +970,14 @@ static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
switch (cmd_type) {
case DRW_CMD_CLEAR:
GPU_framebuffer_clear(
#ifndef NDEBUG
GPU_framebuffer_active_get(),
#else
NULL,
#endif
cmd->clear.clear_channels,
(float[4]){cmd->clear.r / 255.0f,
cmd->clear.g / 255.0f,
cmd->clear.b / 255.0f,
cmd->clear.a / 255.0f},
cmd->clear.depth,
cmd->clear.stencil);
GPU_framebuffer_clear(GPU_framebuffer_active_get(),
cmd->clear.clear_channels,
(float[4]){cmd->clear.r / 255.0f,
cmd->clear.g / 255.0f,
cmd->clear.b / 255.0f,
cmd->clear.a / 255.0f},
cmd->clear.depth,
cmd->clear.stencil);
break;
case DRW_CMD_DRWSTATE:
state.drw_state_enabled |= cmd->state.enable;

View File

@@ -92,6 +92,7 @@ set(SRC
opengl/gl_batch.cc
opengl/gl_context.cc
opengl/gl_drawlist.cc
opengl/gl_framebuffer.cc
opengl/gl_shader.cc
opengl/gl_shader_interface.cc
opengl/gl_state.cc
@@ -151,9 +152,11 @@ set(SRC
opengl/gl_batch.hh
opengl/gl_context.hh
opengl/gl_drawlist.hh
opengl/gl_framebuffer.hh
opengl/gl_shader.hh
opengl/gl_shader_interface.hh
opengl/gl_state.hh
opengl/gl_texture.hh
opengl/gl_uniform_buffer.hh
opengl/gl_vertex_array.hh
)

View File

@@ -19,6 +19,13 @@
/** \file
* \ingroup gpu
*
* GPU Framebuffer
* - this is a wrapper for an OpenGL framebuffer object (FBO). in practice
* multiple FBO's may be created.
* - actual FBO creation & config is deferred until GPU_framebuffer_bind or
* GPU_framebuffer_check_valid to allow creation & config while another
* opengl context is bound (since FBOs are not shared between ogl contexts).
*/
#pragma once
@@ -41,9 +48,8 @@ typedef enum eGPUFrameBufferBits {
} eGPUFrameBufferBits;
typedef enum eGPUBackBuffer {
GPU_BACKBUFFER = 0,
GPU_BACKBUFFER_LEFT = 0,
GPU_BACKBUFFER_RIGHT,
GPU_BACKBUFFER_LEFT,
} eGPUBackBuffer;
/** Opaque pointer hiding blender::gpu::FrameBuffer. */
@@ -53,15 +59,6 @@ typedef struct GPUFrameBuffer {
typedef struct GPUOffScreen GPUOffScreen;
/* GPU Framebuffer
* - this is a wrapper for an OpenGL framebuffer object (FBO). in practice
* multiple FBO's may be created, to get around limitations on the number
* of attached textures and the dimension requirements.
* - actual FBO creation & config is deferred until GPU_framebuffer_bind or
* GPU_framebuffer_check_valid to allow creation & config while another
* opengl context is bound (since FBOs are not shared between ogl contexts).
*/
GPUFrameBuffer *GPU_framebuffer_create(void);
void GPU_framebuffer_free(GPUFrameBuffer *fb);
void GPU_framebuffer_bind(GPUFrameBuffer *fb);
@@ -72,6 +69,7 @@ bool GPU_framebuffer_bound(GPUFrameBuffer *fb);
bool GPU_framebuffer_check_valid(GPUFrameBuffer *fb, char err_out[256]);
GPUFrameBuffer *GPU_framebuffer_active_get(void);
GPUFrameBuffer *GPU_framebuffer_back_get(void);
#define GPU_FRAMEBUFFER_FREE_SAFE(fb) \
do { \
@@ -84,13 +82,10 @@ GPUFrameBuffer *GPU_framebuffer_active_get(void);
/* Framebuffer setup : You need to call GPU_framebuffer_bind for these
* to be effective. */
void GPU_framebuffer_texture_attach(GPUFrameBuffer *fb, struct GPUTexture *tex, int slot, int mip);
void GPU_framebuffer_texture_layer_attach(
GPUFrameBuffer *fb, struct GPUTexture *tex, int slot, int layer, int mip);
void GPU_framebuffer_texture_cubeface_attach(
GPUFrameBuffer *fb, struct GPUTexture *tex, int slot, int face, int mip);
void GPU_framebuffer_texture_attach_ex(GPUFrameBuffer *gpu_fb,
GPUAttachment attachement,
int slot);
void GPU_framebuffer_texture_detach(GPUFrameBuffer *fb, struct GPUTexture *tex);
void GPU_framebuffer_texture_detach_slot(GPUFrameBuffer *fb, struct GPUTexture *tex, int type);
/**
* How to use #GPU_framebuffer_ensure_config().
@@ -155,6 +150,16 @@ void GPU_framebuffer_config_array(GPUFrameBuffer *fb, const GPUAttachment *confi
_tex, _face, _mip, \
}
#define GPU_framebuffer_texture_attach(_fb, _texture, _slot, _mip) \
GPU_framebuffer_texture_attach_ex( \
_fb, (GPUAttachment)GPU_ATTACHMENT_TEXTURE_MIP(_texture, _mip), _slot)
#define GPU_framebuffer_texture_layer_attach(_fb, _texture, _slot, layer, _mip) \
GPU_framebuffer_texture_attach_ex( \
_fb, (GPUAttachment)GPU_ATTACHMENT_TEXTURE_LAYER_MIP(_texture, layer, _mip), _slot)
#define GPU_framebuffer_texture_cubeface_attach(_fb, _texture, _slot, face, _mip) \
GPU_framebuffer_texture_attach_ex( \
_fb, (GPUAttachment)GPU_ATTACHMENT_TEXTURE_CUBEFACE_MIP(_texture, face, _mip), _slot)
/* Framebuffer operations */
void GPU_framebuffer_viewport_set(GPUFrameBuffer *fb, int x, int y, int w, int h);
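
For illustration, a minimal usage sketch of the attach API after the macro
rewrite; caller-side signatures are unchanged. The textures (tex_depth,
tex_color) are hypothetical placeholders, not part of this change:

GPUFrameBuffer *fb = GPU_framebuffer_create();
/* Expands to GPU_framebuffer_texture_attach_ex(fb, GPU_ATTACHMENT_TEXTURE_MIP(...), slot). */
GPU_framebuffer_texture_attach(fb, tex_depth, 0, 0); /* Depth attachment, chosen from the format. */
GPU_framebuffer_texture_attach(fb, tex_color, 0, 0); /* Color attachment 0. */
GPU_framebuffer_bind(fb);
char err[256];
if (!GPU_framebuffer_check_valid(fb, err)) {
  fprintf(stderr, "%s", err);
}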

View File

@@ -275,8 +275,10 @@ void GPU_texture_mipmap_mode(GPUTexture *tex, bool use_mipmap, bool use_filter);
void GPU_texture_wrap_mode(GPUTexture *tex, bool use_repeat, bool use_clamp);
void GPU_texture_swizzle_set(GPUTexture *tex, const char swizzle[4]);
/* TODO should be private internal functions. */
void GPU_texture_attach_framebuffer(GPUTexture *tex, struct GPUFrameBuffer *fb, int attachment);
int GPU_texture_detach_framebuffer(GPUTexture *tex, struct GPUFrameBuffer *fb);
void GPU_texture_detach_framebuffer(GPUTexture *tex, struct GPUFrameBuffer *fb);
int GPU_texture_framebuffer_attachement_get(GPUTexture *tex, struct GPUFrameBuffer *fb);
int GPU_texture_target(const GPUTexture *tex);
int GPU_texture_width(const GPUTexture *tex);

View File

@@ -32,6 +32,7 @@ namespace gpu {
class Batch;
class DrawList;
class FrameBuffer;
class Shader;
class UniformBuf;
@@ -45,7 +46,7 @@ class GPUBackend {
virtual Batch *batch_alloc(void) = 0;
virtual DrawList *drawlist_alloc(int list_length) = 0;
// virtual FrameBuffer *framebuffer_alloc(void) = 0;
virtual FrameBuffer *framebuffer_alloc(const char *name) = 0;
virtual Shader *shader_alloc(const char *name) = 0;
// virtual Texture *texture_alloc(void) = 0;
virtual UniformBuf *uniformbuf_alloc(int size, const char *name) = 0;

View File

@@ -49,6 +49,18 @@ struct GPUContext {
GPUMatrixState *matrix_state = NULL;
blender::gpu::GPUStateManager *state_manager = NULL;
/**
* All 4 window framebuffers.
* None of them are valid in an offscreen context.
* Right framebuffers are only available if using stereo rendering.
* Front framebuffers contains (in principle, but not always) the last frame color.
* Default framebuffer is back_left.
*/
blender::gpu::FrameBuffer *back_left = NULL;
blender::gpu::FrameBuffer *front_left = NULL;
blender::gpu::FrameBuffer *back_right = NULL;
blender::gpu::FrameBuffer *front_right = NULL;
protected:
/** Thread on which this context is active. */
pthread_t thread_;

View File

@@ -32,97 +32,233 @@
#include "GPU_shader.h"
#include "GPU_texture.h"
#include "gpu_backend.hh"
#include "gpu_context_private.hh"
#include "gpu_private.h"
#include "gpu_texture_private.hh"
#include "gpu_framebuffer_private.hh"
using namespace blender::gpu;
namespace blender::gpu {
static GLenum convert_attachment_type_to_gl(GPUAttachmentType type)
/* -------------------------------------------------------------------- */
/** \name Constructor / Destructor
* \{ */
FrameBuffer::FrameBuffer(const char *name)
{
#define ATTACHMENT(type) \
case GPU_FB_##type: { \
return GL_##type; \
} \
((void)0)
switch (type) {
ATTACHMENT(DEPTH_ATTACHMENT);
ATTACHMENT(DEPTH_STENCIL_ATTACHMENT);
ATTACHMENT(COLOR_ATTACHMENT0);
ATTACHMENT(COLOR_ATTACHMENT1);
ATTACHMENT(COLOR_ATTACHMENT2);
ATTACHMENT(COLOR_ATTACHMENT3);
ATTACHMENT(COLOR_ATTACHMENT4);
ATTACHMENT(COLOR_ATTACHMENT5);
default:
BLI_assert(0);
return GL_COLOR_ATTACHMENT0;
}
}
static GPUAttachmentType attachment_type_from_tex(GPUTexture *tex, int slot)
{
switch (GPU_texture_format(tex)) {
case GPU_DEPTH_COMPONENT32F:
case GPU_DEPTH_COMPONENT24:
case GPU_DEPTH_COMPONENT16:
return GPU_FB_DEPTH_ATTACHMENT;
case GPU_DEPTH24_STENCIL8:
case GPU_DEPTH32F_STENCIL8:
return GPU_FB_DEPTH_STENCIL_ATTACHMENT;
default:
return static_cast<GPUAttachmentType>(GPU_FB_COLOR_ATTACHMENT0 + slot);
}
}
static GLenum convert_buffer_bits_to_gl(eGPUFrameBufferBits bits)
{
GLbitfield mask = 0;
mask |= (bits & GPU_DEPTH_BIT) ? GL_DEPTH_BUFFER_BIT : 0;
mask |= (bits & GPU_STENCIL_BIT) ? GL_STENCIL_BUFFER_BIT : 0;
mask |= (bits & GPU_COLOR_BIT) ? GL_COLOR_BUFFER_BIT : 0;
return mask;
}
static void gpu_print_framebuffer_error(GLenum status, char err_out[256])
{
const char *format = "GPUFrameBuffer: framebuffer status %s\n";
const char *err = "unknown";
#define FORMAT_STATUS(X) \
case GL_FRAMEBUFFER_##X: { \
err = "GL_FRAMEBUFFER_" #X; \
break; \
} \
((void)0)
switch (status) {
/* success */
FORMAT_STATUS(COMPLETE);
/* errors shared by OpenGL desktop & ES */
FORMAT_STATUS(INCOMPLETE_ATTACHMENT);
FORMAT_STATUS(INCOMPLETE_MISSING_ATTACHMENT);
FORMAT_STATUS(UNSUPPORTED);
#if 0 /* for OpenGL ES only */
FORMAT_STATUS(INCOMPLETE_DIMENSIONS);
#else /* for desktop GL only */
FORMAT_STATUS(INCOMPLETE_DRAW_BUFFER);
FORMAT_STATUS(INCOMPLETE_READ_BUFFER);
FORMAT_STATUS(INCOMPLETE_MULTISAMPLE);
FORMAT_STATUS(UNDEFINED);
#endif
}
#undef FORMAT_STATUS
if (err_out) {
BLI_snprintf(err_out, 256, format, err);
if (name) {
BLI_strncpy(name_, name, sizeof(name_));
}
else {
fprintf(stderr, format, err);
name_[0] = '\0';
}
/* Force config on first use. */
dirty_attachments_ = true;
for (int i = 0; i < ARRAY_SIZE(attachments_); i++) {
attachments_[i].tex = NULL;
attachments_[i].mip = -1;
attachments_[i].layer = -1;
}
}
FrameBuffer::~FrameBuffer()
{
GPUFrameBuffer *gpu_fb = reinterpret_cast<GPUFrameBuffer *>(this);
for (int i = 0; i < ARRAY_SIZE(attachments_); i++) {
if (attachments_[i].tex != NULL) {
GPU_texture_detach_framebuffer(attachments_[i].tex, gpu_fb);
}
}
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Attachments managment
* \{ */
void FrameBuffer::attachment_set(GPUAttachmentType type, const GPUAttachment &new_attachment)
{
if (new_attachment.mip == -1) {
return; /* GPU_ATTACHMENT_LEAVE */
}
if (type >= GPU_FB_MAX_ATTACHEMENT) {
fprintf(stderr,
"GPUFramebuffer: Error: Trying to attach texture to type %d but maximum slot is %d.\n",
type - GPU_FB_COLOR_ATTACHMENT0,
GPU_FB_MAX_COLOR_ATTACHMENT);
return;
}
if (new_attachment.tex) {
if (new_attachment.layer > 0) {
BLI_assert(ELEM(GPU_texture_target(new_attachment.tex),
GL_TEXTURE_2D_ARRAY,
GL_TEXTURE_CUBE_MAP,
GL_TEXTURE_CUBE_MAP_ARRAY_ARB));
}
if (GPU_texture_stencil(new_attachment.tex)) {
BLI_assert(ELEM(type, GPU_FB_DEPTH_STENCIL_ATTACHMENT));
}
else if (GPU_texture_depth(new_attachment.tex)) {
BLI_assert(ELEM(type, GPU_FB_DEPTH_ATTACHMENT));
}
}
GPUAttachment &attachment = attachments_[type];
if (attachment.tex == new_attachment.tex && attachment.layer == new_attachment.layer &&
attachment.mip == new_attachment.mip) {
return; /* Exact same texture already bound here. */
}
/* Unbind previous and bind new. */
/* TODO(fclem) cleanup the casts. */
if (attachment.tex) {
GPU_texture_detach_framebuffer(attachment.tex, reinterpret_cast<GPUFrameBuffer *>(this));
}
attachment = new_attachment;
/* Might be null if this is for unbinding. */
if (attachment.tex) {
GPU_texture_attach_framebuffer(attachment.tex, reinterpret_cast<GPUFrameBuffer *>(this), type);
}
else {
/* GPU_ATTACHMENT_NONE */
}
dirty_attachments_ = true;
}
void FrameBuffer::recursive_downsample(int max_lvl,
void (*callback)(void *userData, int level),
void *userData)
{
GPUContext *ctx = GPU_context_active_get();
/* Bind to make sure the framebuffer is up to date. */
this->bind(true);
if (width_ == 1 && height_ == 1) {
return;
}
/* HACK: Make the framebuffer appear not bound to avoid assert in GPU_texture_bind. */
ctx->active_fb = NULL;
int levels = floor(log2(max_ii(width_, height_)));
max_lvl = min_ii(max_lvl, levels);
int current_dim[2] = {width_, height_};
int mip_lvl;
for (mip_lvl = 1; mip_lvl < max_lvl + 1; mip_lvl++) {
/* calculate next viewport size */
current_dim[0] = max_ii(current_dim[0] / 2, 1);
current_dim[1] = max_ii(current_dim[1] / 2, 1);
/* Replace attaached miplevel for each attachement. */
for (int att = 0; att < ARRAY_SIZE(attachments_); att++) {
GPUTexture *tex = attachments_[att].tex;
if (tex != NULL) {
/* Some Intel HDXXX have issue with rendering to a mipmap that is below
* the texture GL_TEXTURE_MAX_LEVEL. So even if it not correct, in this case
* we allow GL_TEXTURE_MAX_LEVEL to be one level lower. In practice it does work! */
int map_lvl = (GPU_mip_render_workaround()) ? mip_lvl : (mip_lvl - 1);
/* Restrict fetches only to previous level. */
GPU_texture_bind(tex, 0);
glTexParameteri(GPU_texture_target(tex), GL_TEXTURE_BASE_LEVEL, mip_lvl - 1);
glTexParameteri(GPU_texture_target(tex), GL_TEXTURE_MAX_LEVEL, map_lvl);
GPU_texture_unbind(tex);
/* Bind next level. */
attachments_[att].mip = mip_lvl;
}
}
/* Update the internal attachments and viewport size. */
dirty_attachments_ = true;
this->bind(true);
/* HACK: Make the framebuffer appear not bound to avoid assert in GPU_texture_bind. */
ctx->active_fb = NULL;
callback(userData, mip_lvl);
/* This is the last mipmap level. Exit loop without incrementing mip_lvl. */
if (current_dim[0] == 1 && current_dim[1] == 1) {
break;
}
}
for (int att = 0; att < ARRAY_SIZE(attachments_); att++) {
if (attachments_[att].tex != NULL) {
/* Reset mipmap level range. */
GPUTexture *tex = attachments_[att].tex;
GPU_texture_bind(tex, 0);
glTexParameteri(GPU_texture_target(tex), GL_TEXTURE_BASE_LEVEL, 0);
glTexParameteri(GPU_texture_target(tex), GL_TEXTURE_MAX_LEVEL, mip_lvl);
GPU_texture_unbind(tex);
/* Reset base level. NOTE: might not be the one bound at the start of this function. */
attachments_[att].mip = 0;
}
}
/* Reattach base level textures. */
this->bind(true);
}
/** \} */
} // namespace blender::gpu
/* -------------------------------------------------------------------- */
/** \name C-API
* \{ */
using namespace blender;
using namespace blender::gpu;
GPUFrameBuffer *GPU_framebuffer_create()
{
/* We generate the FB object later at first use in order to
* create the framebuffer in the right opengl context. */
return (GPUFrameBuffer *)GPUBackend::get()->framebuffer_alloc("FB");
}
void GPU_framebuffer_free(GPUFrameBuffer *gpu_fb)
{
delete reinterpret_cast<FrameBuffer *>(gpu_fb);
}
/* ---------- Binding ----------- */
void GPU_framebuffer_bind(GPUFrameBuffer *gpu_fb)
{
FrameBuffer *fb = reinterpret_cast<FrameBuffer *>(gpu_fb);
const bool enable_srgb = true;
fb->bind(enable_srgb);
}
/* Workaround for binding a srgb framebuffer without doing the srgb transform. */
void GPU_framebuffer_bind_no_srgb(GPUFrameBuffer *gpu_fb)
{
FrameBuffer *fb = reinterpret_cast<FrameBuffer *>(gpu_fb);
const bool enable_srgb = false;
fb->bind(enable_srgb);
}
/* For stereo rendering. */
void GPU_backbuffer_bind(eGPUBackBuffer buffer)
{
GPUContext *ctx = GPU_context_active_get();
if (buffer == GPU_BACKBUFFER_LEFT) {
ctx->back_left->bind(false);
}
else {
ctx->back_right->bind(false);
}
}
void GPU_framebuffer_restore(void)
{
GPU_context_active_get()->back_left->bind(false);
}
GPUFrameBuffer *GPU_framebuffer_active_get(void)
@@ -131,134 +267,43 @@ GPUFrameBuffer *GPU_framebuffer_active_get(void)
return reinterpret_cast<GPUFrameBuffer *>(ctx ? ctx->active_fb : NULL);
}
/* GPUFrameBuffer */
GPUFrameBuffer *GPU_framebuffer_create(void)
/* Returns the default framebuffer. Will always exists even if it's just a dummy. */
GPUFrameBuffer *GPU_framebuffer_back_get(void)
{
/* We generate the FB object later at first use in order to
* create the framebuffer in the right opengl context. */
return (GPUFrameBuffer *)new FrameBuffer();
GPUContext *ctx = GPU_context_active_get();
return reinterpret_cast<GPUFrameBuffer *>(ctx ? ctx->back_left : NULL);
}
static void gpu_framebuffer_init(FrameBuffer *fb)
bool GPU_framebuffer_bound(GPUFrameBuffer *gpu_fb)
{
fb->object = GPU_fbo_alloc();
fb->ctx = GPU_context_active_get();
/* Not really needed for now. */
// gpu_context_add_framebuffer(fb->ctx, fb);
return (gpu_fb == GPU_framebuffer_active_get());
}
void GPU_framebuffer_free(GPUFrameBuffer *gpu_fb)
/* ---------- Attachment Management ----------- */
bool GPU_framebuffer_check_valid(GPUFrameBuffer *gpu_fb, char err_out[256])
{
FrameBuffer *fb = reinterpret_cast<FrameBuffer *>(gpu_fb);
for (int i_type = 0; i_type < GPU_FB_MAX_ATTACHEMENT; i_type++) {
GPUAttachmentType type = static_cast<GPUAttachmentType>(i_type);
if (fb->attachments[type].tex != NULL) {
GPU_framebuffer_texture_detach(gpu_fb, fb->attachments[type].tex);
}
}
if (fb->object != 0) {
/* This restores the framebuffer if it was bound */
GPU_fbo_free(fb->object, fb->ctx);
/* Not really needed for now. */
// gpu_context_remove_framebuffer(fb->ctx, fb);
}
/* TODO(fclem) check if bound in its associated context context. */
if (GPU_framebuffer_active_get() == gpu_fb) {
GPU_context_active_get()->active_fb = NULL;
}
delete fb;
return reinterpret_cast<FrameBuffer *>(gpu_fb)->check(err_out);
}
/* ---------- Attach ----------- */
static void gpu_framebuffer_texture_attach_ex(
FrameBuffer *fb, GPUTexture *tex, int slot, int layer, int mip)
void GPU_framebuffer_texture_attach_ex(GPUFrameBuffer *gpu_fb, GPUAttachment attachement, int slot)
{
if (slot >= GPU_FB_MAX_COLOR_ATTACHMENT) {
fprintf(stderr,
"Attaching to index %d framebuffer slot unsupported. "
"Use at most %d\n",
slot,
GPU_FB_MAX_COLOR_ATTACHMENT);
return;
}
GPUFrameBuffer *gpufb = reinterpret_cast<GPUFrameBuffer *>(fb);
GPUAttachmentType type = attachment_type_from_tex(tex, slot);
GPUAttachment *attachment = &fb->attachments[type];
if ((attachment->tex == tex) && (attachment->mip == mip) && (attachment->layer == layer)) {
return; /* Exact same texture already bound here. */
}
if (attachment->tex != NULL) {
GPU_framebuffer_texture_detach(gpufb, attachment->tex);
}
if (attachment->tex == NULL) {
GPU_texture_attach_framebuffer(tex, gpufb, type);
}
attachment->tex = tex;
attachment->mip = mip;
attachment->layer = layer;
GPU_FB_ATTACHEMENT_SET_DIRTY(fb->dirty_flag, type);
}
void GPU_framebuffer_texture_attach(GPUFrameBuffer *gpu_fb, GPUTexture *tex, int slot, int mip)
{
FrameBuffer *fb = reinterpret_cast<FrameBuffer *>(gpu_fb);
gpu_framebuffer_texture_attach_ex(fb, tex, slot, -1, mip);
}
void GPU_framebuffer_texture_layer_attach(
GPUFrameBuffer *gpu_fb, GPUTexture *tex, int slot, int layer, int mip)
{
FrameBuffer *fb = reinterpret_cast<FrameBuffer *>(gpu_fb);
/* NOTE: We could support 1D ARRAY texture. */
BLI_assert(GPU_texture_target(tex) == GL_TEXTURE_2D_ARRAY);
gpu_framebuffer_texture_attach_ex(fb, tex, slot, layer, mip);
}
void GPU_framebuffer_texture_cubeface_attach(
GPUFrameBuffer *gpu_fb, GPUTexture *tex, int slot, int face, int mip)
{
FrameBuffer *fb = reinterpret_cast<FrameBuffer *>(gpu_fb);
BLI_assert(GPU_texture_cube(tex));
gpu_framebuffer_texture_attach_ex(fb, tex, slot, face, mip);
}
/* ---------- Detach ----------- */
void GPU_framebuffer_texture_detach_slot(GPUFrameBuffer *gpu_fb, GPUTexture *tex, int type)
{
FrameBuffer *fb = reinterpret_cast<FrameBuffer *>(gpu_fb);
GPUAttachment *attachment = &fb->attachments[type];
if (attachment->tex != tex) {
fprintf(stderr,
"Warning, attempting to detach Texture %p from framebuffer %p "
"but texture is not attached.\n",
tex,
fb);
return;
}
attachment->tex = NULL;
GPU_FB_ATTACHEMENT_SET_DIRTY(fb->dirty_flag, type);
GPUAttachmentType type = blender::gpu::Texture::attachment_type(attachement.tex, slot);
reinterpret_cast<FrameBuffer *>(gpu_fb)->attachment_set(type, attachement);
}
void GPU_framebuffer_texture_detach(GPUFrameBuffer *gpu_fb, GPUTexture *tex)
{
GPUAttachmentType type = (GPUAttachmentType)GPU_texture_detach_framebuffer(tex, gpu_fb);
GPU_framebuffer_texture_detach_slot(gpu_fb, tex, type);
GPUAttachment attachement = GPU_ATTACHMENT_NONE;
int type = GPU_texture_framebuffer_attachement_get(tex, gpu_fb);
if (type != -1) {
reinterpret_cast<FrameBuffer *>(gpu_fb)->attachment_set((GPUAttachmentType)type, attachement);
}
else {
BLI_assert(!"Error: Texture: Framebuffer is not attached");
}
}
/* ---------- Config (Attach & Detach) ----------- */
/**
* First GPUAttachment in *config is always the depth/depth_stencil buffer.
* Following GPUAttachments are color buffers.
@@ -270,280 +315,30 @@ void GPU_framebuffer_config_array(GPUFrameBuffer *gpu_fb,
int config_len)
{
FrameBuffer *fb = reinterpret_cast<FrameBuffer *>(gpu_fb);
if (config[0].tex) {
BLI_assert(GPU_texture_depth(config[0].tex));
gpu_framebuffer_texture_attach_ex(fb, config[0].tex, 0, config[0].layer, config[0].mip);
}
else if (config[0].mip == -1) {
/* Leave texture attached */
}
else if (fb->attachments[GPU_FB_DEPTH_ATTACHMENT].tex != NULL) {
GPU_framebuffer_texture_detach(gpu_fb, fb->attachments[GPU_FB_DEPTH_ATTACHMENT].tex);
}
else if (fb->attachments[GPU_FB_DEPTH_STENCIL_ATTACHMENT].tex != NULL) {
GPU_framebuffer_texture_detach(gpu_fb, fb->attachments[GPU_FB_DEPTH_STENCIL_ATTACHMENT].tex);
}
int slot = 0;
for (int i = 1; i < config_len; i++, slot++) {
if (config[i].tex != NULL) {
BLI_assert(GPU_texture_depth(config[i].tex) == false);
gpu_framebuffer_texture_attach_ex(fb, config[i].tex, slot, config[i].layer, config[i].mip);
}
else if (config[i].mip != -1) {
GPUTexture *tex = fb->color_tex(slot);
if (tex != NULL) {
GPU_framebuffer_texture_detach(gpu_fb, tex);
}
}
const GPUAttachment &depth_attachement = config[0];
Span<GPUAttachment> color_attachments(config + 1, config_len - 1);
if (depth_attachement.mip == -1) {
/* GPU_ATTACHMENT_LEAVE */
}
}
/* ---------- Bind / Restore ----------- */
static void gpu_framebuffer_attachment_attach(GPUAttachment *attach, GPUAttachmentType attach_type)
{
int tex_bind = GPU_texture_opengl_bindcode(attach->tex);
GLenum gl_attachment = convert_attachment_type_to_gl(attach_type);
if (attach->layer > -1) {
if (GPU_texture_cube(attach->tex)) {
glFramebufferTexture2D(GL_FRAMEBUFFER,
gl_attachment,
GL_TEXTURE_CUBE_MAP_POSITIVE_X + attach->layer,
tex_bind,
attach->mip);
}
else {
glFramebufferTextureLayer(
GL_FRAMEBUFFER, gl_attachment, tex_bind, attach->mip, attach->layer);
}
else if (depth_attachement.tex == NULL) {
/* GPU_ATTACHMENT_NONE: Need to clear both targets. */
fb->attachment_set(GPU_FB_DEPTH_STENCIL_ATTACHMENT, depth_attachement);
fb->attachment_set(GPU_FB_DEPTH_ATTACHMENT, depth_attachement);
}
else {
glFramebufferTexture(GL_FRAMEBUFFER, gl_attachment, tex_bind, attach->mip);
}
}
static void gpu_framebuffer_attachment_detach(GPUAttachment *UNUSED(attachment),
GPUAttachmentType attach_type)
{
GLenum gl_attachment = convert_attachment_type_to_gl(attach_type);
glFramebufferTexture(GL_FRAMEBUFFER, gl_attachment, 0, 0);
}
static void gpu_framebuffer_update_attachments(FrameBuffer *fb)
{
GLenum gl_attachments[GPU_FB_MAX_COLOR_ATTACHMENT];
int numslots = 0;
BLI_assert(GPU_context_active_get()->active_fb == fb);
/* Update attachments */
FOREACH_ATTACHMENT_RANGE(type, 0, GPU_FB_MAX_ATTACHEMENT)
{
if (type >= GPU_FB_COLOR_ATTACHMENT0) {
if (fb->attachments[type].tex) {
gl_attachments[numslots] = convert_attachment_type_to_gl(type);
}
else {
gl_attachments[numslots] = GL_NONE;
}
numslots++;
}
if (GPU_FB_ATTACHEMENT_IS_DIRTY(fb->dirty_flag, type) == false) {
continue;
}
if (fb->attachments[type].tex != NULL) {
gpu_framebuffer_attachment_attach(&fb->attachments[type], type);
fb->multisample = (GPU_texture_samples(fb->attachments[type].tex) > 0);
fb->width = GPU_texture_width(fb->attachments[type].tex);
fb->height = GPU_texture_height(fb->attachments[type].tex);
}
else {
gpu_framebuffer_attachment_detach(&fb->attachments[type], type);
}
}
fb->dirty_flag = 0;
/* Update draw buffers (color targets)
* This state is saved in the FBO */
if (numslots) {
glDrawBuffers(numslots, gl_attachments);
}
else {
glDrawBuffer(GL_NONE);
}
}
/**
* Hack to solve the problem of some bugged AMD GPUs (see `GPU_unused_fb_slot_workaround`).
* If there is an empty color slot between the color slots,
* all textures after this slot are apparently skipped/discarded.
*/
static void gpu_framebuffer_update_attachments_and_fill_empty_slots(FrameBuffer *fb)
{
GLenum gl_attachments[GPU_FB_MAX_COLOR_ATTACHMENT];
int dummy_tex = 0;
BLI_assert(GPU_context_active_get()->active_fb == fb);
/* Update attachments */
for (int i_type = GPU_FB_MAX_ATTACHEMENT - 1; i_type >= 0; --i_type) {
GPUAttachmentType type = static_cast<GPUAttachmentType>(i_type);
GPUTexture *tex = fb->attachments[type].tex;
if (type >= GPU_FB_COLOR_ATTACHMENT0) {
int slot = type - GPU_FB_COLOR_ATTACHMENT0;
if (tex != NULL || (dummy_tex != 0)) {
gl_attachments[slot] = convert_attachment_type_to_gl(type);
if (dummy_tex == 0) {
dummy_tex = GPU_texture_opengl_bindcode(tex);
}
}
else {
gl_attachments[slot] = GL_NONE;
}
}
else {
dummy_tex = 0;
}
if ((dummy_tex != 0) && tex == NULL) {
/* Fill empty slot */
glFramebufferTexture(GL_FRAMEBUFFER, convert_attachment_type_to_gl(type), dummy_tex, 0);
}
else if (GPU_FB_ATTACHEMENT_IS_DIRTY(fb->dirty_flag, type)) {
if (tex != NULL) {
gpu_framebuffer_attachment_attach(&fb->attachments[type], type);
fb->multisample = (GPU_texture_samples(tex) > 0);
fb->width = GPU_texture_width(tex);
fb->height = GPU_texture_height(tex);
}
else {
gpu_framebuffer_attachment_detach(&fb->attachments[type], type);
}
}
}
fb->dirty_flag = 0;
/* Update draw buffers (color targets)
* This state is saved in the FBO */
glDrawBuffers(GPU_FB_MAX_COLOR_ATTACHMENT, gl_attachments);
}
#define FRAMEBUFFER_STACK_DEPTH 16
static struct {
GPUFrameBuffer *framebuffers[FRAMEBUFFER_STACK_DEPTH];
uint top;
} FrameBufferStack = {{0}};
static void gpuPushFrameBuffer(GPUFrameBuffer *fb)
{
BLI_assert(FrameBufferStack.top < FRAMEBUFFER_STACK_DEPTH);
FrameBufferStack.framebuffers[FrameBufferStack.top] = fb;
FrameBufferStack.top++;
}
static GPUFrameBuffer *gpuPopFrameBuffer(void)
{
BLI_assert(FrameBufferStack.top > 0);
FrameBufferStack.top--;
return FrameBufferStack.framebuffers[FrameBufferStack.top];
}
#undef FRAMEBUFFER_STACK_DEPTH
void GPU_framebuffer_bind(GPUFrameBuffer *gpu_fb)
{
FrameBuffer *fb = reinterpret_cast<FrameBuffer *>(gpu_fb);
if (fb->object == 0) {
gpu_framebuffer_init(fb);
GPUAttachmentType type = GPU_texture_stencil(depth_attachement.tex) ?
GPU_FB_DEPTH_STENCIL_ATTACHMENT :
GPU_FB_DEPTH_ATTACHMENT;
fb->attachment_set(type, depth_attachement);
}
if (GPU_framebuffer_active_get() != gpu_fb) {
glBindFramebuffer(GL_FRAMEBUFFER, fb->object);
glEnable(GL_FRAMEBUFFER_SRGB);
GPUTexture *first_target = fb->attachments[GPU_FB_COLOR_ATTACHMENT0].tex;
const bool is_srgb_target = (first_target &&
(GPU_texture_format(first_target) == GPU_SRGB8_A8));
GPU_shader_set_framebuffer_srgb_target(is_srgb_target);
GPUAttachmentType type = GPU_FB_COLOR_ATTACHMENT0;
for (const GPUAttachment &attachement : color_attachments) {
fb->attachment_set(type, attachement);
++type;
}
GPU_context_active_get()->active_fb = fb;
if (fb->dirty_flag != 0) {
if (GPU_unused_fb_slot_workaround()) {
/* XXX: Please AMD, fix this. */
gpu_framebuffer_update_attachments_and_fill_empty_slots(fb);
}
else {
gpu_framebuffer_update_attachments(fb);
}
}
/* TODO manually check for errors? */
#if 0
char err_out[256];
if (!GPU_framebuffer_check_valid(fb, err_out)) {
printf("Invalid %s\n", err_out);
}
#endif
GPU_viewport(0, 0, fb->width, fb->height);
}
/* Workaround for binding a srgb framebuffer without doing the srgb transform. */
void GPU_framebuffer_bind_no_srgb(GPUFrameBuffer *gpu_fb)
{
FrameBuffer *fb = reinterpret_cast<FrameBuffer *>(gpu_fb);
GPU_framebuffer_bind(gpu_fb);
glDisable(GL_FRAMEBUFFER_SRGB);
GPUTexture *first_target = fb->attachments[GPU_FB_COLOR_ATTACHMENT0].tex;
const bool is_srgb_target = (first_target && (GPU_texture_format(first_target) == GPU_SRGB8_A8));
GPU_shader_set_framebuffer_srgb_target(!is_srgb_target);
}
void GPU_framebuffer_restore(void)
{
if (GPU_framebuffer_active_get() != NULL) {
glBindFramebuffer(GL_FRAMEBUFFER, GPU_framebuffer_default());
GPU_context_active_get()->active_fb = NULL;
glDisable(GL_FRAMEBUFFER_SRGB);
GPU_shader_set_framebuffer_srgb_target(false);
}
}
bool GPU_framebuffer_bound(GPUFrameBuffer *gpu_fb)
{
FrameBuffer *fb = reinterpret_cast<FrameBuffer *>(gpu_fb);
return (gpu_fb == GPU_framebuffer_active_get()) && (fb->object != 0);
}
bool GPU_framebuffer_check_valid(GPUFrameBuffer *gpu_fb, char err_out[256])
{
if (!GPU_framebuffer_bound(gpu_fb)) {
GPU_framebuffer_bind(gpu_fb);
}
GLenum status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if (status != GL_FRAMEBUFFER_COMPLETE) {
GPU_framebuffer_restore();
gpu_print_framebuffer_error(status, err_out);
return false;
}
return true;
}
/* ---------- Framebuffer Operations ----------- */
@@ -567,121 +362,31 @@ void GPU_framebuffer_clear(GPUFrameBuffer *gpu_fb,
float clear_depth,
uint clear_stencil)
{
CHECK_FRAMEBUFFER_IS_BOUND(gpu_fb);
/* Save and restore the state. */
eGPUWriteMask write_mask = GPU_write_mask_get();
uint stencil_mask = GPU_stencil_mask_get();
eGPUStencilTest stencil_test = GPU_stencil_test_get();
if (buffers & GPU_COLOR_BIT) {
GPU_color_mask(true, true, true, true);
glClearColor(clear_col[0], clear_col[1], clear_col[2], clear_col[3]);
}
if (buffers & GPU_DEPTH_BIT) {
GPU_depth_mask(true);
glClearDepth(clear_depth);
}
if (buffers & GPU_STENCIL_BIT) {
GPU_stencil_write_mask_set(0xFFu);
GPU_stencil_test(GPU_STENCIL_ALWAYS);
glClearStencil(clear_stencil);
}
GPU_context_active_get()->state_manager->apply_state();
GLbitfield mask = convert_buffer_bits_to_gl(buffers);
glClear(mask);
if (buffers & (GPU_COLOR_BIT | GPU_DEPTH_BIT)) {
GPU_write_mask(write_mask);
}
if (buffers & GPU_STENCIL_BIT) {
GPU_stencil_write_mask_set(stencil_mask);
GPU_stencil_test(stencil_test);
}
reinterpret_cast<FrameBuffer *>(gpu_fb)->clear(buffers, clear_col, clear_depth, clear_stencil);
}
/* Clear all textures bound to this framebuffer with a different color. */
/* Clear all textures attached to this framebuffer with a different color. */
void GPU_framebuffer_multi_clear(GPUFrameBuffer *gpu_fb, const float (*clear_cols)[4])
{
CHECK_FRAMEBUFFER_IS_BOUND(gpu_fb);
reinterpret_cast<FrameBuffer *>(gpu_fb)->clear_multi(clear_cols);
}
FrameBuffer *fb = reinterpret_cast<FrameBuffer *>(gpu_fb);
void GPU_clear_color(float red, float green, float blue, float alpha)
{
float clear_col[4] = {red, green, blue, alpha};
GPU_context_active_get()->active_fb->clear(GPU_COLOR_BIT, clear_col, 0.0f, 0x0);
}
/* Save and restore the state. */
eGPUWriteMask write_mask = GPU_write_mask_get();
GPU_color_mask(true, true, true, true);
int i_type = GPU_FB_COLOR_ATTACHMENT0;
for (int i = 0; i_type < GPU_FB_MAX_ATTACHEMENT; i++, i_type++) {
GPUAttachmentType type = static_cast<GPUAttachmentType>(i_type);
if (fb->attachments[type].tex != NULL) {
glClearBufferfv(GL_COLOR, i, clear_cols[i]);
}
}
GPU_write_mask(write_mask);
void GPU_clear_depth(float depth)
{
float clear_col[4] = {0};
GPU_context_active_get()->active_fb->clear(GPU_DEPTH_BIT, clear_col, depth, 0x0);
}
void GPU_framebuffer_read_depth(GPUFrameBuffer *gpu_fb, int x, int y, int w, int h, float *data)
{
CHECK_FRAMEBUFFER_IS_BOUND(gpu_fb);
GLenum type = GL_DEPTH_COMPONENT;
glReadBuffer(GL_COLOR_ATTACHMENT0); /* This is OK! */
glReadPixels(x, y, w, h, type, GL_FLOAT, data);
}
static GLenum gpu_get_gl_datatype(eGPUDataFormat format)
{
switch (format) {
case GPU_DATA_FLOAT:
return GL_FLOAT;
case GPU_DATA_INT:
return GL_INT;
case GPU_DATA_UNSIGNED_INT:
return GL_UNSIGNED_INT;
case GPU_DATA_UNSIGNED_BYTE:
return GL_UNSIGNED_BYTE;
case GPU_DATA_UNSIGNED_INT_24_8:
return GL_UNSIGNED_INT_24_8;
case GPU_DATA_10_11_11_REV:
return GL_UNSIGNED_INT_10F_11F_11F_REV;
default:
BLI_assert(!"Unhandled data format");
return GL_FLOAT;
}
}
static GLenum gpu_get_gl_channel_type(int channels)
{
switch (channels) {
case 1:
return GL_RED;
case 2:
return GL_RG;
case 3:
return GL_RGB;
case 4:
return GL_RGBA;
default:
BLI_assert(!"Wrong number of read channels");
return GL_RED;
}
}
static void gpu_framebuffer_read_color_ex(
int x, int y, int w, int h, int channels, GLenum readfb, eGPUDataFormat format, float *data)
{
GLenum type = gpu_get_gl_channel_type(channels);
GLenum gl_format = gpu_get_gl_datatype(format);
/* TODO: needed for selection buffers to work properly, this should be handled better. */
if (type == GL_RED && gl_format == GL_UNSIGNED_INT) {
type = GL_RED_INTEGER;
}
glReadBuffer(readfb);
glReadPixels(x, y, w, h, type, gl_format, data);
int rect[4] = {x, y, w, h};
reinterpret_cast<FrameBuffer *>(gpu_fb)->read(GPU_DEPTH_BIT, GPU_DATA_FLOAT, rect, 1, 1, data);
}
void GPU_framebuffer_read_color(GPUFrameBuffer *gpu_fb,
@@ -694,12 +399,20 @@ void GPU_framebuffer_read_color(GPUFrameBuffer *gpu_fb,
eGPUDataFormat format,
void *data)
{
CHECK_FRAMEBUFFER_IS_BOUND(gpu_fb);
gpu_framebuffer_read_color_ex(
x, y, w, h, channels, GL_COLOR_ATTACHMENT0 + slot, format, (float *)data);
int rect[4] = {x, y, w, h};
reinterpret_cast<FrameBuffer *>(gpu_fb)->read(GPU_COLOR_BIT, format, rect, channels, slot, data);
}
/* TODO(fclem) rename to read_color. */
void GPU_frontbuffer_read_pixels(
int x, int y, int w, int h, int channels, eGPUDataFormat format, void *data)
{
int rect[4] = {x, y, w, h};
GPU_context_active_get()->front_left->read(GPU_COLOR_BIT, format, rect, channels, 0, data);
}
/* read_slot and write_slot are only used for color buffers. */
/* TODO(fclem) port as texture operation. */
void GPU_framebuffer_blit(GPUFrameBuffer *gpufb_read,
int read_slot,
GPUFrameBuffer *gpufb_write,
@@ -712,74 +425,36 @@ void GPU_framebuffer_blit(GPUFrameBuffer *gpufb_read,
FrameBuffer *prev_fb = GPU_context_active_get()->active_fb;
/* Framebuffers must be up to date. This simplify this function. */
if (fb_read->dirty_flag != 0 || fb_read->object == 0) {
GPU_framebuffer_bind(gpufb_read);
#ifndef NDEBUG
GPUTexture *read_tex, *write_tex;
if (blit_buffers & (GPU_DEPTH_BIT | GPU_STENCIL_BIT)) {
read_tex = fb_read->depth_tex();
write_tex = fb_write->depth_tex();
}
if (fb_write->dirty_flag != 0 || fb_write->object == 0) {
GPU_framebuffer_bind(gpufb_write);
else {
read_tex = fb_read->color_tex(read_slot);
write_tex = fb_write->color_tex(write_slot);
}
const bool do_color = (blit_buffers & GPU_COLOR_BIT);
const bool do_depth = (blit_buffers & GPU_DEPTH_BIT);
const bool do_stencil = (blit_buffers & GPU_STENCIL_BIT);
GPUTexture *read_tex = ((do_depth || do_stencil) ? fb_read->depth_tex() :
fb_read->color_tex(read_slot));
GPUTexture *write_tex = ((do_depth || do_stencil) ? fb_write->depth_tex() :
fb_write->color_tex(read_slot));
if (do_depth) {
if (blit_buffers & GPU_DEPTH_BIT) {
BLI_assert(GPU_texture_depth(read_tex) && GPU_texture_depth(write_tex));
BLI_assert(GPU_texture_format(read_tex) == GPU_texture_format(write_tex));
}
if (do_stencil) {
if (blit_buffers & GPU_STENCIL_BIT) {
BLI_assert(GPU_texture_stencil(read_tex) && GPU_texture_stencil(write_tex));
BLI_assert(GPU_texture_format(read_tex) == GPU_texture_format(write_tex));
}
if (GPU_texture_samples(write_tex) != 0 || GPU_texture_samples(read_tex) != 0) {
/* Can only blit multisample textures to another texture of the same size. */
BLI_assert((fb_read->width == fb_write->width) && (fb_read->height == fb_write->height));
BLI_assert((GPU_texture_width(write_tex) == GPU_texture_width(read_tex)) &&
(GPU_texture_height(write_tex) == GPU_texture_height(read_tex)));
}
#endif
glBindFramebuffer(GL_READ_FRAMEBUFFER, fb_read->object);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, fb_write->object);
fb_read->blit_to(blit_buffers, read_slot, fb_write, write_slot, 0, 0);
if (do_color) {
glReadBuffer(GL_COLOR_ATTACHMENT0 + read_slot);
glDrawBuffer(GL_COLOR_ATTACHMENT0 + write_slot);
/* XXX we messed with the glDrawBuffer, this will reset the
* glDrawBuffers the next time we bind fb_write. */
fb_write->dirty_flag = GPU_FB_DIRTY_DRAWBUFFER;
}
GLbitfield mask = convert_buffer_bits_to_gl(blit_buffers);
GPU_context_active_get()->state_manager->apply_state();
glBlitFramebuffer(0,
0,
fb_read->width,
fb_read->height,
0,
0,
fb_write->width,
fb_write->height,
mask,
GL_NEAREST);
/* Restore previous framebuffer */
if (fb_write == prev_fb) {
GPU_framebuffer_bind(gpufb_write); /* To update drawbuffers */
}
else if (prev_fb) {
glBindFramebuffer(GL_FRAMEBUFFER, prev_fb->object);
GPU_context_active_get()->active_fb = prev_fb;
}
else {
glBindFramebuffer(GL_FRAMEBUFFER, GPU_framebuffer_default());
GPU_context_active_get()->active_fb = NULL;
}
/* FIXME(fclem) sRGB is not saved. */
prev_fb->bind(true);
}
/**
@@ -792,77 +467,40 @@ void GPU_framebuffer_recursive_downsample(GPUFrameBuffer *gpu_fb,
void (*callback)(void *userData, int level),
void *userData)
{
GPUContext *ctx = GPU_context_active_get();
FrameBuffer *fb = reinterpret_cast<FrameBuffer *>(gpu_fb);
/* Framebuffer must be up to date and bound. This simplify this function. */
if (ctx->active_fb != fb || fb->dirty_flag != 0 || fb->object == 0) {
GPU_framebuffer_bind(gpu_fb);
}
/* HACK: We make the framebuffer appear not bound in order to
* not trigger any error in GPU_texture_bind(). */
FrameBuffer *prev_fb = ctx->active_fb;
ctx->active_fb = NULL;
int levels = floor(log2(max_ii(fb->width, fb->height)));
max_lvl = min_ii(max_lvl, levels);
int i;
int current_dim[2] = {fb->width, fb->height};
for (i = 1; i < max_lvl + 1; i++) {
/* calculate next viewport size */
current_dim[0] = max_ii(current_dim[0] / 2, 1);
current_dim[1] = max_ii(current_dim[1] / 2, 1);
for (int i_type = 0; i_type < GPU_FB_MAX_ATTACHEMENT; i_type++) {
GPUAttachmentType type = static_cast<GPUAttachmentType>(i_type);
if (fb->attachments[type].tex != NULL) {
/* Some Intel HDXXX have issue with rendering to a mipmap that is below
* the texture GL_TEXTURE_MAX_LEVEL. So even if it not correct, in this case
* we allow GL_TEXTURE_MAX_LEVEL to be one level lower. In practice it does work! */
int next_lvl = (GPU_mip_render_workaround()) ? i : i - 1;
/* bind next level for rendering but first restrict fetches only to previous level */
GPUTexture *tex = fb->attachments[type].tex;
GPU_texture_bind(tex, 0);
glTexParameteri(GPU_texture_target(tex), GL_TEXTURE_BASE_LEVEL, i - 1);
glTexParameteri(GPU_texture_target(tex), GL_TEXTURE_MAX_LEVEL, next_lvl);
GPU_texture_unbind(tex);
/* copy attachment and replace miplevel. */
GPUAttachment attachment = fb->attachments[type];
attachment.mip = i;
gpu_framebuffer_attachment_attach(&attachment, type);
}
}
BLI_assert(GL_FRAMEBUFFER_COMPLETE == glCheckFramebufferStatus(GL_FRAMEBUFFER));
GPU_viewport(0, 0, current_dim[0], current_dim[1]);
callback(userData, i);
if (current_dim[0] == 1 && current_dim[1] == 1) {
break;
}
}
for (int i_type = 0; i_type < GPU_FB_MAX_ATTACHEMENT; i_type++) {
GPUAttachmentType type = static_cast<GPUAttachmentType>(i_type);
if (fb->attachments[type].tex != NULL) {
/* reset mipmap level range */
GPUTexture *tex = fb->attachments[type].tex;
GPU_texture_bind(tex, 0);
glTexParameteri(GPU_texture_target(tex), GL_TEXTURE_BASE_LEVEL, 0);
glTexParameteri(GPU_texture_target(tex), GL_TEXTURE_MAX_LEVEL, i - 1);
GPU_texture_unbind(tex);
/* Reattach original level */
/* NOTE: This is not necessary but this makes the FBO config
* remain in sync with the GPUFrameBuffer config. */
gpu_framebuffer_attachment_attach(&fb->attachments[type], type);
}
}
ctx->active_fb = prev_fb;
reinterpret_cast<FrameBuffer *>(gpu_fb)->recursive_downsample(max_lvl, callback, userData);
}
/* GPUOffScreen */
/** \} */
/* -------------------------------------------------------------------- */
/** \name GPUOffScreen
*
* Container that holds a framebuffer and its textures.
* Might be bound to multiple contexts.
* \{ */
#define FRAMEBUFFER_STACK_DEPTH 16
static struct {
GPUFrameBuffer *framebuffers[FRAMEBUFFER_STACK_DEPTH];
uint top;
} FrameBufferStack = {{0}};
static void gpuPushFrameBuffer(GPUFrameBuffer *fb)
{
BLI_assert(FrameBufferStack.top < FRAMEBUFFER_STACK_DEPTH);
FrameBufferStack.framebuffers[FrameBufferStack.top] = fb;
FrameBufferStack.top++;
}
static GPUFrameBuffer *gpuPopFrameBuffer(void)
{
BLI_assert(FrameBufferStack.top > 0);
FrameBufferStack.top--;
return FrameBufferStack.framebuffers[FrameBufferStack.top];
}
#undef FRAMEBUFFER_STACK_DEPTH
#define MAX_CTX_FB_LEN 3
@@ -1013,35 +651,20 @@ void GPU_offscreen_unbind(GPUOffScreen *ofs, bool restore)
void GPU_offscreen_draw_to_screen(GPUOffScreen *ofs, int x, int y)
{
const int w = GPU_texture_width(ofs->color);
const int h = GPU_texture_height(ofs->color);
GPU_context_active_get()->state_manager->apply_state();
GPUContext *ctx = GPU_context_active_get();
FrameBuffer *ofs_fb = reinterpret_cast<FrameBuffer *>(gpu_offscreen_fb_get(ofs));
glBindFramebuffer(GL_READ_FRAMEBUFFER, ofs_fb->object);
GLenum status = glCheckFramebufferStatus(GL_READ_FRAMEBUFFER);
if (status == GL_FRAMEBUFFER_COMPLETE) {
glBlitFramebuffer(0, 0, w, h, x, y, x + w, y + h, GL_COLOR_BUFFER_BIT, GL_NEAREST);
}
else {
gpu_print_framebuffer_error(status, NULL);
}
glBindFramebuffer(GL_READ_FRAMEBUFFER, GPU_framebuffer_default());
ofs_fb->blit_to(GPU_COLOR_BIT, 0, ctx->active_fb, 0, x, y);
}
void GPU_offscreen_read_pixels(GPUOffScreen *ofs, eGPUDataFormat type, void *pixels)
void GPU_offscreen_read_pixels(GPUOffScreen *ofs, eGPUDataFormat format, void *pixels)
{
BLI_assert(ELEM(format, GPU_DATA_UNSIGNED_BYTE, GPU_DATA_FLOAT));
const int w = GPU_texture_width(ofs->color);
const int h = GPU_texture_height(ofs->color);
BLI_assert(ELEM(type, GPU_DATA_UNSIGNED_BYTE, GPU_DATA_FLOAT));
GLenum gl_type = (type == GPU_DATA_FLOAT) ? GL_FLOAT : GL_UNSIGNED_BYTE;
glReadPixels(0, 0, w, h, GL_RGBA, gl_type, pixels);
GPUFrameBuffer *ofs_fb = gpu_offscreen_fb_get(ofs);
GPU_framebuffer_read_color(ofs_fb, 0, 0, w, h, 4, 0, format, pixels);
}
int GPU_offscreen_width(const GPUOffScreen *ofs)
@@ -1070,42 +693,4 @@ void GPU_offscreen_viewport_data_get(GPUOffScreen *ofs,
*r_depth = ofs->depth;
}
void GPU_clear_color(float red, float green, float blue, float alpha)
{
BLI_assert((GPU_write_mask_get() & GPU_WRITE_COLOR) != 0);
GPU_context_active_get()->state_manager->apply_state();
glClearColor(red, green, blue, alpha);
glClear(GL_COLOR_BUFFER_BIT);
}
void GPU_clear_depth(float depth)
{
BLI_assert((GPU_write_mask_get() & GPU_WRITE_DEPTH) != 0);
GPU_context_active_get()->state_manager->apply_state();
glClearDepth(depth);
glClear(GL_DEPTH_BUFFER_BIT);
}
void GPU_frontbuffer_read_pixels(
int x, int y, int w, int h, int channels, eGPUDataFormat format, void *data)
{
gpu_framebuffer_read_color_ex(x, y, w, h, channels, GL_FRONT, format, (float *)data);
}
/* For stereo rendering. */
void GPU_backbuffer_bind(eGPUBackBuffer buffer)
{
if (buffer == GPU_BACKBUFFER) {
glDrawBuffer(GL_BACK);
}
else if (buffer == GPU_BACKBUFFER_LEFT) {
glDrawBuffer(GL_BACK_LEFT);
}
else if (buffer == GPU_BACKBUFFER_RIGHT) {
glDrawBuffer(GL_BACK_RIGHT);
}
}
/** \} */
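
As an illustrative sketch of the reworked C API above (drawing calls omitted;
assumes a window context, and a stereo quad-buffer context for the right buffer):

GPU_backbuffer_bind(GPU_BACKBUFFER_LEFT);   /* Binds the wrapped back_left framebuffer. */
/* ... draw left eye ... */
GPU_backbuffer_bind(GPU_BACKBUFFER_RIGHT);  /* back_right, only valid with stereo quad buffers. */
/* ... draw right eye ... */
GPU_framebuffer_restore();                  /* Back to back_left. */

/* Front buffer reads now go through the wrapped front_left framebuffer. */
float pixel[4];
GPU_frontbuffer_read_pixels(0, 0, 1, 1, 4, GPU_DATA_FLOAT, pixel);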

View File

@@ -19,19 +19,26 @@
/** \file
* \ingroup gpu
*
* GPU Framebuffer
* - this is a wrapper for an OpenGL framebuffer object (FBO). in practice
* multiple FBO's may be created.
* - actual FBO creation & config is deferred until GPU_framebuffer_bind or
* GPU_framebuffer_check_valid to allow creation & config while another
* opengl context is bound (since FBOs are not shared between ogl contexts).
*/
#pragma once
#include "BLI_span.hh"
#include "MEM_guardedalloc.h"
#include "GPU_framebuffer.h"
#include "glew-mx.h" /* For GLuint. To remove. */
struct GPUTexture;
typedef enum {
typedef enum GPUAttachmentType : int {
GPU_FB_DEPTH_ATTACHMENT = 0,
GPU_FB_DEPTH_STENCIL_ATTACHMENT,
GPU_FB_COLOR_ATTACHMENT0,
@@ -45,50 +52,99 @@ typedef enum {
/* Keep in mind that GL max is GL_MAX_DRAW_BUFFERS and is at least 8, corresponding to
* the maximum number of COLOR attachments specified by glDrawBuffers. */
GPU_FB_MAX_ATTACHEMENT,
GPU_FB_MAX_COLOR_ATTACHMENT = (GPU_FB_MAX_ATTACHEMENT - GPU_FB_COLOR_ATTACHMENT0),
} GPUAttachmentType;
inline constexpr GPUAttachmentType operator-(GPUAttachmentType a, int b)
{
return static_cast<GPUAttachmentType>(static_cast<int>(a) - b);
}
inline constexpr GPUAttachmentType operator+(GPUAttachmentType a, int b)
{
return static_cast<GPUAttachmentType>(static_cast<int>(a) + b);
}
inline GPUAttachmentType &operator++(GPUAttachmentType &a)
{
a = a + 1;
return a;
}
inline GPUAttachmentType &operator--(GPUAttachmentType &a)
{
a = a - 1;
return a;
}
namespace blender {
namespace gpu {
#define FOREACH_ATTACHMENT_RANGE(att, _start, _end) \
for (GPUAttachmentType att = static_cast<GPUAttachmentType>(_start); att < _end; \
att = static_cast<GPUAttachmentType>(att + 1))
#define GPU_FB_MAX_COLOR_ATTACHMENT (GPU_FB_MAX_ATTACHEMENT - GPU_FB_COLOR_ATTACHMENT0)
#define GPU_FB_DIRTY_DRAWBUFFER (1 << 15)
#define GPU_FB_ATTACHEMENT_IS_DIRTY(flag, type) ((flag & (1 << type)) != 0)
#define GPU_FB_ATTACHEMENT_SET_DIRTY(flag, type) (flag |= (1 << type))
#ifdef DEBUG
# define DEBUG_NAME_LEN 64
#else
# define DEBUG_NAME_LEN 16
#endif
class FrameBuffer {
public:
GPUContext *ctx;
GLuint object;
GPUAttachment attachments[GPU_FB_MAX_ATTACHEMENT];
uint16_t dirty_flag;
int width, height;
bool multisample;
/* TODO Check that we always use the right context when binding
* (FBOs are not shared across ogl contexts). */
// void *ctx;
protected:
/** Set of texture attachements to render to. DEPTH and DEPTH_STENCIL are mutualy exclusive. */
GPUAttachment attachments_[GPU_FB_MAX_ATTACHEMENT];
/** Is true if internal representation need to be updated. */
bool dirty_attachments_;
/** Size of attachement textures. */
int width_, height_;
/** Debug name. */
char name_[DEBUG_NAME_LEN];
public:
GPUTexture *depth_tex(void) const
FrameBuffer(const char *name);
virtual ~FrameBuffer();
virtual void bind(bool enabled_srgb) = 0;
virtual bool check(char err_out[256]) = 0;
virtual void clear(eGPUFrameBufferBits buffers,
const float clear_col[4],
float clear_depth,
uint clear_stencil) = 0;
virtual void clear_multi(const float (*clear_col)[4]) = 0;
virtual void read(eGPUFrameBufferBits planes,
eGPUDataFormat format,
const int area[4],
int channel_len,
int slot,
void *r_data) = 0;
virtual void blit_to(eGPUFrameBufferBits planes,
int src_slot,
FrameBuffer *dst,
int dst_slot,
int dst_offset_x,
int dst_offset_y) = 0;
void attachment_set(GPUAttachmentType type, const GPUAttachment &new_attachment);
void recursive_downsample(int max_lvl,
void (*callback)(void *userData, int level),
void *userData);
inline GPUTexture *depth_tex(void) const
{
if (attachments[GPU_FB_DEPTH_ATTACHMENT].tex) {
return attachments[GPU_FB_DEPTH_ATTACHMENT].tex;
if (attachments_[GPU_FB_DEPTH_ATTACHMENT].tex) {
return attachments_[GPU_FB_DEPTH_ATTACHMENT].tex;
}
return attachments[GPU_FB_DEPTH_STENCIL_ATTACHMENT].tex;
return attachments_[GPU_FB_DEPTH_STENCIL_ATTACHMENT].tex;
};
GPUTexture *color_tex(int slot) const
inline GPUTexture *color_tex(int slot) const
{
return attachments[GPU_FB_COLOR_ATTACHMENT0 + slot].tex;
return attachments_[GPU_FB_COLOR_ATTACHMENT0 + slot].tex;
};
MEM_CXX_CLASS_ALLOC_FUNCS("FrameBuffer")
};
#undef DEBUG_NAME_LEN
} // namespace gpu
} // namespace blender

View File

@@ -44,6 +44,7 @@
#include "GPU_texture.h"
#include "gpu_context_private.hh"
#include "gpu_framebuffer_private.hh"
#define WARN_NOT_BOUND(_tex) \
{ \
@@ -109,6 +110,9 @@ struct GPUTexture {
GPUContext *copy_fb_ctx;
};
using namespace blender;
using namespace blender::gpu;
static uint gpu_get_bytesize(eGPUTextureFormat data_type);
static void gpu_texture_framebuffer_ensure(GPUTexture *tex);
@@ -2020,7 +2024,8 @@ void GPU_texture_free(GPUTexture *tex)
if (tex->refcount == 0) {
for (int i = 0; i < GPU_TEX_MAX_FBO_ATTACHED; i++) {
if (tex->fb[i] != NULL) {
GPU_framebuffer_texture_detach_slot(tex->fb[i], tex, tex->fb_attachment[i]);
FrameBuffer *framebuffer = reinterpret_cast<FrameBuffer *>(tex->fb[i]);
framebuffer->attachment_set((GPUAttachmentType)tex->fb_attachment[i], GPU_ATTACHMENT_NONE);
}
}
@@ -2132,17 +2137,26 @@ void GPU_texture_attach_framebuffer(GPUTexture *tex, GPUFrameBuffer *fb, int att
}
/* Return previous attachment point */
int GPU_texture_detach_framebuffer(GPUTexture *tex, GPUFrameBuffer *fb)
void GPU_texture_detach_framebuffer(GPUTexture *tex, GPUFrameBuffer *fb)
{
for (int i = 0; i < GPU_TEX_MAX_FBO_ATTACHED; i++) {
if (tex->fb[i] == fb) {
tex->fb[i] = NULL;
return;
}
}
BLI_assert(!"Error: Texture: Framebuffer is not attached");
}
/* Return attachment type for the given framebuffer or -1 if not attached. */
int GPU_texture_framebuffer_attachement_get(GPUTexture *tex, GPUFrameBuffer *fb)
{
for (int i = 0; i < GPU_TEX_MAX_FBO_ATTACHED; i++) {
if (tex->fb[i] == fb) {
return tex->fb_attachment[i];
}
}
BLI_assert(!"Error: Texture: Framebuffer is not attached");
return 0;
return -1;
}
void GPU_texture_get_mipmap_size(GPUTexture *tex, int lvl, int *size)

View File

@@ -0,0 +1,53 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2020 Blender Foundation.
* All rights reserved.
*/
/** \file
* \ingroup gpu
*/
#pragma once
#include "BLI_assert.h"
namespace blender {
namespace gpu {
class Texture {
public:
/** TODO(fclem): make it a non-static function. */
static GPUAttachmentType attachment_type(GPUTexture *tex, int slot)
{
switch (GPU_texture_format(tex)) {
case GPU_DEPTH_COMPONENT32F:
case GPU_DEPTH_COMPONENT24:
case GPU_DEPTH_COMPONENT16:
BLI_assert(slot == 0);
return GPU_FB_DEPTH_ATTACHMENT;
case GPU_DEPTH24_STENCIL8:
case GPU_DEPTH32F_STENCIL8:
BLI_assert(slot == 0);
return GPU_FB_DEPTH_STENCIL_ATTACHMENT;
default:
return static_cast<GPUAttachmentType>(GPU_FB_COLOR_ATTACHMENT0 + slot);
}
}
};
} // namespace gpu
} // namespace blender

View File

@@ -30,6 +30,7 @@
#include "gl_batch.hh"
#include "gl_context.hh"
#include "gl_drawlist.hh"
#include "gl_framebuffer.hh"
#include "gl_shader.hh"
#include "gl_uniform_buffer.hh"
@@ -61,6 +62,11 @@ class GLBackend : public GPUBackend {
return new GLDrawList(list_length);
};
FrameBuffer *framebuffer_alloc(const char *name)
{
return new GLFrameBuffer(name);
};
Shader *shader_alloc(const char *name)
{
return new GLShader(name);

View File

@@ -59,6 +59,35 @@ GLContext::GLContext(void *ghost_window, GLSharedOrphanLists &shared_orphan_list
glBindBuffer(GL_ARRAY_BUFFER, 0);
state_manager = new GLStateManager();
if (ghost_window) {
GLuint default_fbo = GHOST_GetDefaultOpenGLFramebuffer((GHOST_WindowHandle)ghost_window);
GHOST_RectangleHandle bounds = GHOST_GetClientBounds((GHOST_WindowHandle)ghost_window);
int w = GHOST_GetWidthRectangle(bounds);
int h = GHOST_GetHeightRectangle(bounds);
GHOST_DisposeRectangle(bounds);
if (default_fbo != 0) {
front_left = new GLFrameBuffer("front_left", this, GL_COLOR_ATTACHMENT0, default_fbo, w, h);
back_left = new GLFrameBuffer("back_left", this, GL_COLOR_ATTACHMENT0, default_fbo, w, h);
}
else {
front_left = new GLFrameBuffer("front_left", this, GL_FRONT_LEFT, 0, w, h);
back_left = new GLFrameBuffer("back_left", this, GL_BACK_LEFT, 0, w, h);
}
/* TODO(fclem) enable is supported. */
const bool supports_stereo_quad_buffer = false;
if (supports_stereo_quad_buffer) {
front_right = new GLFrameBuffer("front_right", this, GL_FRONT_RIGHT, 0, w, h);
back_right = new GLFrameBuffer("back_right", this, GL_BACK_RIGHT, 0, w, h);
}
}
else {
/* For offscreen contexts. Default framebuffer is NULL. */
back_left = new GLFrameBuffer("back_left", this, GL_NONE, 0, 0, 0);
}
active_fb = back_left;
}
GLContext::~GLContext()
@@ -73,6 +102,19 @@ GLContext::~GLContext()
}
glDeleteVertexArrays(1, &default_vao_);
glDeleteBuffers(1, &default_attr_vbo_);
if (front_left) {
delete front_left;
}
if (back_left) {
delete back_left;
}
if (front_right) {
delete front_right;
}
if (back_right) {
delete back_right;
}
}
/** \} */

View File

@@ -0,0 +1,420 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2020 Blender Foundation.
* All rights reserved.
*/
/** \file
* \ingroup gpu
*/
#include "BKE_global.h"
#include "GPU_extensions.h"
#include "gl_backend.hh"
#include "gl_framebuffer.hh"
#include "gl_texture.hh"
namespace blender::gpu {
/* -------------------------------------------------------------------- */
/** \name Creation & Deletion
* \{ */
GLFrameBuffer::GLFrameBuffer(const char *name) : FrameBuffer(name)
{
/* Just-In-Time init. See GLFrameBuffer::init(). */
immutable_ = false;
fbo_id_ = 0;
}
GLFrameBuffer::GLFrameBuffer(
const char *name, GLContext *ctx, GLenum target, GLuint fbo, int w, int h)
: FrameBuffer(name)
{
context_ = ctx;
immutable_ = true;
fbo_id_ = fbo;
gl_attachments_[0] = target;
/* Never update an internal framebuffer. */
dirty_attachments_ = false;
width_ = w;
height_ = h;
srgb_ = false;
#ifndef __APPLE__
if (fbo_id_ && (G.debug & G_DEBUG_GPU) && (GLEW_VERSION_4_3 || GLEW_KHR_debug)) {
char sh_name[32];
SNPRINTF(sh_name, "FrameBuffer-%s", name);
glObjectLabel(GL_FRAMEBUFFER, fbo_id_, -1, sh_name);
}
#endif
}
GLFrameBuffer::~GLFrameBuffer()
{
if (context_ != NULL) {
context_->fbo_free(fbo_id_);
/* Restore default framebuffer if this framebuffer was bound. */
if (context_->active_fb == this && context_->back_left != this) {
/* If this assert triggers it means the framebuffer is being freed while in use by another
* context which, by the way, is TOTALLY UNSAFE!!! */
BLI_assert(context_ == GPU_context_active_get());
GPU_framebuffer_restore();
}
}
}
void GLFrameBuffer::init(void)
{
context_ = static_cast<GLContext *>(GPU_context_active_get());
glGenFramebuffers(1, &fbo_id_);
#ifndef __APPLE__
if ((G.debug & G_DEBUG_GPU) && (GLEW_VERSION_4_3 || GLEW_KHR_debug)) {
char sh_name[64];
SNPRINTF(sh_name, "FrameBuffer-%s", name_);
/* Binding before setting the label is needed on some drivers. */
glBindFramebuffer(GL_FRAMEBUFFER, fbo_id_);
glObjectLabel(GL_FRAMEBUFFER, fbo_id_, -1, sh_name);
}
#endif
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Config
* \{ */
/* This is a rather slow operation. Don't check in normal cases. */
bool GLFrameBuffer::check(char err_out[256])
{
this->bind(true);
GLenum status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
#define FORMAT_STATUS(X) \
case X: { \
err = #X; \
break; \
}
const char *err;
switch (status) {
FORMAT_STATUS(GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT);
FORMAT_STATUS(GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT);
FORMAT_STATUS(GL_FRAMEBUFFER_UNSUPPORTED);
FORMAT_STATUS(GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER);
FORMAT_STATUS(GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER);
FORMAT_STATUS(GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE);
FORMAT_STATUS(GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS);
FORMAT_STATUS(GL_FRAMEBUFFER_UNDEFINED);
case GL_FRAMEBUFFER_COMPLETE:
return true;
default:
err = "unknown";
break;
}
#undef FORMAT_STATUS
const char *format = "GPUFrameBuffer: framebuffer status %s\n";
if (err_out) {
BLI_snprintf(err_out, 256, format, err);
}
else {
fprintf(stderr, format, err);
}
return false;
}
void GLFrameBuffer::update_attachments(void)
{
/* Default framebuffers cannot have attachements. */
BLI_assert(immutable_ == false);
/* First color texture OR the depth texture if no color is attached.
* Used to determine framebuffer colorspace and dimensions. */
GPUAttachmentType first_attachment = GPU_FB_MAX_ATTACHEMENT;
/* NOTE: Inverse iteration to get the first color texture. */
for (GPUAttachmentType type = GPU_FB_MAX_ATTACHEMENT - 1; type >= 0; --type) {
GPUAttachment &attach = attachments_[type];
GLenum gl_attachment = to_gl(type);
if (type >= GPU_FB_COLOR_ATTACHMENT0) {
gl_attachments_[type - GPU_FB_COLOR_ATTACHMENT0] = (attach.tex) ? gl_attachment : GL_NONE;
first_attachment = (attach.tex) ? type : first_attachment;
}
else if (first_attachment == GPU_FB_MAX_ATTACHEMENT) {
/* Only use the depth texture to get info if there is no color attachment. */
first_attachment = (attach.tex) ? type : first_attachment;
}
if (attach.tex == NULL) {
glFramebufferTexture(GL_FRAMEBUFFER, gl_attachment, 0, 0);
continue;
}
GLuint gl_tex = GPU_texture_opengl_bindcode(attach.tex);
if (attach.layer > -1 && GPU_texture_cube(attach.tex) && !GPU_texture_array(attach.tex)) {
/* Could be avoided if ARB_direct_state_access is required. In this case
* glFramebufferTextureLayer would bind the correct face. */
GLenum gl_target = GL_TEXTURE_CUBE_MAP_POSITIVE_X + attach.layer;
glFramebufferTexture2D(GL_FRAMEBUFFER, gl_attachment, gl_target, gl_tex, attach.mip);
}
else if (attach.layer > -1) {
glFramebufferTextureLayer(GL_FRAMEBUFFER, gl_attachment, gl_tex, attach.mip, attach.layer);
}
else {
/* The whole texture level is attached. The framebuffer is potentially layered. */
glFramebufferTexture(GL_FRAMEBUFFER, gl_attachment, gl_tex, attach.mip);
}
/* We found one depth buffer type. Stop here, otherwise we would
* override it by setting GPU_FB_DEPTH_ATTACHMENT */
if (type == GPU_FB_DEPTH_STENCIL_ATTACHMENT) {
break;
}
}
if (GPU_unused_fb_slot_workaround()) {
/* Fill normally un-occupied slots to avoid rendering artifacts on some hardware. */
GLuint gl_tex = 0;
/* NOTE: Inverse iteration so that empty slots are filled with the closest attached texture above them. */
for (int i = ARRAY_SIZE(gl_attachments_) - 1; i >= 0; --i) {
GPUAttachmentType type = GPU_FB_COLOR_ATTACHMENT0 + i;
GPUAttachment &attach = attachments_[type];
if (attach.tex != NULL) {
gl_tex = GPU_texture_opengl_bindcode(attach.tex);
}
else if (gl_tex != 0) {
GLenum gl_attachment = to_gl(type);
gl_attachments_[i] = gl_attachment;
glFramebufferTexture(GL_FRAMEBUFFER, gl_attachment, gl_tex, 0);
}
}
}
if (first_attachment != GPU_FB_MAX_ATTACHEMENT) {
GPUAttachment &attach = attachments_[first_attachment];
int size[3];
GPU_texture_get_mipmap_size(attach.tex, attach.mip, size);
width_ = size[0];
height_ = size[1];
srgb_ = (GPU_texture_format(attach.tex) == GPU_SRGB8_A8);
}
dirty_attachments_ = false;
glDrawBuffers(ARRAY_SIZE(gl_attachments_), gl_attachments_);
if (G.debug & G_DEBUG_GPU) {
BLI_assert(this->check(NULL));
}
}
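/* Illustrative sketch only: the attachments_ array consumed above is filled on the GPU module
* side, typically through GPU_framebuffer_ensure_config(). `fb`, `depth_tx` and `color_tx` are
* hypothetical; slot 0 is the depth attachment, the following slots are color attachments.
*
*   GPU_framebuffer_ensure_config(&fb,
*                                 {
*                                     GPU_ATTACHMENT_TEXTURE(depth_tx),
*                                     GPU_ATTACHMENT_TEXTURE(color_tx),
*                                 });
*/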
/** \} */
/* -------------------------------------------------------------------- */
/** \name Binding
* \{ */
void GLFrameBuffer::bind(bool enabled_srgb)
{
GPUContext *ctx = GPU_context_active_get();
BLI_assert(ctx);
if (context_ != NULL && context_ != ctx) {
BLI_assert(!"Trying to use the same framebuffer in multiple context");
}
if (!immutable_ && fbo_id_ == 0) {
this->init();
}
if (ctx->active_fb != this) {
glBindFramebuffer(GL_FRAMEBUFFER, fbo_id_);
/* Internal framebuffers have only one color output, and it needs to be set every time. */
if (immutable_ && fbo_id_ == 0) {
glDrawBuffer(gl_attachments_[0]);
}
}
if (dirty_attachments_) {
this->update_attachments();
}
if (ctx->active_fb != this) {
ctx->active_fb = this;
if (enabled_srgb) {
glEnable(GL_FRAMEBUFFER_SRGB);
}
else {
glDisable(GL_FRAMEBUFFER_SRGB);
}
GPU_shader_set_framebuffer_srgb_target(enabled_srgb && srgb_);
}
GPU_viewport(0, 0, width_, height_);
}
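/* Illustrative sketch only: typical bind/restore pattern from the GPU module, assuming `fb` is a
* hypothetical offscreen framebuffer. GPU_framebuffer_restore() re-binds the default (window)
* framebuffer, which is itself wrapped in a GLFrameBuffer.
*
*   GPU_framebuffer_bind(fb);
*   // ... issue draw calls ...
*   GPU_framebuffer_restore();
*/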
/** \} */
/* -------------------------------------------------------------------- */
/** \name Operations
* \{ */
void GLFrameBuffer::clear(eGPUFrameBufferBits buffers,
const float clear_col[4],
float clear_depth,
uint clear_stencil)
{
/* Save and restore the state. */
eGPUWriteMask write_mask = GPU_write_mask_get();
uint stencil_mask = GPU_stencil_mask_get();
eGPUStencilTest stencil_test = GPU_stencil_test_get();
if (buffers & GPU_COLOR_BIT) {
GPU_color_mask(true, true, true, true);
glClearColor(clear_col[0], clear_col[1], clear_col[2], clear_col[3]);
}
if (buffers & GPU_DEPTH_BIT) {
GPU_depth_mask(true);
glClearDepth(clear_depth);
}
if (buffers & GPU_STENCIL_BIT) {
GPU_stencil_write_mask_set(0xFFu);
GPU_stencil_test(GPU_STENCIL_ALWAYS);
glClearStencil(clear_stencil);
}
context_->state_manager->apply_state();
GLbitfield mask = to_gl(buffers);
glClear(mask);
if (buffers & (GPU_COLOR_BIT | GPU_DEPTH_BIT)) {
GPU_write_mask(write_mask);
}
if (buffers & GPU_STENCIL_BIT) {
GPU_stencil_write_mask_set(stencil_mask);
GPU_stencil_test(stencil_test);
}
}
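/* Illustrative sketch only: the usual entry point is GPU_framebuffer_clear() (or one of its
* convenience wrappers), assuming `fb` is a hypothetical framebuffer.
*
*   const float color[4] = {0.0f, 0.0f, 0.0f, 1.0f};
*   GPU_framebuffer_clear(fb, GPU_COLOR_BIT | GPU_DEPTH_BIT, color, 1.0f, 0x00);
*/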
void GLFrameBuffer::clear_multi(const float (*clear_cols)[4])
{
/* Save and restore the state. */
eGPUWriteMask write_mask = GPU_write_mask_get();
GPU_color_mask(true, true, true, true);
context_->state_manager->apply_state();
/* WATCH: This can easily access clear_cols out of bounds if clear_cols is not big enough for
* all attachments.
* TODO(fclem): fix this unsafe access. */
int type = GPU_FB_COLOR_ATTACHMENT0;
for (int i = 0; type < GPU_FB_MAX_ATTACHEMENT; i++, type++) {
if (attachments_[type].tex != NULL) {
glClearBufferfv(GL_COLOR, i, clear_cols[i]);
}
}
GPU_write_mask(write_mask);
}
void GLFrameBuffer::read(eGPUFrameBufferBits plane,
eGPUDataFormat data_format,
const int area[4],
int channel_len,
int slot,
void *r_data)
{
GLenum format, type, mode;
mode = gl_attachments_[slot];
type = to_gl(data_format);
switch (plane) {
case GPU_DEPTH_BIT:
format = GL_DEPTH_COMPONENT;
break;
case GPU_COLOR_BIT:
format = channel_len_to_gl(channel_len);
/* TODO: needed for selection buffers to work properly, this should be handled better. */
if (format == GL_RED && type == GL_UNSIGNED_INT) {
format = GL_RED_INTEGER;
}
break;
case GPU_STENCIL_BIT:
fprintf(stderr, "GPUFramebuffer: Error: Trying to read stencil bit. Unsupported.");
return;
default:
fprintf(stderr, "GPUFramebuffer: Error: Trying to read more than one framebuffer plane.");
return;
}
glBindFramebuffer(GL_READ_FRAMEBUFFER, fbo_id_);
glReadBuffer(mode);
glReadPixels(UNPACK4(area), format, type, r_data);
}
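/* Illustrative sketch only: reading back a single RGBA float pixel from color slot 0 through a
* GLFrameBuffer pointer `fb`. `x` and `y` are hypothetical window-space coordinates; `area` is
* laid out as {x, y, w, h}.
*
*   float pixel[4];
*   const int area[4] = {x, y, 1, 1};
*   fb->read(GPU_COLOR_BIT, GPU_DATA_FLOAT, area, 4, 0, pixel);
*/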
/* Copy src at the given offset inside dst. */
void GLFrameBuffer::blit_to(
eGPUFrameBufferBits planes, int src_slot, FrameBuffer *dst_, int dst_slot, int x, int y)
{
GLFrameBuffer *src = this;
GLFrameBuffer *dst = static_cast<GLFrameBuffer *>(dst_);
/* Framebuffers must be up to date. This simplifies this function. */
if (src->dirty_attachments_) {
src->bind(true);
}
if (dst->dirty_attachments_) {
dst->bind(true);
}
glBindFramebuffer(GL_READ_FRAMEBUFFER, src->fbo_id_);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, dst->fbo_id_);
if (planes & GPU_COLOR_BIT) {
BLI_assert(src->immutable_ == false || src_slot == 0);
BLI_assert(dst->immutable_ == false || dst_slot == 0);
BLI_assert(src->gl_attachments_[src_slot] != GL_NONE);
BLI_assert(dst->gl_attachments_[dst_slot] != GL_NONE);
glReadBuffer(src->gl_attachments_[src_slot]);
glDrawBuffer(dst->gl_attachments_[dst_slot]);
}
GPU_context_active_get()->state_manager->apply_state();
int w = src->width_;
int h = src->height_;
GLbitfield mask = to_gl(planes);
glBlitFramebuffer(0, 0, w, h, x, y, x + w, y + h, mask, GL_NEAREST);
if (!dst->immutable_) {
/* Restore the draw buffers. */
glDrawBuffers(ARRAY_SIZE(dst->gl_attachments_), dst->gl_attachments_);
}
}
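/* Illustrative sketch only: from the GPU module this is reachable through the public
* GPU_framebuffer_blit() call, assuming `src_fb` and `dst_fb` are hypothetical framebuffers with
* a color attachment in slot 0.
*
*   GPU_framebuffer_blit(src_fb, 0, dst_fb, 0, GPU_COLOR_BIT);
*/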
/** \} */
} // namespace blender::gpu

View File

@@ -0,0 +1,142 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2020 Blender Foundation.
* All rights reserved.
*/
/** \file
* \ingroup gpu
*
* Encapsulation of Framebuffer states (attached textures, viewport, scissors).
*/
#pragma once
#include "MEM_guardedalloc.h"
#include "glew-mx.h"
#include "gpu_framebuffer_private.hh"
namespace blender::gpu {
/**
* Implementation of FrameBuffer object using OpenGL.
**/
class GLFrameBuffer : public FrameBuffer {
private:
/** OpenGL handle. */
GLuint fbo_id_ = 0;
/** Context the handle is from. Framebuffers are not shared across contexts. */
GLContext *context_ = NULL;
/** Copy of the GL state. Contains ONLY color attachment enums for slot binding. */
GLenum gl_attachments_[GPU_FB_MAX_COLOR_ATTACHMENT];
/** Internal framebuffers are immutable. */
bool immutable_;
/** True if the framebuffer has its first color target using the GPU_SRGB8_A8 format. */
bool srgb_;
public:
/**
* Create a conventional framebuffer to attach textures to.
**/
GLFrameBuffer(const char *name);
/**
* Special Framebuffer encapsulating internal window framebuffer.
* (i.e.: GL_FRONT_LEFT, GL_BACK_RIGHT, ...)
* @param ctx context the handle is from.
* @param target the internal GL name (i.e: GL_BACK_LEFT).
* @param fbo the (optional) already created object for some implementation. Default is 0.
* @param w buffer width.
* @param h buffer height.
**/
GLFrameBuffer(const char *name, GLContext *ctx, GLenum target, GLuint fbo, int w, int h);
~GLFrameBuffer();
void bind(bool enabled_srgb) override;
bool check(char err_out[256]) override;
void clear(eGPUFrameBufferBits buffers,
const float clear_col[4],
float clear_depth,
uint clear_stencil) override;
void clear_multi(const float (*clear_cols)[4]) override;
void read(eGPUFrameBufferBits planes,
eGPUDataFormat format,
const int area[4],
int channel_len,
int slot,
void *r_data) override;
void blit_to(eGPUFrameBufferBits planes,
int src_slot,
FrameBuffer *dst,
int dst_slot,
int dst_offset_x,
int dst_offset_y) override;
private:
void init(void);
void update_attachments(void);
void update_drawbuffers(void);
MEM_CXX_CLASS_ALLOC_FUNCS("GLFrameBuffer");
};
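/* Illustrative sketch only (assumption, not part of this header): how a backend could wrap the
* window back buffer with the immutable constructor above so it is tracked like any other
* framebuffer. `ctx`, `w` and `h` are hypothetical.
*
*   GLFrameBuffer *back_left = new GLFrameBuffer("back_left", ctx, GL_BACK_LEFT, 0, w, h);
*   back_left->bind(false);
*/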
/* -------------------------------------------------------------------- */
/** \name Enums Conversion
* \{ */
static inline GLenum to_gl(const GPUAttachmentType type)
{
#define ATTACHMENT(X) \
case GPU_FB_##X: { \
return GL_##X; \
} \
((void)0)
switch (type) {
ATTACHMENT(DEPTH_ATTACHMENT);
ATTACHMENT(DEPTH_STENCIL_ATTACHMENT);
ATTACHMENT(COLOR_ATTACHMENT0);
ATTACHMENT(COLOR_ATTACHMENT1);
ATTACHMENT(COLOR_ATTACHMENT2);
ATTACHMENT(COLOR_ATTACHMENT3);
ATTACHMENT(COLOR_ATTACHMENT4);
ATTACHMENT(COLOR_ATTACHMENT5);
default:
BLI_assert(0);
return GL_COLOR_ATTACHMENT0;
}
#undef ATTACHMENT
}
static inline GLbitfield to_gl(const eGPUFrameBufferBits bits)
{
GLbitfield mask = 0;
mask |= (bits & GPU_DEPTH_BIT) ? GL_DEPTH_BUFFER_BIT : 0;
mask |= (bits & GPU_STENCIL_BIT) ? GL_STENCIL_BUFFER_BIT : 0;
mask |= (bits & GPU_COLOR_BIT) ? GL_COLOR_BUFFER_BIT : 0;
return mask;
}
/** \} */
} // namespace blender::gpu

View File

@@ -0,0 +1,81 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2020 Blender Foundation.
* All rights reserved.
*/
/** \file
* \ingroup gpu
*/
#pragma once
#include "BLI_assert.h"
#include "glew-mx.h"
namespace blender {
namespace gpu {
static GLenum to_gl(eGPUDataFormat format)
{
switch (format) {
case GPU_DATA_FLOAT:
return GL_FLOAT;
case GPU_DATA_INT:
return GL_INT;
case GPU_DATA_UNSIGNED_INT:
return GL_UNSIGNED_INT;
case GPU_DATA_UNSIGNED_BYTE:
return GL_UNSIGNED_BYTE;
case GPU_DATA_UNSIGNED_INT_24_8:
return GL_UNSIGNED_INT_24_8;
case GPU_DATA_10_11_11_REV:
return GL_UNSIGNED_INT_10F_11F_11F_REV;
default:
BLI_assert(!"Unhandled data format");
return GL_FLOAT;
}
}
/* Assume Unorm / Float target. Used with glReadPixels. */
static GLenum channel_len_to_gl(int channel_len)
{
switch (channel_len) {
case 1:
return GL_RED;
case 2:
return GL_RG;
case 3:
return GL_RGB;
case 4:
return GL_RGBA;
default:
BLI_assert(!"Wrong number of texture channels");
return GL_RED;
}
}
} // namespace gpu
} // namespace blender

View File

@@ -826,13 +826,11 @@ static void wm_draw_window(bContext *C, wmWindow *win)
}
else if (win->stereo3d_format->display_mode == S3D_DISPLAY_PAGEFLIP) {
/* For pageflip we simply draw to both back buffers. */
GPU_backbuffer_bind(GPU_BACKBUFFER_LEFT);
wm_draw_window_onscreen(C, win, 0);
GPU_backbuffer_bind(GPU_BACKBUFFER_RIGHT);
wm_draw_window_onscreen(C, win, 1);
GPU_backbuffer_bind(GPU_BACKBUFFER);
GPU_backbuffer_bind(GPU_BACKBUFFER_LEFT);
wm_draw_window_onscreen(C, win, 0);
}
else if (ELEM(win->stereo3d_format->display_mode, S3D_DISPLAY_ANAGLYPH, S3D_DISPLAY_INTERLACE)) {
/* For anaglyph and interlace, we draw individual regions with

View File

@@ -84,7 +84,7 @@ void wm_surface_set_drawable(wmSurface *surface, bool activate)
void wm_surface_make_drawable(wmSurface *surface)
{
BLI_assert(GPU_framebuffer_active_get() == NULL);
BLI_assert(GPU_framebuffer_active_get() == GPU_framebuffer_back_get());
if (surface != g_drawable) {
wm_surface_clear_drawable();
@@ -95,7 +95,7 @@ void wm_surface_make_drawable(wmSurface *surface)
void wm_surface_reset_drawable(void)
{
BLI_assert(BLI_thread_is_main());
BLI_assert(GPU_framebuffer_active_get() == NULL);
BLI_assert(GPU_framebuffer_active_get() == GPU_framebuffer_back_get());
if (g_drawable) {
wm_surface_clear_drawable();

View File

@@ -1115,7 +1115,7 @@ void wm_window_clear_drawable(wmWindowManager *wm)
void wm_window_make_drawable(wmWindowManager *wm, wmWindow *win)
{
BLI_assert(GPU_framebuffer_active_get() == NULL);
BLI_assert(GPU_framebuffer_active_get() == GPU_framebuffer_back_get());
if (win != wm->windrawable && win->ghostwin) {
// win->lmbut = 0; /* keeps hanging when mousepressed while other window opened */
@@ -1136,7 +1136,7 @@ void wm_window_make_drawable(wmWindowManager *wm, wmWindow *win)
void wm_window_reset_drawable(void)
{
BLI_assert(BLI_thread_is_main());
BLI_assert(GPU_framebuffer_active_get() == NULL);
BLI_assert(GPU_framebuffer_active_get() == GPU_framebuffer_back_get());
wmWindowManager *wm = G_MAIN->wm.first;
if (wm == NULL) {
@@ -2487,25 +2487,25 @@ void *WM_opengl_context_create(void)
* So we should call this function only on the main thread.
*/
BLI_assert(BLI_thread_is_main());
BLI_assert(GPU_framebuffer_active_get() == NULL);
BLI_assert(GPU_framebuffer_active_get() == GPU_framebuffer_back_get());
return GHOST_CreateOpenGLContext(g_system);
}
void WM_opengl_context_dispose(void *context)
{
BLI_assert(GPU_framebuffer_active_get() == NULL);
BLI_assert(GPU_framebuffer_active_get() == GPU_framebuffer_back_get());
GHOST_DisposeOpenGLContext(g_system, (GHOST_ContextHandle)context);
}
void WM_opengl_context_activate(void *context)
{
BLI_assert(GPU_framebuffer_active_get() == NULL);
BLI_assert(GPU_framebuffer_active_get() == GPU_framebuffer_back_get());
GHOST_ActivateOpenGLContext((GHOST_ContextHandle)context);
}
void WM_opengl_context_release(void *context)
{
BLI_assert(GPU_framebuffer_active_get() == NULL);
BLI_assert(GPU_framebuffer_active_get() == GPU_framebuffer_back_get());
GHOST_ReleaseOpenGLContext((GHOST_ContextHandle)context);
}