Listing the "Blender Foundation" as copyright holder implied the Blender Foundation holds copyright to files which may include work from many developers. While keeping copyright on headers makes sense for isolated libraries, Blender's own code may be refactored or moved between files in a way that makes the per file copyright holders less meaningful. Copyright references to the "Blender Foundation" have been replaced with "Blender Authors", with the exception of `./extern/` since these this contains libraries which are more isolated, any changed to license headers there can be handled on a case-by-case basis. Some directories in `./intern/` have also been excluded: - `./intern/cycles/` it's own `AUTHORS` file is planned. - `./intern/opensubdiv/`. An "AUTHORS" file has been added, using the chromium projects authors file as a template. Design task: #110784 Ref !110783.
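For reference, the change to a first-party license header looks like:

  Before: /* SPDX-FileCopyrightText: 2014 Blender Foundation
  After:  /* SPDX-FileCopyrightText: 2014 Blender Authors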
325 lines · 8.4 KiB · C++
/* SPDX-FileCopyrightText: 2014 Blender Authors
 *
 * SPDX-License-Identifier: GPL-2.0-or-later */

/** \file
 * \ingroup gpu
 *
 * Interface for accessing GPU-related methods for selection. The semantics are
 * similar to `glRenderMode(GL_SELECT)` from older OpenGL versions.
 */
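/* A minimal usage sketch (illustrative only: the buffer size, query region and
 * draw loop are assumptions, not part of this file):
 *
 *   GPUSelectResult buffer[64];
 *   GPU_select_begin(buffer, 64, &rect, GPU_SELECT_PICK_ALL, 0);
 *   ... draw the scene, calling GPU_select_load_id(id) before each selectable item ...
 *   const uint hits = GPU_select_end();
 */
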
#include <cstdlib>
#include <cstring>

#include "DNA_userdef_types.h"

#include "GPU_select.h"

#include "BLI_rect.h"

#include "BLI_utildefines.h"

#include "gpu_select_private.h"

/* -------------------------------------------------------------------- */
/** \name Internal Types
 * \{ */

/* Internal algorithm used */
enum eGPUSelectAlgo {
  /**
   * `glBegin/EndQuery(GL_SAMPLES_PASSED... )`, `gpu_select_query.c`
   * Only sets 4th component (ID) correctly.
   */
  ALGO_GL_QUERY = 1,
  /**
   * Read depth buffer for every drawing pass and extract depths, `gpu_select_pick.cc`
   * Only sets 4th component (ID) correctly.
   */
  ALGO_GL_PICK = 2,
  /** Use Select-Next draw engine. */
  ALGO_SELECT_NEXT = 3,
};

struct GPUSelectState {
  /* To ignore selection id calls when not initialized. */
  bool select_is_active;
  /* Mode of operation. */
  eGPUSelectMode mode;
  /* Internal algorithm for selection. */
  eGPUSelectAlgo algorithm;
  /* Allow GPU_select_begin/end without drawing. */
  bool use_cache;
  /**
   * Signifies that #GPU_select_cache_begin has been called,
   * future calls to #GPU_select_begin should initialize the cache.
   *
   * \note #GPU_select_cache_begin could perform initialization but doesn't as it's inconvenient
   * for callers making the cache begin/end calls outside lower-level selection logic,
   * where the `mode` to pass to #GPU_select_begin isn't yet known.
   */
  bool use_cache_needs_init;
};

static GPUSelectState g_select_state = {false};

/** \} */

/* -------------------------------------------------------------------- */
/** \name Public API
 * \{ */

static void gpu_select_begin_ex(GPUSelectResult *buffer,
                                const uint buffer_len,
                                const rcti *input,
                                eGPUSelectMode mode,
                                int oldhits,
                                bool use_select_next)
{
  if (mode == GPU_SELECT_NEAREST_SECOND_PASS) {
    /* In the case hits was '-1', don't start the second pass since it's not going
     * to give useful results, and could cause a buffer overflow
     * in 'gpu_select_query_load_id'. */
    BLI_assert(oldhits != -1);
  }

  g_select_state.select_is_active = true;
  g_select_state.mode = mode;

  if (use_select_next) {
    g_select_state.algorithm = ALGO_SELECT_NEXT;
  }
  else if (ELEM(g_select_state.mode, GPU_SELECT_PICK_ALL, GPU_SELECT_PICK_NEAREST)) {
    g_select_state.algorithm = ALGO_GL_PICK;
  }
  else {
    g_select_state.algorithm = ALGO_GL_QUERY;
  }

  /* This function is called when the cache has already been initialized,
   * so only manipulate cache values when cache initialization is pending. */
  if (g_select_state.use_cache_needs_init) {
    g_select_state.use_cache_needs_init = false;

    switch (g_select_state.algorithm) {
      case ALGO_SELECT_NEXT:
      case ALGO_GL_QUERY: {
        g_select_state.use_cache = false;
        break;
      }
      default: {
        g_select_state.use_cache = true;
        gpu_select_pick_cache_begin();
        break;
      }
    }
  }

  switch (g_select_state.algorithm) {
    case ALGO_SELECT_NEXT: {
      gpu_select_next_begin(buffer, buffer_len, input, mode);
      break;
    }
    case ALGO_GL_QUERY: {
      gpu_select_query_begin(buffer, buffer_len, input, mode, oldhits);
      break;
    }
    default: /* ALGO_GL_PICK */
    {
      gpu_select_pick_begin(buffer, buffer_len, input, mode);
      break;
    }
  }
}

void GPU_select_begin_next(GPUSelectResult *buffer,
                           const uint buffer_len,
                           const rcti *input,
                           eGPUSelectMode mode,
                           int oldhits)
{
  gpu_select_begin_ex(
      buffer, buffer_len, input, mode, oldhits, U.experimental.enable_overlay_next);
}

void GPU_select_begin(GPUSelectResult *buffer,
                      const uint buffer_len,
                      const rcti *input,
                      eGPUSelectMode mode,
                      int oldhits)
{
  gpu_select_begin_ex(buffer, buffer_len, input, mode, oldhits, false);
}

bool GPU_select_load_id(uint id)
{
  /* If no selection mode is active, ignore. */
  if (!g_select_state.select_is_active) {
    return true;
  }

  switch (g_select_state.algorithm) {
    case ALGO_SELECT_NEXT:
      /* The Select-Next engine shouldn't use this pipeline. */
      BLI_assert_unreachable();
      return false;

    case ALGO_GL_QUERY: {
      return gpu_select_query_load_id(id);
    }
    default: /* ALGO_GL_PICK */
    {
      return gpu_select_pick_load_id(id, false);
    }
  }
}

uint GPU_select_end()
{
  uint hits = 0;

  switch (g_select_state.algorithm) {
    case ALGO_SELECT_NEXT: {
      hits = gpu_select_next_end();
      break;
    }
    case ALGO_GL_QUERY: {
      hits = gpu_select_query_end();
      break;
    }
    default: /* ALGO_GL_PICK */
    {
      hits = gpu_select_pick_end();
      break;
    }
  }

  g_select_state.select_is_active = false;

  return hits;
}

/** \} */

/* -------------------------------------------------------------------- */
/** \name Caching
 *
 * Support multiple begin/end pairs as long as they are within the initial region.
 * Currently only used by #ALGO_GL_PICK.
 * \{ */

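/* A usage sketch of the caching API (illustrative; the selection passes are
 * assumptions, not part of this file):
 *
 *   GPU_select_cache_begin();
 *   ... GPU_select_begin() / draw / GPU_select_end() (first pass fills the cache) ...
 *   ... GPU_select_begin() / draw / GPU_select_end() (later passes may reuse it) ...
 *   GPU_select_cache_end();
 */
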
void GPU_select_cache_begin()
{
  BLI_assert(g_select_state.select_is_active == false);
  /* Ensure #GPU_select_cache_end is always called. */
  BLI_assert(g_select_state.use_cache_needs_init == false);

  /* Signal that the cache should be used, instead of calling the algorithm's
   * cache-begin function directly. This is more convenient as the exact method
   * of selection may not be known by the caller. */
  g_select_state.use_cache_needs_init = true;
}

void GPU_select_cache_load_id()
{
  BLI_assert(g_select_state.use_cache == true);
  if (g_select_state.algorithm == ALGO_GL_PICK) {
    gpu_select_pick_cache_load_id();
  }
}

void GPU_select_cache_end()
{
  if (g_select_state.algorithm == ALGO_GL_PICK) {
    BLI_assert(g_select_state.use_cache == true);
    gpu_select_pick_cache_end();
  }
  g_select_state.use_cache = false;
  /* Paranoid assignment, should already be false. */
  g_select_state.use_cache_needs_init = false;
}

bool GPU_select_is_cached()
{
  return g_select_state.use_cache && gpu_select_pick_is_cached();
}

/** \} */

/* -------------------------------------------------------------------- */
/** \name Utilities
 * \{ */

const GPUSelectResult *GPU_select_buffer_near(const GPUSelectResult *buffer, int hits)
{
  const GPUSelectResult *buffer_near = nullptr;
  uint depth_min = uint(-1);
  for (int i = 0; i < hits; i++) {
    if (buffer->depth < depth_min) {
      BLI_assert(buffer->id != -1);
      depth_min = buffer->depth;
      buffer_near = buffer;
    }
    buffer++;
  }
  return buffer_near;
}

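/* A small worked example (values illustrative): with hit ids {3, 7, 3, 9} and
 * `select_id = 3`, the buffer is compacted in place to {7, 9} and 2 is returned. */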
uint GPU_select_buffer_remove_by_id(GPUSelectResult *buffer, int hits, uint select_id)
{
  GPUSelectResult *buffer_src = buffer;
  GPUSelectResult *buffer_dst = buffer;
  int hits_final = 0;
  for (int i = 0; i < hits; i++) {
    if (buffer_src->id != select_id) {
      if (buffer_dst != buffer_src) {
        memcpy(buffer_dst, buffer_src, sizeof(GPUSelectResult));
      }
      buffer_dst++;
      hits_final += 1;
    }
    buffer_src++;
  }
  return hits_final;
}

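/* Spreads a buffer written with the (smaller) `dst` dimensions in place so its rows
 * line up with the (larger) `src` rectangle, zero-filling the margins.
 *
 * A small worked example (values illustrative): for a 2x2 `dst` at offset (1, 1)
 * inside a 4x4 `src`, the 4 packed values end up in rows 1-2, columns 1-2 of the
 * 16 element buffer, and the remaining 12 elements are set to 0. */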
void GPU_select_buffer_stride_realign(const rcti *src, const rcti *dst, uint *r_buf)
{
  const int x = dst->xmin - src->xmin;
  const int y = dst->ymin - src->ymin;

  BLI_assert(src->xmin <= dst->xmin && src->ymin <= dst->ymin && src->xmax >= dst->xmax &&
             src->ymax >= dst->ymax);
  BLI_assert(x >= 0 && y >= 0);

  const int src_x = BLI_rcti_size_x(src);
  const int src_y = BLI_rcti_size_y(src);
  const int dst_x = BLI_rcti_size_x(dst);
  const int dst_y = BLI_rcti_size_y(dst);

  int last_px_id = src_x * (y + dst_y - 1) + (x + dst_x - 1);
  memset(&r_buf[last_px_id + 1], 0, (src_x * src_y - (last_px_id + 1)) * sizeof(*r_buf));

  if (last_px_id < 0) {
    /* Nothing to write. */
    BLI_assert(last_px_id == -1);
    return;
  }

  int last_px_written = dst_x * dst_y - 1;
  const int skip = src_x - dst_x;

  while (true) {
    for (int i = dst_x; i--;) {
      r_buf[last_px_id--] = r_buf[last_px_written--];
    }
    if (last_px_written < 0) {
      break;
    }
    last_px_id -= skip;
    memset(&r_buf[last_px_id + 1], 0, skip * sizeof(*r_buf));
  }
  memset(r_buf, 0, (last_px_id + 1) * sizeof(*r_buf));
}

/** \} */