DRW: Move GPU context lock out of the DST variable.

This doesn't remove the locking but isolates the `GPUContext`
lock outside the `DrawManager` class.

This has no functional change.

Pull Request: https://projects.blender.org/blender/blender/pulls/135189
This commit is contained in:
Clément Foucault
2025-02-27 15:39:05 +01:00
committed by Clément Foucault
parent 1fc2daf3e2
commit 583e2b7240
4 changed files with 69 additions and 53 deletions

View File

@@ -81,6 +81,7 @@ struct DrawEngineType {
}; };
/* Shaders */ /* Shaders */
/** IMPORTANT: Modify the currently bound context. */
void DRW_shader_init(); void DRW_shader_init();
void DRW_shader_exit(); void DRW_shader_exit();

View File

@@ -111,6 +111,26 @@
static CLG_LogRef LOG = {"draw.manager"}; static CLG_LogRef LOG = {"draw.manager"};
/* -------------------------------------------------------------------- */
/** \name Settings
*
* A global GPUContext is used for rendering every viewports (even on different windows).
* This is because some resources cannot be shared between contexts (GPUFramebuffers, GPUBatch).
* \{ */
/** Unique ghost context used by Viewports. */
static void *system_gpu_context = nullptr;
/** GPUContext associated to the system_gpu_context. */
static GPUContext *blender_gpu_context = nullptr;
/**
* GPUContext cannot be used concurrently. This isn't required at the moment since viewports
* aren't rendered in parallel but this could happen in the future. The old implementation of DRW
* was also locking so this is a bit of a preventive measure. Could eventually be removed.
*/
static TicketMutex *system_gpu_context_mutex = nullptr;
/** \} */
/** Render State: No persistent data between draw calls. */ /** Render State: No persistent data between draw calls. */
DRWManager DST = {nullptr}; DRWManager DST = {nullptr};
@@ -121,7 +141,7 @@ static struct {
static void drw_state_prepare_clean_for_draw(DRWManager *dst) static void drw_state_prepare_clean_for_draw(DRWManager *dst)
{ {
memset(dst, 0x0, offsetof(DRWManager, system_gpu_context)); memset(dst, 0x0, offsetof(DRWManager, debug));
} }
/* This function is used to reset draw manager to a state /* This function is used to reset draw manager to a state
@@ -131,7 +151,7 @@ static void drw_state_prepare_clean_for_draw(DRWManager *dst)
#ifndef NDEBUG #ifndef NDEBUG
static void drw_state_ensure_not_reused(DRWManager *dst) static void drw_state_ensure_not_reused(DRWManager *dst)
{ {
memset(dst, 0xff, offsetof(DRWManager, system_gpu_context)); memset(dst, 0xff, offsetof(DRWManager, debug));
} }
#endif /* !NDEBUG */ #endif /* !NDEBUG */
@@ -1128,7 +1148,7 @@ void DRW_notify_view_update(const DRWUpdateContext *update_ctx)
* Check for recursive lock which can deadlock. This should not * Check for recursive lock which can deadlock. This should not
* happen, but in case there is a bug where depsgraph update is called * happen, but in case there is a bug where depsgraph update is called
* during drawing we try not to hang Blender. */ * during drawing we try not to hang Blender. */
if (!BLI_ticket_mutex_lock_check_recursive(DST.system_gpu_context_mutex)) { if (!BLI_ticket_mutex_lock_check_recursive(system_gpu_context_mutex)) {
CLOG_ERROR(&LOG, "GPU context already bound"); CLOG_ERROR(&LOG, "GPU context already bound");
BLI_assert_unreachable(); BLI_assert_unreachable();
return; return;
@@ -1168,7 +1188,7 @@ void DRW_notify_view_update(const DRWUpdateContext *update_ctx)
drw_manager_exit(&DST); drw_manager_exit(&DST);
BLI_ticket_mutex_unlock(DST.system_gpu_context_mutex); BLI_ticket_mutex_unlock(system_gpu_context_mutex);
} }
/* update a viewport which belongs to a GPUOffscreen */ /* update a viewport which belongs to a GPUOffscreen */
@@ -2815,7 +2835,7 @@ void DRW_engines_free()
using namespace blender::draw; using namespace blender::draw;
drw_registered_engines_free(); drw_registered_engines_free();
if (DST.system_gpu_context == nullptr) { if (system_gpu_context == nullptr) {
/* Nothing has been setup. Nothing to clear. /* Nothing has been setup. Nothing to clear.
* Otherwise, DRW_gpu_context_enable can * Otherwise, DRW_gpu_context_enable can
* create a context in background mode. (see #62355) */ * create a context in background mode. (see #62355) */
@@ -2837,7 +2857,7 @@ void DRW_engines_free()
void DRW_render_context_enable(Render *render) void DRW_render_context_enable(Render *render)
{ {
if (G.background && DST.system_gpu_context == nullptr) { if (G.background && system_gpu_context == nullptr) {
WM_init_gpu(); WM_init_gpu();
} }
@@ -2895,57 +2915,61 @@ void DRW_render_context_disable(Render *render)
void DRW_gpu_context_create() void DRW_gpu_context_create()
{ {
BLI_assert(DST.system_gpu_context == nullptr); /* Ensure it's called once */ BLI_assert(system_gpu_context == nullptr); /* Ensure it's called once */
DST.system_gpu_context_mutex = BLI_ticket_mutex_alloc(); /* Setup compilation context. Called first as it changes the active GPUContext. */
/* This changes the active context. */
DST.system_gpu_context = WM_system_gpu_context_create();
WM_system_gpu_context_activate(DST.system_gpu_context);
/* Be sure to create blender_gpu_context too. */
DST.blender_gpu_context = GPU_context_create(nullptr, DST.system_gpu_context);
/* Setup compilation context. */
DRW_shader_init(); DRW_shader_init();
/* Activate the window's context afterwards. */
system_gpu_context_mutex = BLI_ticket_mutex_alloc();
/* This changes the active context. */
system_gpu_context = WM_system_gpu_context_create();
WM_system_gpu_context_activate(system_gpu_context);
/* Be sure to create blender_gpu_context too. */
blender_gpu_context = GPU_context_create(nullptr, system_gpu_context);
/* Some part of the code assumes no context is left bound. */
GPU_context_active_set(nullptr);
WM_system_gpu_context_release(system_gpu_context);
/* Activate the window's context if any. */
wm_window_reset_drawable(); wm_window_reset_drawable();
} }
void DRW_gpu_context_destroy() void DRW_gpu_context_destroy()
{ {
BLI_assert(BLI_thread_is_main()); BLI_assert(BLI_thread_is_main());
if (DST.system_gpu_context != nullptr) { if (system_gpu_context != nullptr) {
DRW_shader_exit(); DRW_shader_exit();
WM_system_gpu_context_activate(DST.system_gpu_context); WM_system_gpu_context_activate(system_gpu_context);
GPU_context_active_set(DST.blender_gpu_context); GPU_context_active_set(blender_gpu_context);
GPU_context_discard(DST.blender_gpu_context); GPU_context_discard(blender_gpu_context);
WM_system_gpu_context_dispose(DST.system_gpu_context); WM_system_gpu_context_dispose(system_gpu_context);
BLI_ticket_mutex_free(DST.system_gpu_context_mutex); BLI_ticket_mutex_free(system_gpu_context_mutex);
} }
} }
void DRW_gpu_context_enable_ex(bool /*restore*/) void DRW_gpu_context_enable_ex(bool /*restore*/)
{ {
if (DST.system_gpu_context != nullptr) { if (system_gpu_context != nullptr) {
/* IMPORTANT: We don't support immediate mode in render mode! /* IMPORTANT: We don't support immediate mode in render mode!
* This shall remain in effect until immediate mode supports * This shall remain in effect until immediate mode supports
* multiple threads. */ * multiple threads. */
BLI_ticket_mutex_lock(DST.system_gpu_context_mutex); BLI_ticket_mutex_lock(system_gpu_context_mutex);
GPU_render_begin(); GPU_render_begin();
WM_system_gpu_context_activate(DST.system_gpu_context); WM_system_gpu_context_activate(system_gpu_context);
GPU_context_active_set(DST.blender_gpu_context); GPU_context_active_set(blender_gpu_context);
GPU_context_begin_frame(DST.blender_gpu_context); GPU_context_begin_frame(blender_gpu_context);
} }
} }
void DRW_gpu_context_disable_ex(bool restore) void DRW_gpu_context_disable_ex(bool restore)
{ {
if (DST.system_gpu_context != nullptr) { if (system_gpu_context != nullptr) {
GPU_context_end_frame(DST.blender_gpu_context); GPU_context_end_frame(blender_gpu_context);
if (BLI_thread_is_main() && restore) { if (BLI_thread_is_main() && restore) {
wm_window_reset_drawable(); wm_window_reset_drawable();
} }
else { else {
WM_system_gpu_context_release(DST.system_gpu_context); WM_system_gpu_context_release(system_gpu_context);
GPU_context_active_set(nullptr); GPU_context_active_set(nullptr);
} }
@@ -2953,7 +2977,7 @@ void DRW_gpu_context_disable_ex(bool restore)
* called outside of an existing render loop. */ * called outside of an existing render loop. */
GPU_render_end(); GPU_render_end();
BLI_ticket_mutex_unlock(DST.system_gpu_context_mutex); BLI_ticket_mutex_unlock(system_gpu_context_mutex);
} }
} }
@@ -2961,7 +2985,7 @@ void DRW_gpu_context_enable()
{ {
/* TODO: should be replace by a more elegant alternative. */ /* TODO: should be replace by a more elegant alternative. */
if (G.background && DST.system_gpu_context == nullptr) { if (G.background && system_gpu_context == nullptr) {
WM_init_gpu(); WM_init_gpu();
} }
DRW_gpu_context_enable_ex(true); DRW_gpu_context_enable_ex(true);
@@ -2977,16 +3001,14 @@ void DRW_system_gpu_render_context_enable(void *re_system_gpu_context)
/* If thread is main you should use DRW_gpu_context_enable(). */ /* If thread is main you should use DRW_gpu_context_enable(). */
BLI_assert(!BLI_thread_is_main()); BLI_assert(!BLI_thread_is_main());
/* TODO: get rid of the blocking. Only here because of the static global DST. */ BLI_ticket_mutex_lock(system_gpu_context_mutex);
BLI_ticket_mutex_lock(DST.system_gpu_context_mutex);
WM_system_gpu_context_activate(re_system_gpu_context); WM_system_gpu_context_activate(re_system_gpu_context);
} }
void DRW_system_gpu_render_context_disable(void *re_system_gpu_context) void DRW_system_gpu_render_context_disable(void *re_system_gpu_context)
{ {
WM_system_gpu_context_release(re_system_gpu_context); WM_system_gpu_context_release(re_system_gpu_context);
/* TODO: get rid of the blocking. */ BLI_ticket_mutex_unlock(system_gpu_context_mutex);
BLI_ticket_mutex_unlock(DST.system_gpu_context_mutex);
} }
void DRW_blender_gpu_render_context_enable(void *re_gpu_context) void DRW_blender_gpu_render_context_enable(void *re_gpu_context)
@@ -3018,28 +3040,28 @@ void *DRW_system_gpu_context_get()
* have to work from the main thread, which is tricky to get working too. The preferable solution * have to work from the main thread, which is tricky to get working too. The preferable solution
* would be using a separate thread for VR drawing where a single context can stay active. */ * would be using a separate thread for VR drawing where a single context can stay active. */
return DST.system_gpu_context; return system_gpu_context;
} }
void *DRW_xr_blender_gpu_context_get() void *DRW_xr_blender_gpu_context_get()
{ {
/* XXX: See comment on #DRW_system_gpu_context_get(). */ /* XXX: See comment on #DRW_system_gpu_context_get(). */
return DST.blender_gpu_context; return blender_gpu_context;
} }
void DRW_xr_drawing_begin() void DRW_xr_drawing_begin()
{ {
/* XXX: See comment on #DRW_system_gpu_context_get(). */ /* XXX: See comment on #DRW_system_gpu_context_get(). */
BLI_ticket_mutex_lock(DST.system_gpu_context_mutex); BLI_ticket_mutex_lock(system_gpu_context_mutex);
} }
void DRW_xr_drawing_end() void DRW_xr_drawing_end()
{ {
/* XXX: See comment on #DRW_system_gpu_context_get(). */ /* XXX: See comment on #DRW_system_gpu_context_get(). */
BLI_ticket_mutex_unlock(DST.system_gpu_context_mutex); BLI_ticket_mutex_unlock(system_gpu_context_mutex);
} }
#endif #endif
@@ -3095,7 +3117,7 @@ bool DRW_gpu_context_release()
return false; return false;
} }
if (GPU_context_active_get() != DST.blender_gpu_context) { if (GPU_context_active_get() != blender_gpu_context) {
/* Context release is requested from the outside of the draw manager main draw loop, indicate /* Context release is requested from the outside of the draw manager main draw loop, indicate
* this to the `DRW_gpu_context_activate()` so that it restores drawable of the window. * this to the `DRW_gpu_context_activate()` so that it restores drawable of the window.
*/ */
@@ -3103,7 +3125,7 @@ bool DRW_gpu_context_release()
} }
GPU_context_active_set(nullptr); GPU_context_active_set(nullptr);
WM_system_gpu_context_release(DST.system_gpu_context); WM_system_gpu_context_release(system_gpu_context);
return true; return true;
} }
@@ -3115,8 +3137,8 @@ void DRW_gpu_context_activate(bool drw_state)
} }
if (drw_state) { if (drw_state) {
WM_system_gpu_context_activate(DST.system_gpu_context); WM_system_gpu_context_activate(system_gpu_context);
GPU_context_active_set(DST.blender_gpu_context); GPU_context_active_set(blender_gpu_context);
} }
else { else {
wm_window_reset_drawable(); wm_window_reset_drawable();

View File

@@ -139,14 +139,6 @@ struct DRWManager {
/* ---------- Nothing after this point is cleared after use ----------- */ /* ---------- Nothing after this point is cleared after use ----------- */
/* system_gpu_context serves as the offset for clearing only
* the top portion of the struct so DO NOT MOVE IT! */
/** Unique ghost context used by the draw manager. */
void *system_gpu_context;
GPUContext *blender_gpu_context;
/** Mutex to lock the drw manager and avoid concurrent context usage. */
TicketMutex *system_gpu_context_mutex;
DRWDebugModule *debug; DRWDebugModule *debug;
}; };

View File

@@ -193,9 +193,10 @@ void DRW_shader_init()
compiler_data().system_gpu_context = WM_system_gpu_context_create(); compiler_data().system_gpu_context = WM_system_gpu_context_create();
compiler_data().blender_gpu_context = GPU_context_create(nullptr, compiler_data().blender_gpu_context = GPU_context_create(nullptr,
compiler_data().system_gpu_context); compiler_data().system_gpu_context);
/* Some part of the code assumes no context is left bound. */
GPU_context_active_set(nullptr); GPU_context_active_set(nullptr);
WM_system_gpu_context_activate(DST.system_gpu_context); WM_system_gpu_context_release(compiler_data().system_gpu_context);
GPU_context_active_set(DST.blender_gpu_context);
BLI_threadpool_init(&compilation_threadpool(), drw_deferred_shader_compilation_exec, 1); BLI_threadpool_init(&compilation_threadpool(), drw_deferred_shader_compilation_exec, 1);
BLI_threadpool_insert(&compilation_threadpool(), nullptr); BLI_threadpool_insert(&compilation_threadpool(), nullptr);