From ed9fc54e0e2fbf868411775efbf1cd537b090bfa Mon Sep 17 00:00:00 2001
From: Aras Pranckevicius
Date: Wed, 1 Oct 2025 10:08:18 +0200
Subject: [PATCH] Fix #147006: VSE preview crashes with too high output
 resolution

Check whether creating GPU textures succeeds before using them.
However, when texture creation fails there is currently no indication
to the user that the resolution might be too large. It is not clear
what the best way to report that would be; e.g. Workbench rendering
also gives no indication in a similar scenario.

While at it, add GPU_is_safe_texture_size instead of duplicating the
same logic in two places.

Pull Request: https://projects.blender.org/blender/blender/pulls/147043
---
 source/blender/editors/space_node/node_edit.cc         |  7 +------
 .../editors/space_sequencer/sequencer_preview_draw.cc  | 11 +++++++++--
 source/blender/gpu/GPU_capabilities.hh                 | 10 ++++++++++
 source/blender/gpu/intern/gpu_capabilities.cc          |  6 ++++++
 source/blender/render/intern/pipeline.cc               |  7 +------
 5 files changed, 27 insertions(+), 14 deletions(-)

diff --git a/source/blender/editors/space_node/node_edit.cc b/source/blender/editors/space_node/node_edit.cc
index af9f082cd50..c72dce9a696 100644
--- a/source/blender/editors/space_node/node_edit.cc
+++ b/source/blender/editors/space_node/node_edit.cc
@@ -310,12 +310,7 @@ static bool is_compositing_possible(const bContext *C)
   int width, height;
   BKE_render_resolution(&scene->r, false, &width, &height);
 
-  const int max_texture_size = GPU_max_texture_size();
-
-  /* There is no way to know if the render size is too large except if we actually allocate a test
-   * texture, which we want to avoid due its cost. So we employ a heuristic that so far has worked
-   * with all known GPU drivers. */
-  if (size_t(width) * height > (size_t(max_texture_size) * max_texture_size) / 4) {
+  if (!GPU_is_safe_texture_size(width, height)) {
     WM_global_report(RPT_ERROR, "Render size too large for GPU, use CPU compositor instead");
     return false;
   }
diff --git a/source/blender/editors/space_sequencer/sequencer_preview_draw.cc b/source/blender/editors/space_sequencer/sequencer_preview_draw.cc
index e5cd8b6658d..0fde6e51e37 100644
--- a/source/blender/editors/space_sequencer/sequencer_preview_draw.cc
+++ b/source/blender/editors/space_sequencer/sequencer_preview_draw.cc
@@ -903,6 +903,9 @@ static void update_gpu_scopes(const ImBuf *input_ibuf,
   const gpu::TextureFormat format = gpu::TextureFormat::SFLOAT_16_16_16_16;
   display_texture = GPU_texture_create_2d(
       "seq_scope_display_buf", width, height, 1, format, usage, nullptr);
+  if (display_texture == nullptr) {
+    return;
+  }
   GPU_texture_filter_mode(display_texture, false);
 
   GPU_matrix_push();
@@ -1515,7 +1518,9 @@ static blender::gpu::Texture *create_texture(const ImBuf &ibuf)
 
     texture = GPU_texture_create_2d(
         "seq_display_buf", ibuf.x, ibuf.y, 1, texture_format, texture_usage, nullptr);
-    GPU_texture_update(texture, GPU_DATA_FLOAT, ibuf.float_buffer.data);
+    if (texture) {
+      GPU_texture_update(texture, GPU_DATA_FLOAT, ibuf.float_buffer.data);
+    }
   }
   else if (ibuf.byte_buffer.data) {
     texture = GPU_texture_create_2d("seq_display_buf",
@@ -1525,7 +1530,9 @@ static blender::gpu::Texture *create_texture(const ImBuf &ibuf)
                                     blender::gpu::TextureFormat::UNORM_8_8_8_8,
                                     texture_usage,
                                     nullptr);
-    GPU_texture_update(texture, GPU_DATA_UBYTE, ibuf.byte_buffer.data);
+    if (texture) {
+      GPU_texture_update(texture, GPU_DATA_UBYTE, ibuf.byte_buffer.data);
+    }
   }
 
   if (texture) {
diff --git a/source/blender/gpu/GPU_capabilities.hh b/source/blender/gpu/GPU_capabilities.hh
index 4e85ba102d3..f70705ae5d5 100644
--- a/source/blender/gpu/GPU_capabilities.hh
+++ b/source/blender/gpu/GPU_capabilities.hh
@@ -44,6 +44,16 @@ const char *GPU_extension_get(int i);
 
 int GPU_texture_size_with_limit(int res);
 
+/**
+ * Returns whether it should be "safe" to use a texture of a given size.
+ *
+ * The heuristic is that allocating a texture of up to 25% of
+ * #GPU_max_texture_size squared should be fine. Note that the actual texture
+ * creation can still fail even if deemed "safe" by this function, depending
+ * on current memory usage, texture format, etc.
+ */
+bool GPU_is_safe_texture_size(int width, int height);
+
 bool GPU_use_subprocess_compilation();
 
 int GPU_max_parallel_compilations();
diff --git a/source/blender/gpu/intern/gpu_capabilities.cc b/source/blender/gpu/intern/gpu_capabilities.cc
index 5ade31b716c..c190b205f21 100644
--- a/source/blender/gpu/intern/gpu_capabilities.cc
+++ b/source/blender/gpu/intern/gpu_capabilities.cc
@@ -46,6 +46,12 @@ int GPU_texture_size_with_limit(int res)
   return min_ii(reslimit, res);
 }
 
+bool GPU_is_safe_texture_size(int width, int height)
+{
+  const int max_texture_size = GPU_max_texture_size();
+  return size_t(width) * height <= size_t(max_texture_size) * max_texture_size / 4;
+}
+
 int GPU_max_texture_layers()
 {
   return GCaps.max_texture_layers;
diff --git a/source/blender/render/intern/pipeline.cc b/source/blender/render/intern/pipeline.cc
index e3ff7211925..1d901657a5e 100644
--- a/source/blender/render/intern/pipeline.cc
+++ b/source/blender/render/intern/pipeline.cc
@@ -1809,12 +1809,7 @@ static bool is_compositing_possible_on_gpu(Scene *scene, ReportList *reports)
   int width, height;
   BKE_render_resolution(&scene->r, false, &width, &height);
 
-  const int max_texture_size = GPU_max_texture_size();
-
-  /* There is no way to know if the render size is too large except if we actually allocate a test
-   * texture, which we want to avoid due its cost. So we employ a heuristic that so far has worked
-   * with all known GPU drivers. */
-  if (size_t(width) * height > (size_t(max_texture_size) * max_texture_size) / 4) {
+  if (!GPU_is_safe_texture_size(width, height)) {
     BKE_report(reports, RPT_ERROR, "Render size too large for GPU, use CPU compositor instead");
     return false;
   }
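
Note for reviewers: a minimal sketch of the usage pattern this patch
establishes. The helper try_create_preview_texture, its name, and the
chosen format/usage flags are hypothetical, for illustration only;
GPU_is_safe_texture_size and GPU_texture_create_2d are used with the
same signatures as in the diffs above.

#include "GPU_capabilities.hh"
#include "GPU_texture.hh"

/* Hypothetical helper, not part of this patch: combine the size
 * heuristic with a null check on the actual allocation. */
static blender::gpu::Texture *try_create_preview_texture(int width, int height)
{
  /* Heuristic pre-check: skip asking the driver for a texture that is
   * almost certainly too large for it. */
  if (!GPU_is_safe_texture_size(width, height)) {
    return nullptr;
  }
  /* Even a "safe" size can fail to allocate (memory pressure, texture
   * format limits, etc.), so callers must still check the result for
   * nullptr before use. */
  return GPU_texture_create_2d("preview_buf",
                               width,
                               height,
                               1,
                               blender::gpu::TextureFormat::SFLOAT_16_16_16_16,
                               GPU_TEXTURE_USAGE_SHADER_READ,
                               nullptr);
}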