Fix #140113: White flickering when changing a texture on EEVEE

Fixed by not doing async loading and by always staging the
correct texture reference.

Unfortunately the code is getting a bit messy since the
texture loading is not done at the GPUMaterial level.
So we need one async and one synchronous path inside
`PassBase<T>::material_set`.

`ImageGPUTextures` now contains references to the location
of the future `GPUTexture *`.

Also fixes #140001.

Pull Request: https://projects.blender.org/blender/blender/pulls/140203
This commit is contained in:
Clément Foucault
2025-06-11 15:23:24 +02:00
committed by Clément Foucault
parent 7b291de3c3
commit 2f63488ce9
8 changed files with 79 additions and 79 deletions

View File

@@ -604,8 +604,8 @@ GPUTexture *BKE_image_get_gpu_viewer_texture(Image *image, ImageUser *iuser);
* tiles as used in material shaders.
*/
struct ImageGPUTextures {
GPUTexture *texture;
GPUTexture *tile_mapping;
GPUTexture **texture;
GPUTexture **tile_mapping;
};
ImageGPUTextures BKE_image_get_gpu_material_texture(Image *image,

View File

@@ -409,10 +409,13 @@ static ImageGPUTextures image_get_gpu_texture(Image *ima,
if (current_view >= 2) {
current_view = 0;
}
GPUTexture **tex = get_image_gpu_texture_ptr(ima, textarget, current_view);
if (*tex) {
result.texture = *tex;
result.tile_mapping = *get_image_gpu_texture_ptr(ima, TEXTARGET_TILE_MAPPING, current_view);
result.texture = get_image_gpu_texture_ptr(ima, textarget, current_view);
if (textarget == TEXTARGET_2D_ARRAY) {
result.tile_mapping = get_image_gpu_texture_ptr(ima, TEXTARGET_TILE_MAPPING, current_view);
}
if (*result.texture) {
return result;
}
@@ -425,8 +428,7 @@ static ImageGPUTextures image_get_gpu_texture(Image *ima,
* texture with zero bind-code so we don't keep trying. */
ImageTile *tile = BKE_image_get_tile(ima, 0);
if (tile == nullptr) {
*tex = image_gpu_texture_error_create(textarget);
result.texture = *tex;
*result.texture = image_gpu_texture_error_create(textarget);
return result;
}
@@ -435,46 +437,39 @@ static ImageGPUTextures image_get_gpu_texture(Image *ima,
ImBuf *ibuf = BKE_image_acquire_ibuf(ima, iuser, (use_viewers) ? &lock : nullptr);
if (ibuf == nullptr) {
BKE_image_release_ibuf(ima, ibuf, (use_viewers) ? lock : nullptr);
*tex = image_gpu_texture_error_create(textarget);
result.texture = *tex;
*result.texture = image_gpu_texture_error_create(textarget);
return result;
}
if (textarget == TEXTARGET_2D_ARRAY) {
/* For materials, array and tile mapping in case there are UDIM tiles. */
*tex = gpu_texture_create_tile_array(ima, ibuf);
result.texture = *tex;
GPUTexture **tile_mapping_tex = get_image_gpu_texture_ptr(
ima, TEXTARGET_TILE_MAPPING, current_view);
*tile_mapping_tex = gpu_texture_create_tile_mapping(ima, iuser ? iuser->multiview_eye : 0);
result.tile_mapping = *tile_mapping_tex;
*result.texture = gpu_texture_create_tile_array(ima, ibuf);
*result.tile_mapping = gpu_texture_create_tile_mapping(ima, iuser ? iuser->multiview_eye : 0);
}
else {
/* Single image texture. */
const bool use_high_bitdepth = (ima->flag & IMA_HIGH_BITDEPTH);
const bool store_premultiplied = BKE_image_has_gpu_texture_premultiplied_alpha(ima, ibuf);
*tex = IMB_create_gpu_texture(ima->id.name + 2, ibuf, use_high_bitdepth, store_premultiplied);
result.texture = *tex;
*result.texture = IMB_create_gpu_texture(
ima->id.name + 2, ibuf, use_high_bitdepth, store_premultiplied);
if (*tex) {
GPU_texture_extend_mode(*tex, GPU_SAMPLER_EXTEND_MODE_REPEAT);
if (*result.texture) {
GPU_texture_extend_mode(*result.texture, GPU_SAMPLER_EXTEND_MODE_REPEAT);
if (GPU_mipmap_enabled()) {
GPU_texture_update_mipmap_chain(*tex);
GPU_texture_update_mipmap_chain(*result.texture);
ima->gpuflag |= IMA_GPU_MIPMAP_COMPLETE;
GPU_texture_mipmap_mode(*tex, true, true);
GPU_texture_mipmap_mode(*result.texture, true, true);
}
else {
GPU_texture_mipmap_mode(*tex, false, true);
GPU_texture_mipmap_mode(*result.texture, false, true);
}
}
}
if (*tex) {
GPU_texture_original_size_set(*tex, ibuf->x, ibuf->y);
if (*result.texture) {
GPU_texture_original_size_set(*result.texture, ibuf->x, ibuf->y);
}
BKE_image_release_ibuf(ima, ibuf, (use_viewers) ? lock : nullptr);
@@ -484,12 +479,12 @@ static ImageGPUTextures image_get_gpu_texture(Image *ima,
GPUTexture *BKE_image_get_gpu_texture(Image *image, ImageUser *iuser)
{
return image_get_gpu_texture(image, iuser, false, false, false).texture;
return *image_get_gpu_texture(image, iuser, false, false, false).texture;
}
GPUTexture *BKE_image_get_gpu_viewer_texture(Image *image, ImageUser *iuser)
{
return image_get_gpu_texture(image, iuser, true, false, false).texture;
return *image_get_gpu_texture(image, iuser, true, false, false).texture;
}
ImageGPUTextures BKE_image_get_gpu_material_texture(Image *image,

View File

@@ -173,14 +173,8 @@ void MaterialModule::begin_sync()
shader_map_.clear();
}
bool MaterialModule::queue_texture_loading(GPUMaterial *material)
void MaterialModule::queue_texture_loading(GPUMaterial *material)
{
if (inst_.is_viewport_image_render) {
/* Do not delay image loading for viewport render as it would produce invalid frames. */
return true;
}
bool loaded = true;
ListBase textures = GPU_material_textures(material);
for (GPUMaterialTexture *tex : ListBaseWrapper<GPUMaterialTexture>(textures)) {
if (tex->ima) {
@@ -188,17 +182,11 @@ bool MaterialModule::queue_texture_loading(GPUMaterial *material)
ImageUser *iuser = tex->iuser_available ? &tex->iuser : nullptr;
ImageGPUTextures gputex = BKE_image_get_gpu_material_texture_try(
tex->ima, iuser, use_tile_mapping);
if (ELEM(tex->ima->source, IMA_SRC_SEQUENCE, IMA_SRC_MOVIE)) {
/* Do not defer the loading of animated textures as they would appear always loading. */
continue;
}
if (gputex.texture == nullptr) {
if (*gputex.texture == nullptr) {
texture_loading_queue_.append(tex);
loaded = false;
}
}
}
return loaded;
}
void MaterialModule::end_sync()
@@ -214,6 +202,7 @@ void MaterialModule::end_sync()
GPU_debug_group_begin("Texture Loading");
/* Load files from disk in a multithreaded manner. Allow better parallelism. */
threading::parallel_for(texture_loading_queue_.index_range(), 1, [&](const IndexRange range) {
for (auto i : range) {
GPUMaterialTexture *tex = texture_loading_queue_[i];
@@ -225,25 +214,24 @@ void MaterialModule::end_sync()
}
});
/* To avoid freezing the UI too much, we only allow some finite amount of time of texture loading
* per frame. */
double loading_time_per_sync = inst_.is_image_render ? DBL_MAX : 0.250;
double start_time = BLI_time_now_seconds();
/* Upload to the GPU (create GPUTexture). This part still requires a valid GPU context and
* is not easily parallelized. */
for (GPUMaterialTexture *tex : texture_loading_queue_) {
BLI_assert(tex->ima);
GPU_debug_group_begin(tex->ima->id.name);
const bool use_tile_mapping = tex->tiled_mapping_name[0];
ImageUser *iuser = tex->iuser_available ? &tex->iuser : nullptr;
BKE_image_get_gpu_material_texture(tex->ima, iuser, use_tile_mapping);
ImageGPUTextures gputex = BKE_image_get_gpu_material_texture(
tex->ima, iuser, use_tile_mapping);
/* Acquire the textures since they were not existing inside `PassBase::material_set()`. */
inst_.manager->acquire_texture(*gputex.texture);
if (gputex.tile_mapping) {
inst_.manager->acquire_texture(*gputex.tile_mapping);
}
GPU_debug_group_end();
if (BLI_time_now_seconds() - start_time > loading_time_per_sync) {
break;
}
}
GPU_debug_group_end();
texture_loading_queue_.clear();
@@ -269,6 +257,8 @@ MaterialPass MaterialModule::material_pass_get(Object *ob,
matpass.gpumat = inst_.shaders.material_shader_get(
blender_mat, ntree, pipeline_type, geometry_type, use_deferred_compilation, default_mat);
queue_texture_loading(matpass.gpumat);
const bool is_forward = ELEM(pipeline_type,
MAT_PIPE_FORWARD,
MAT_PIPE_PREPASS_FORWARD,
@@ -277,11 +267,6 @@ MaterialPass MaterialModule::material_pass_get(Object *ob,
switch (GPU_material_status(matpass.gpumat)) {
case GPU_MAT_SUCCESS: {
if (!queue_texture_loading(matpass.gpumat)) {
queued_textures_count++;
matpass.gpumat = inst_.shaders.material_shader_get(
default_mat, default_mat->nodetree, pipeline_type, geometry_type, false, nullptr);
}
/* Determine optimization status for remaining compilations counter. */
int optimization_status = GPU_material_optimization_status(matpass.gpumat);
if (optimization_status == GPU_MAT_OPTIMIZATION_QUEUED) {
@@ -341,7 +326,7 @@ MaterialPass MaterialModule::material_pass_get(Object *ob,
if (shader_sub != nullptr) {
/* Create a sub for this material as `shader_sub` is for sharing shader between materials. */
matpass.sub_pass = &shader_sub->sub(GPU_material_get_name(matpass.gpumat));
matpass.sub_pass->material_set(*inst_.manager, matpass.gpumat);
matpass.sub_pass->material_set(*inst_.manager, matpass.gpumat, true);
}
else {
matpass.sub_pass = nullptr;

View File

@@ -417,9 +417,8 @@ class MaterialModule {
eMaterialGeometry geometry_type,
eMaterialProbe probe_capture = MAT_PROBE_NONE);
/* Push unloaded texture used by this material to the texture loading queue.
* Return true if all textures are already loaded. */
bool queue_texture_loading(GPUMaterial *material);
/* Push unloaded texture used by this material to the texture loading queue. */
void queue_texture_loading(GPUMaterial *material);
ShaderGroups default_materials_load(bool block_until_ready = false);
};

View File

@@ -410,7 +410,7 @@ PassMain::Sub *ForwardPipeline::prepass_transparent_add(const Object *ob,
float sorting_value = math::dot(float3(ob->object_to_world().location()), camera_forward_);
PassMain::Sub *pass = &transparent_ps_.sub(GPU_material_get_name(gpumat), sorting_value);
pass->state_set(state);
pass->material_set(*inst_.manager, gpumat);
pass->material_set(*inst_.manager, gpumat, true);
return pass;
}
@@ -427,7 +427,7 @@ PassMain::Sub *ForwardPipeline::material_transparent_add(const Object *ob,
float sorting_value = math::dot(float3(ob->object_to_world().location()), camera_forward_);
PassMain::Sub *pass = &transparent_ps_.sub(GPU_material_get_name(gpumat), sorting_value);
pass->state_set(state);
pass->material_set(*inst_.manager, gpumat);
pass->material_set(*inst_.manager, gpumat, true);
return pass;
}
@@ -1077,7 +1077,7 @@ PassMain::Sub *VolumeLayer::occupancy_add(const Object *ob,
is_empty = false;
PassMain::Sub *pass = &occupancy_ps_->sub(GPU_material_get_name(gpumat));
pass->material_set(*inst_.manager, gpumat);
pass->material_set(*inst_.manager, gpumat, true);
pass->push_constant("use_fast_method", use_fast_occupancy);
return pass;
}
@@ -1091,7 +1091,7 @@ PassMain::Sub *VolumeLayer::material_add(const Object *ob,
UNUSED_VARS_NDEBUG(ob);
PassMain::Sub *pass = &material_ps_->sub(GPU_material_get_name(gpumat));
pass->material_set(*inst_.manager, gpumat);
pass->material_set(*inst_.manager, gpumat, true);
if (GPU_material_flag_get(gpumat, GPU_MATFLAG_VOLUME_SCATTER)) {
has_scatter = true;
}

View File

@@ -73,7 +73,7 @@ PassMain::Sub &MeshPass::get_subpass(eGeometryType geometry_type,
{
is_empty_ = false;
if (texture && texture->gpu.texture) {
if (texture && texture->gpu.texture && *texture->gpu.texture) {
auto add_cb = [&] {
PassMain::Sub *sub_pass = &get_subpass(geometry_type, eShaderType::TEXTURE);
sub_pass = &sub_pass->sub(texture->name);
@@ -95,7 +95,7 @@ PassMain::Sub &MeshPass::get_subpass(eGeometryType geometry_type,
};
return *texture_subpass_map_.lookup_or_add_cb(
TextureSubPassKey(texture->gpu.texture, geometry_type), add_cb);
TextureSubPassKey(*texture->gpu.texture, geometry_type), add_cb);
}
return get_subpass(geometry_type, eShaderType::MATERIAL);

View File

@@ -183,7 +183,7 @@ void SceneResources::init(const SceneState &scene_state, const DRWContext *ctx)
missing_tx.ensure_2d(
GPU_RGBA8, int2(1), GPU_TEXTURE_USAGE_SHADER_READ, float4(1.0f, 0.0f, 1.0f, 1.0f));
missing_texture.gpu.texture = missing_tx;
missing_texture.gpu.texture = &missing_tx;
missing_texture.name = "Missing Texture";
dummy_texture_tx.ensure_2d(

View File

@@ -243,7 +243,9 @@ class PassBase {
* push_constant() call will use its interface.
* IMPORTANT: Assumes material is compiled and can be used (no compilation error).
*/
void material_set(Manager &manager, GPUMaterial *material);
void material_set(Manager &manager,
GPUMaterial *material,
bool deferred_texture_loading = false);
/**
* Record a draw call.
@@ -1116,7 +1118,10 @@ inline void PassBase<T>::subpass_transition(GPUAttachmentState depth_attachment,
color_states[7]}};
}
template<class T> inline void PassBase<T>::material_set(Manager &manager, GPUMaterial *material)
template<class T>
inline void PassBase<T>::material_set(Manager &manager,
GPUMaterial *material,
bool deferred_texture_loading)
{
GPUPass *gpupass = GPU_material_get_pass(material);
shader_set(GPU_pass_shader_get(gpupass));
@@ -1128,15 +1133,31 @@ template<class T> inline void PassBase<T>::material_set(Manager &manager, GPUMat
/* Image */
const bool use_tile_mapping = tex->tiled_mapping_name[0];
ImageUser *iuser = tex->iuser_available ? &tex->iuser : nullptr;
ImageGPUTextures gputex = BKE_image_get_gpu_material_texture(
tex->ima, iuser, use_tile_mapping);
manager.acquire_texture(gputex.texture);
bind_texture(tex->sampler_name, gputex.texture, tex->sampler_state);
ImageGPUTextures gputex;
if (deferred_texture_loading) {
gputex = BKE_image_get_gpu_material_texture_try(tex->ima, iuser, use_tile_mapping);
}
else {
gputex = BKE_image_get_gpu_material_texture(tex->ima, iuser, use_tile_mapping);
}
if (gputex.tile_mapping) {
manager.acquire_texture(gputex.tile_mapping);
bind_texture(tex->tiled_mapping_name, gputex.tile_mapping, tex->sampler_state);
if (*gputex.texture == nullptr) {
/* Texture not yet loaded. Register a reference inside the draw pass.
* The texture will be acquired once it is created. */
bind_texture(tex->sampler_name, gputex.texture, tex->sampler_state);
if (gputex.tile_mapping) {
bind_texture(tex->tiled_mapping_name, gputex.tile_mapping, tex->sampler_state);
}
}
else {
/* Texture is loaded. Acquire. */
manager.acquire_texture(*gputex.texture);
bind_texture(tex->sampler_name, *gputex.texture, tex->sampler_state);
if (gputex.tile_mapping) {
manager.acquire_texture(*gputex.tile_mapping);
bind_texture(tex->tiled_mapping_name, *gputex.tile_mapping, tex->sampler_state);
}
}
}
else if (tex->colorband) {