EEVEE: Add deferred texture loading

This adds a new function to query a GPUTexture from an
Image datablock without actually creating it.

This allows keeping track of all the textures that
need to be loaded and deferring their loading to
end_sync. The textures are then only used in the
next sync. This is because we do not want to stage
a texture for drawing before it is valid.

Multithreading is used to load the textures from disk
as soon as possible. Loading is still blocking, but it
is much faster (depending on hardware).

Before: 5.7s
After: 2.5s

On Linux workstation: 2.28x speedup in texture loading
On M1 MacBook Pro: 2.72x speedup in texture loading

This includes redraw overhead but it is not super significant.

Having a vector of all the textures to be loaded
will eventually be helpful in making the
texture uploading multi-threaded. Currently, that is
a bit difficult given the need for a valid GPUContext
per thread.

- [x] Bypass deferred loading for animated textures
- [x] Add throttling to only load a few textures per frame
- [x] Do not delay for viewport render

Pull Request: https://projects.blender.org/blender/blender/pulls/139644
This commit is contained in:
Clément Foucault
2025-06-03 16:57:27 +02:00
committed by Clément Foucault
parent 57d94f5902
commit 1d638c0f5e
5 changed files with 134 additions and 17 deletions

View File

@@ -612,6 +612,11 @@ ImageGPUTextures BKE_image_get_gpu_material_texture(Image *image,
ImageUser *iuser,
const bool use_tile_mapping);
/* Same as BKE_image_get_gpu_material_texture but will not load the texture if it isn't already. */
ImageGPUTextures BKE_image_get_gpu_material_texture_try(Image *image,
ImageUser *iuser,
const bool use_tile_mapping);
/**
* Is the alpha of the `GPUTexture` for a given image/ibuf premultiplied.
*/

View File

@@ -354,7 +354,8 @@ void BKE_image_ensure_gpu_texture(Image *image, ImageUser *iuser)
static ImageGPUTextures image_get_gpu_texture(Image *ima,
ImageUser *iuser,
const bool use_viewers,
const bool use_tile_mapping)
const bool use_tile_mapping,
bool try_only)
{
ImageGPUTextures result = {};
@@ -415,6 +416,11 @@ static ImageGPUTextures image_get_gpu_texture(Image *ima,
return result;
}
if (try_only) {
/* If we got this far, it means the texture is not loaded. */
return result;
}
/* Check if we have a valid image. If not, we return a dummy
* texture with zero bind-code so we don't keep trying. */
ImageTile *tile = BKE_image_get_tile(ima, 0);
@@ -478,19 +484,26 @@ static ImageGPUTextures image_get_gpu_texture(Image *ima,
GPUTexture *BKE_image_get_gpu_texture(Image *image, ImageUser *iuser)
{
return image_get_gpu_texture(image, iuser, false, false).texture;
return image_get_gpu_texture(image, iuser, false, false, false).texture;
}
GPUTexture *BKE_image_get_gpu_viewer_texture(Image *image, ImageUser *iuser)
{
return image_get_gpu_texture(image, iuser, true, false).texture;
return image_get_gpu_texture(image, iuser, true, false, false).texture;
}
ImageGPUTextures BKE_image_get_gpu_material_texture(Image *image,
ImageUser *iuser,
const bool use_tile_mapping)
{
return image_get_gpu_texture(image, iuser, false, use_tile_mapping);
return image_get_gpu_texture(image, iuser, false, use_tile_mapping, false);
}
ImageGPUTextures BKE_image_get_gpu_material_texture_try(Image *image,
ImageUser *iuser,
const bool use_tile_mapping)
{
return image_get_gpu_texture(image, iuser, false, use_tile_mapping, true);
}
/** \} */

View File

@@ -409,6 +409,7 @@ void Instance::end_sync()
return;
}
materials.end_sync();
velocity.end_sync();
volume.end_sync(); /* Needs to be before shadows. */
shadows.end_sync(); /* Needs to be before lights. */
@@ -452,7 +453,8 @@ bool Instance::needs_lightprobe_sphere_passes() const
bool Instance::do_lightprobe_sphere_sync() const
{
return (materials.queued_shaders_count == 0) && needs_lightprobe_sphere_passes();
return (materials.queued_shaders_count == 0) && (materials.queued_textures_count == 0) &&
needs_lightprobe_sphere_passes();
}
bool Instance::needs_planar_probe_passes() const
@@ -462,7 +464,8 @@ bool Instance::needs_planar_probe_passes() const
bool Instance::do_planar_probe_sync() const
{
return (materials.queued_shaders_count == 0) && needs_planar_probe_passes();
return (materials.queued_shaders_count == 0) && (materials.queued_textures_count == 0) &&
needs_planar_probe_passes();
}
/** \} */
@@ -484,7 +487,7 @@ void Instance::render_sample()
/* Motion blur may need to do re-sync after a certain number of sample. */
if (!is_viewport() && sampling.do_render_sync()) {
render_sync();
while (materials.queued_shaders_count > 0) {
while (materials.queued_shaders_count > 0 || materials.queued_textures_count > 0) {
GPU_pass_cache_wait_for_all();
/** WORKAROUND: Re-sync now that all shaders are compiled. */
/* This may need to happen more than once, since actual materials may require more passes
@@ -674,15 +677,19 @@ void Instance::draw_viewport()
DRW_viewport_request_redraw();
}
if (materials.queued_shaders_count > 0) {
info_append_i18n("Compiling shaders ({} remaining)", materials.queued_shaders_count);
if (!GPU_use_parallel_compilation() &&
GPU_type_matches_ex(GPU_DEVICE_ANY, GPU_OS_ANY, GPU_DRIVER_ANY, GPU_BACKEND_OPENGL))
{
info_append_i18n(
"Increasing Preferences > System > Max Shader Compilation Subprocesses may improve "
"compilation time.");
if (materials.queued_shaders_count > 0 || materials.queued_textures_count > 0) {
if (materials.queued_textures_count > 0) {
info_append_i18n("Loading textures ({} remaining)", materials.queued_textures_count);
}
if (materials.queued_shaders_count > 0) {
info_append_i18n("Compiling shaders ({} remaining)", materials.queued_shaders_count);
if (!GPU_use_parallel_compilation() &&
GPU_type_matches_ex(GPU_DEVICE_ANY, GPU_OS_ANY, GPU_DRIVER_ANY, GPU_BACKEND_OPENGL))
{
info_append_i18n(
"Increasing Preferences > System > Max Shader Compilation Subprocesses may improve "
"compilation time.");
}
}
DRW_viewport_request_redraw();
}
@@ -827,7 +834,7 @@ void Instance::light_bake_irradiance(
custom_pipeline_wrapper([&]() {
this->render_sync();
while (materials.queued_shaders_count > 0) {
while ((materials.queued_shaders_count > 0) || (materials.queued_textures_count > 0)) {
GPU_pass_cache_wait_for_all();
/** WORKAROUND: Re-sync now that all shaders are compiled. */
/* This may need to happen more than once, since actual materials may require more passes

View File

@@ -6,6 +6,7 @@
* \ingroup eevee
*/
#include "BLI_time.h"
#include "DNA_material_types.h"
#include "BKE_lib_id.hh"
@@ -160,16 +161,94 @@ MaterialModule::~MaterialModule()
void MaterialModule::begin_sync()
{
queued_shaders_count = 0;
queued_textures_count = 0;
queued_optimize_shaders_count = 0;
uint64_t next_update = GPU_pass_global_compilation_count();
gpu_pass_last_update_ = gpu_pass_next_update_;
gpu_pass_next_update_ = next_update;
texture_loading_queue_.clear();
material_map_.clear();
shader_map_.clear();
}
bool MaterialModule::queue_texture_loading(GPUMaterial *material)
{
if (inst_.is_viewport_image_render) {
/* Do not delay image loading for viewport render as it would produce invalid frames. */
return true;
}
bool loaded = true;
ListBase textures = GPU_material_textures(material);
for (GPUMaterialTexture *tex : ListBaseWrapper<GPUMaterialTexture>(textures)) {
if (tex->ima) {
const bool use_tile_mapping = tex->tiled_mapping_name[0];
ImageUser *iuser = tex->iuser_available ? &tex->iuser : nullptr;
ImageGPUTextures gputex = BKE_image_get_gpu_material_texture_try(
tex->ima, iuser, use_tile_mapping);
if (ELEM(tex->ima->source, IMA_SRC_SEQUENCE, IMA_SRC_MOVIE)) {
/* Do not defer the loading of animated textures as they would appear always loading. */
continue;
}
if (gputex.texture == nullptr) {
texture_loading_queue_.append(tex);
loaded = false;
}
}
}
return loaded;
}
void MaterialModule::end_sync()
{
if (texture_loading_queue_.is_empty()) {
return;
}
if (inst_.is_viewport()) {
/* Avoid ghosting of textures. */
inst_.sampling.reset();
}
GPU_debug_group_begin("Texture Loading");
threading::parallel_for(texture_loading_queue_.index_range(), 1, [&](const IndexRange range) {
for (auto i : range) {
GPUMaterialTexture *tex = texture_loading_queue_[i];
ImageUser *iuser = tex->iuser_available ? &tex->iuser : nullptr;
BKE_image_tag_time(tex->ima);
BKE_image_get_tile(tex->ima, 0);
ImBuf *imbuf = BKE_image_acquire_ibuf(tex->ima, iuser, nullptr);
BKE_image_release_ibuf(tex->ima, imbuf, nullptr);
}
});
/* To avoid freezing the UI too much, we only allow some finite amount of time of texture loading
* per frame. */
double loading_time_per_sync = inst_.is_image_render ? DBL_MAX : 0.250;
double start_time = BLI_time_now_seconds();
for (GPUMaterialTexture *tex : texture_loading_queue_) {
BLI_assert(tex->ima);
GPU_debug_group_begin(tex->ima->id.name);
const bool use_tile_mapping = tex->tiled_mapping_name[0];
ImageUser *iuser = tex->iuser_available ? &tex->iuser : nullptr;
BKE_image_get_gpu_material_texture(tex->ima, iuser, use_tile_mapping);
GPU_debug_group_end();
if (BLI_time_now_seconds() - start_time > loading_time_per_sync) {
break;
}
}
GPU_debug_group_end();
texture_loading_queue_.clear();
}
MaterialPass MaterialModule::material_pass_get(Object *ob,
::Material *blender_mat,
eMaterialPipeline pipeline_type,
@@ -201,6 +280,11 @@ MaterialPass MaterialModule::material_pass_get(Object *ob,
switch (GPU_material_status(matpass.gpumat)) {
case GPU_MAT_SUCCESS: {
if (!queue_texture_loading(matpass.gpumat)) {
queued_textures_count++;
matpass.gpumat = inst_.shaders.material_shader_get(
default_mat, default_mat->nodetree, pipeline_type, geometry_type, false, nullptr);
}
/* Determine optimization status for remaining compilations counter. */
int optimization_status = GPU_material_optimization_status(matpass.gpumat);
if (optimization_status == GPU_MAT_OPTIMIZATION_QUEUED) {

View File

@@ -356,6 +356,7 @@ class MaterialModule {
::Material *default_volume;
int64_t queued_shaders_count = 0;
int64_t queued_textures_count = 0;
int64_t queued_optimize_shaders_count = 0;
private:
@@ -373,11 +374,14 @@ class MaterialModule {
uint64_t gpu_pass_last_update_ = 0;
uint64_t gpu_pass_next_update_ = 0;
Vector<GPUMaterialTexture *> texture_loading_queue_;
public:
MaterialModule(Instance &inst);
~MaterialModule();
void begin_sync();
void end_sync();
/**
* Returned Material references are valid until the next call to this function or material_get().
@@ -402,6 +406,10 @@ class MaterialModule {
eMaterialPipeline pipeline_type,
eMaterialGeometry geometry_type,
eMaterialProbe probe_capture = MAT_PROBE_NONE);
/* Push unloaded texture used by this material to the texture loading queue.
* Return true if all textures are already loaded. */
bool queue_texture_loading(GPUMaterial *material);
};
/** \} */