Cleanup: Remove unnecessary mutex for draw attribute requests

As noted in [0], locking or atomics are not required for merging
requests for a single mesh, because there is no multithreaded iteration
over objects that will process the same mesh in multiple threads. This
locking was added preemptively over the years and has made code
needlessly complicated, even while the final design for parallel object
iteration isn't completely clear. This PR removes the locks to simplify
some changes necessary for mesh attribute storage refactors.

[0]: b6764e77ef

Pull Request: https://projects.blender.org/blender/blender/pulls/141405
This commit is contained in:
Hans Goudey
2025-07-03 19:14:26 +02:00
committed by Hans Goudey
parent 4df8b950e1
commit a9e6417e19
6 changed files with 13 additions and 40 deletions

View File

@@ -139,9 +139,6 @@ struct MeshRuntime {
Mesh *mesh_eval = nullptr;
Mutex eval_mutex;
/** Needed to ensure some thread-safety during render data pre-processing. */
Mutex render_mutex;
/** Implicit sharing user count for #Mesh::face_offset_indices. */
const ImplicitSharingInfo *face_offsets_sharing_info = nullptr;

View File

@@ -10,14 +10,8 @@
namespace blender::draw {
void drw_attributes_merge(VectorSet<std::string> *dst,
const VectorSet<std::string> *src,
Mutex &render_mutex)
void drw_attributes_merge(VectorSet<std::string> *dst, const VectorSet<std::string> *src)
{
if (src->is_empty()) {
return;
}
std::lock_guard lock{render_mutex};
dst->add_multiple(src->as_span());
}

View File

@@ -14,7 +14,6 @@
#include "DNA_customdata_types.h"
#include "BLI_mutex.hh"
#include "BLI_string_ref.hh"
#include "BLI_sys_types.h"
@@ -43,9 +42,7 @@ struct DRW_MeshCDMask {
* See `mesh_cd_layers_type_*` functions. */
static_assert(sizeof(DRW_MeshCDMask) <= sizeof(uint32_t), "DRW_MeshCDMask exceeds 32 bits");
void drw_attributes_merge(VectorSet<std::string> *dst,
const VectorSet<std::string> *src,
Mutex &render_mutex);
void drw_attributes_merge(VectorSet<std::string> *dst, const VectorSet<std::string> *src);
/* Return true if all requests in b are in a. */
bool drw_attributes_overlap(const VectorSet<std::string> *a, const VectorSet<std::string> *b);

View File

@@ -94,13 +94,6 @@ struct CurvesBatchCache {
/* Whether the cache is invalid. */
bool is_dirty;
/**
* The draw cache extraction is currently not multi-threaded for multiple objects, but if it was,
* some locking would be necessary because multiple objects can use the same curves data with
* different materials, etc. This is a placeholder to make multi-threading easier in the future.
*/
Mutex render_mutex;
};
static bool batch_cache_is_dirty(const Curves &curves)
@@ -773,9 +766,9 @@ static bool ensure_attributes(const Curves &curves,
GPU_VERTBUF_DISCARD_SAFE(final_cache.attributes_buf[i]);
GPU_VERTBUF_DISCARD_SAFE(cache.eval_cache.proc_attributes_buf[i]);
}
drw_attributes_merge(&final_cache.attr_used, &attrs_needed, cache.render_mutex);
drw_attributes_merge(&final_cache.attr_used, &attrs_needed);
}
drw_attributes_merge(&final_cache.attr_used_over_time, &attrs_needed, cache.render_mutex);
drw_attributes_merge(&final_cache.attr_used_over_time, &attrs_needed);
}
bool need_tf_update = false;
@@ -807,7 +800,7 @@ static void request_attribute(Curves &curves, const StringRef name)
}
drw_attributes_add_request(&attributes, name);
drw_attributes_merge(&final_cache.attr_used, &attributes, cache.render_mutex);
drw_attributes_merge(&final_cache.attr_used, &attributes);
}
void drw_curves_get_attribute_sampler_name(const StringRef layer_name, char r_sampler_name[32])

View File

@@ -818,7 +818,7 @@ Span<gpu::Batch *> DRW_mesh_batch_cache_get_surface_shaded(
BLI_assert(materials.size() == cache.mat_len);
mesh_cd_layers_type_merge(&cache.cd_needed, cd_needed);
drw_attributes_merge(&cache.attr_needed, &attrs_needed, mesh.runtime->render_mutex);
drw_attributes_merge(&cache.attr_needed, &attrs_needed);
mesh_batch_cache_request_surface_batches(mesh, cache);
return cache.surface_per_mat;
}
@@ -846,7 +846,7 @@ gpu::Batch *DRW_mesh_batch_cache_get_surface_vertpaint(Object &object, Mesh &mes
VectorSet<std::string> attrs_needed{};
request_active_and_default_color_attributes(object, mesh, attrs_needed);
drw_attributes_merge(&cache.attr_needed, &attrs_needed, mesh.runtime->render_mutex);
drw_attributes_merge(&cache.attr_needed, &attrs_needed);
mesh_batch_cache_request_surface_batches(mesh, cache);
return cache.batch.surface;
@@ -859,7 +859,7 @@ gpu::Batch *DRW_mesh_batch_cache_get_surface_sculpt(Object &object, Mesh &mesh)
VectorSet<std::string> attrs_needed{};
request_active_and_default_color_attributes(object, mesh, attrs_needed);
drw_attributes_merge(&cache.attr_needed, &attrs_needed, mesh.runtime->render_mutex);
drw_attributes_merge(&cache.attr_needed, &attrs_needed);
mesh_batch_cache_request_surface_batches(mesh, cache);
return cache.batch.surface;
@@ -1221,13 +1221,12 @@ void DRW_mesh_batch_cache_create_requested(TaskGraph &task_graph,
cache.batch_ready &= ~(MBC_SURFACE | MBC_SURFACE_PER_MAT);
mesh_cd_layers_type_merge(&cache.cd_used, cache.cd_needed);
drw_attributes_merge(&cache.attr_used, &cache.attr_needed, mesh.runtime->render_mutex);
drw_attributes_merge(&cache.attr_used, &cache.attr_needed);
}
mesh_cd_layers_type_merge(&cache.cd_used_over_time, cache.cd_needed);
mesh_cd_layers_type_clear(&cache.cd_needed);
drw_attributes_merge(
&cache.attr_used_over_time, &cache.attr_needed, mesh.runtime->render_mutex);
drw_attributes_merge(&cache.attr_used_over_time, &cache.attr_needed);
cache.attr_needed.clear();
}

View File

@@ -84,13 +84,6 @@ struct PointCloudBatchCache {
/* settings to determine if cache is invalid */
bool is_dirty;
/**
* The draw cache extraction is currently not multi-threaded for multiple objects, but if it was,
* some locking would be necessary because multiple objects can use the same object data with
* different materials, etc. This is a placeholder to make multi-threading easier in the future.
*/
Mutex render_mutex;
};
static PointCloudBatchCache *pointcloud_batch_cache_get(PointCloud &pointcloud)
@@ -367,9 +360,9 @@ gpu::Batch **pointcloud_surface_shaded_get(PointCloud *pointcloud,
for (const int i : IndexRange(GPU_MAX_ATTR)) {
GPU_VERTBUF_DISCARD_SAFE(cache->eval_cache.attributes_buf[i]);
}
drw_attributes_merge(&cache->eval_cache.attr_used, &attrs_needed, cache->render_mutex);
drw_attributes_merge(&cache->eval_cache.attr_used, &attrs_needed);
}
drw_attributes_merge(&cache->eval_cache.attr_used_over_time, &attrs_needed, cache->render_mutex);
drw_attributes_merge(&cache->eval_cache.attr_used_over_time, &attrs_needed);
DRW_batch_request(&cache->eval_cache.surface_per_mat[0]);
return cache->eval_cache.surface_per_mat;
@@ -411,7 +404,7 @@ gpu::VertBuf **DRW_pointcloud_evaluated_attribute(PointCloud *pointcloud, const
{
VectorSet<std::string> requests{};
drw_attributes_add_request(&requests, name);
drw_attributes_merge(&cache.eval_cache.attr_used, &requests, cache.render_mutex);
drw_attributes_merge(&cache.eval_cache.attr_used, &requests);
}
int request_i = -1;