Cleanup: replace some spinlocks with atomics or mutexes

- ImBuf reference counting: turn it into just an atomic integer
- Cachefile safety: turn the spinlock into a mutex, since the work done
  under it was quite heavy (hashtable creation, other memory allocations)
- Movie clip editor: turn the spinlock into a mutex, since the work done
  under it was very heavy (reading files from disk, etc.)
- Mesh intersect: remove the previously commented out spinlock path;
  replace BLI mutex with C++ mutex for shorter code

Pull Request: https://projects.blender.org/blender/blender/pulls/137989
This commit is contained in:
Aras Pranckevicius
2025-04-29 10:42:45 +02:00
committed by Aras Pranckevicius
parent ddc0f9460d
commit 8528feb7ee
12 changed files with 45 additions and 186 deletions

View File

@@ -16,9 +16,6 @@ struct Main;
struct Object;
struct Scene;
void BKE_cachefiles_init();
void BKE_cachefiles_exit();
void *BKE_cachefile_add(Main *bmain, const char *name);
void BKE_cachefile_reload(Depsgraph *depsgraph, CacheFile *cache_file);

View File

@@ -34,7 +34,6 @@
#include "BKE_blender_user_menu.hh" /* own include */
#include "BKE_blender_version.h" /* own include */
#include "BKE_brush.hh"
#include "BKE_cachefile.hh"
#include "BKE_callbacks.hh"
#include "BKE_global.hh"
#include "BKE_idprop.hh"
@@ -74,7 +73,6 @@ void BKE_blender_free()
BKE_spacetypes_free(); /* after free main, it uses space callbacks */
IMB_exit();
BKE_cachefiles_exit();
DEG_free_node_types();
BKE_brush_system_exit();

View File

@@ -18,7 +18,6 @@
#include "BLI_listbase.h"
#include "BLI_path_utils.hh"
#include "BLI_string.h"
#include "BLI_threads.h"
#include "BLI_utildefines.h"
#include "BLT_translation.hh"
@@ -47,6 +46,8 @@
# include "usd.hh"
#endif
#include <mutex>
static void cachefile_handle_free(CacheFile *cache_file);
static void cache_file_init_data(ID *id)
@@ -152,17 +153,7 @@ IDTypeInfo IDType_ID_CF = {
};
/* TODO: make this per cache file to avoid global locks. */
static SpinLock spin;
void BKE_cachefiles_init()
{
BLI_spin_init(&spin);
}
void BKE_cachefiles_exit()
{
BLI_spin_end(&spin);
}
static std::mutex cache_mutex;
void BKE_cachefile_reader_open(CacheFile *cache_file,
CacheReader **reader,
@@ -197,7 +188,7 @@ void BKE_cachefile_reader_open(CacheFile *cache_file,
}
/* Multiple modifiers and constraints can call this function concurrently. */
BLI_spin_lock(&spin);
std::lock_guard lock(cache_mutex);
if (*reader) {
/* Register in set so we can free it when the cache file changes. */
if (cache_file->handle_readers == nullptr) {
@@ -209,7 +200,6 @@ void BKE_cachefile_reader_open(CacheFile *cache_file,
/* Remove in case CacheReader_open_alembic_object free the existing reader. */
BLI_gset_remove(cache_file->handle_readers, reader, nullptr);
}
BLI_spin_unlock(&spin);
#else
UNUSED_VARS(cache_file, reader, object, object_path);
#endif
@@ -220,7 +210,7 @@ void BKE_cachefile_reader_free(CacheFile *cache_file, CacheReader **reader)
#if defined(WITH_ALEMBIC) || defined(WITH_USD)
/* Multiple modifiers and constraints can call this function concurrently, and
* cachefile_handle_free() can also be called at the same time. */
BLI_spin_lock(&spin);
std::lock_guard lock(cache_mutex);
if (*reader != nullptr) {
if (cache_file) {
BLI_assert(cache_file->id.tag & ID_TAG_COPIED_ON_EVAL);
@@ -247,7 +237,6 @@ void BKE_cachefile_reader_free(CacheFile *cache_file, CacheReader **reader)
BLI_gset_remove(cache_file->handle_readers, reader, nullptr);
}
}
BLI_spin_unlock(&spin);
#else
UNUSED_VARS(cache_file, reader);
#endif
@@ -259,35 +248,36 @@ static void cachefile_handle_free(CacheFile *cache_file)
/* Free readers in all modifiers and constraints that use the handle, before
* we free the handle itself. */
BLI_spin_lock(&spin);
if (cache_file->handle_readers) {
GSetIterator gs_iter;
GSET_ITER (gs_iter, cache_file->handle_readers) {
CacheReader **reader = static_cast<CacheReader **>(BLI_gsetIterator_getKey(&gs_iter));
if (*reader != nullptr) {
switch (cache_file->type) {
case CACHEFILE_TYPE_ALEMBIC:
{
std::lock_guard lock(cache_mutex);
if (cache_file->handle_readers) {
GSetIterator gs_iter;
GSET_ITER (gs_iter, cache_file->handle_readers) {
CacheReader **reader = static_cast<CacheReader **>(BLI_gsetIterator_getKey(&gs_iter));
if (*reader != nullptr) {
switch (cache_file->type) {
case CACHEFILE_TYPE_ALEMBIC:
# ifdef WITH_ALEMBIC
ABC_CacheReader_free(*reader);
ABC_CacheReader_free(*reader);
# endif
break;
case CACHEFILE_TYPE_USD:
break;
case CACHEFILE_TYPE_USD:
# ifdef WITH_USD
blender::io::usd::USD_CacheReader_free(*reader);
blender::io::usd::USD_CacheReader_free(*reader);
# endif
break;
case CACHE_FILE_TYPE_INVALID:
break;
break;
case CACHE_FILE_TYPE_INVALID:
break;
}
*reader = nullptr;
}
*reader = nullptr;
}
}
BLI_gset_free(cache_file->handle_readers, nullptr);
cache_file->handle_readers = nullptr;
BLI_gset_free(cache_file->handle_readers, nullptr);
cache_file->handle_readers = nullptr;
}
}
BLI_spin_unlock(&spin);
/* Free handle. */
if (cache_file->handle) {

View File

@@ -14,6 +14,7 @@
# include <functional>
# include <iostream>
# include <memory>
# include <mutex>
# include <numeric>
# include "BLI_array.hh"
@@ -283,14 +284,6 @@ std::ostream &operator<<(std::ostream &os, const Face *f)
return os;
}
/**
* Un-comment the following to try using a spin-lock instead of
* a mutex in the arena allocation routines.
* Initial tests showed that it doesn't seem to help very much,
* if at all, to use a spin-lock.
*/
// #define USE_SPINLOCK
/**
* #IMeshArena is the owner of the Vert and Face resources used
* during a run of one of the mesh-intersect main functions.
@@ -336,34 +329,9 @@ class IMeshArena::IMeshArenaImpl : NonCopyable, NonMovable {
int next_face_id_ = 0;
/* Need a lock when multi-threading to protect allocation of new elements. */
# ifdef USE_SPINLOCK
SpinLock lock_;
# else
ThreadMutex *mutex_;
# endif
std::mutex mutex_;
public:
IMeshArenaImpl()
{
if (intersect_use_threading) {
# ifdef USE_SPINLOCK
BLI_spin_init(&lock_);
# else
mutex_ = BLI_mutex_alloc();
# endif
}
}
~IMeshArenaImpl()
{
if (intersect_use_threading) {
# ifdef USE_SPINLOCK
BLI_spin_end(&lock_);
# else
BLI_mutex_free(mutex_);
# endif
}
}
void reserve(int vert_num_hint, int face_num_hint)
{
vset_.reserve(vert_num_hint);
@@ -401,21 +369,8 @@ class IMeshArena::IMeshArenaImpl : NonCopyable, NonMovable {
Face *add_face(Span<const Vert *> verts, int orig, Span<int> edge_origs, Span<bool> is_intersect)
{
Face *f = new Face(verts, next_face_id_++, orig, edge_origs, is_intersect);
if (intersect_use_threading) {
# ifdef USE_SPINLOCK
BLI_spin_lock(&lock_);
# else
BLI_mutex_lock(mutex_);
# endif
}
std::lock_guard lock(mutex_);
allocated_faces_.append(std::unique_ptr<Face>(f));
if (intersect_use_threading) {
# ifdef USE_SPINLOCK
BLI_spin_unlock(&lock_);
# else
BLI_mutex_unlock(mutex_);
# endif
}
return f;
}
@@ -436,21 +391,8 @@ class IMeshArena::IMeshArenaImpl : NonCopyable, NonMovable {
{
Vert vtry(co, double3(co[0].get_d(), co[1].get_d(), co[2].get_d()), NO_INDEX, NO_INDEX);
VSetKey vskey(&vtry);
if (intersect_use_threading) {
# ifdef USE_SPINLOCK
BLI_spin_lock(&lock_);
# else
BLI_mutex_lock(mutex_);
# endif
}
std::lock_guard lock(mutex_);
const VSetKey *lookup = vset_.lookup_key_ptr(vskey);
if (intersect_use_threading) {
# ifdef USE_SPINLOCK
BLI_spin_unlock(&lock_);
# else
BLI_mutex_unlock(mutex_);
# endif
}
if (!lookup) {
return nullptr;
}
@@ -481,13 +423,7 @@ class IMeshArena::IMeshArenaImpl : NonCopyable, NonMovable {
Vert *vtry = new Vert(mco, dco, NO_INDEX, NO_INDEX);
const Vert *ans;
VSetKey vskey(vtry);
if (intersect_use_threading) {
# ifdef USE_SPINLOCK
BLI_spin_lock(&lock_);
# else
BLI_mutex_lock(mutex_);
# endif
}
std::lock_guard lock(mutex_);
const VSetKey *lookup = vset_.lookup_key_ptr(vskey);
if (!lookup) {
vtry->id = next_vert_id_++;
@@ -506,13 +442,6 @@ class IMeshArena::IMeshArenaImpl : NonCopyable, NonMovable {
delete vtry;
ans = lookup->vert;
}
if (intersect_use_threading) {
# ifdef USE_SPINLOCK
BLI_spin_unlock(&lock_);
# else
BLI_mutex_unlock(mutex_);
# endif
}
return ans;
};
@@ -520,13 +449,7 @@ class IMeshArena::IMeshArenaImpl : NonCopyable, NonMovable {
{
const Vert *ans;
VSetKey vskey(vtry);
if (intersect_use_threading) {
# ifdef USE_SPINLOCK
BLI_spin_lock(&lock_);
# else
BLI_mutex_lock(mutex_);
# endif
}
std::lock_guard lock(mutex_);
const VSetKey *lookup = vset_.lookup_key_ptr(vskey);
if (!lookup) {
vtry->id = next_vert_id_++;
@@ -544,13 +467,6 @@ class IMeshArena::IMeshArenaImpl : NonCopyable, NonMovable {
delete vtry;
ans = lookup->vert;
}
if (intersect_use_threading) {
# ifdef USE_SPINLOCK
BLI_spin_unlock(&lock_);
# else
BLI_mutex_unlock(mutex_);
# endif
}
return ans;
};
};

View File

@@ -9,6 +9,7 @@
#include <cerrno>
#include <cstddef>
#include <fcntl.h>
#include <mutex>
#include <sys/types.h>
#ifndef WIN32
@@ -681,7 +682,7 @@ struct PrefetchQueue {
*/
bool forward;
SpinLock spin;
std::mutex mutex;
bool *stop;
bool *do_update;
@@ -780,7 +781,7 @@ static uchar *prefetch_thread_next_frame(PrefetchQueue *queue,
{
uchar *mem = nullptr;
BLI_spin_lock(&queue->spin);
std::lock_guard lock(queue->mutex);
if (!*queue->stop && !check_prefetch_break() &&
IN_RANGE_INCL(queue->current_frame, queue->start_frame, queue->end_frame))
{
@@ -831,7 +832,6 @@ static uchar *prefetch_thread_next_frame(PrefetchQueue *queue,
*queue->progress = float(frames_processed) / (queue->end_frame - queue->start_frame);
}
}
BLI_spin_unlock(&queue->spin);
return mem;
}
@@ -896,8 +896,6 @@ static void start_prefetch_threads(MovieClip *clip,
/* initialize queue */
PrefetchQueue queue;
BLI_spin_init(&queue.spin);
queue.current_frame = current_frame;
queue.initial_frame = current_frame;
queue.start_frame = start_frame;
@@ -916,8 +914,6 @@ static void start_prefetch_threads(MovieClip *clip,
}
BLI_task_pool_work_and_wait(task_pool);
BLI_task_pool_free(task_pool);
BLI_spin_end(&queue.spin);
}
/* NOTE: Reading happens from `clip_local` into `clip->cache`. */

View File

@@ -8,6 +8,7 @@
#include <cerrno>
#include <fcntl.h>
#include <mutex>
#include <sys/types.h>
#ifndef WIN32
@@ -1299,7 +1300,7 @@ struct ProxyQueue {
int cfra;
int sfra;
int efra;
SpinLock spin;
std::mutex mutex;
const bool *stop;
bool *do_update;
@@ -1320,7 +1321,7 @@ static uchar *proxy_thread_next_frame(ProxyQueue *queue,
{
uchar *mem = nullptr;
BLI_spin_lock(&queue->spin);
std::lock_guard lock(queue->mutex);
if (!*queue->stop && queue->cfra <= queue->efra) {
MovieClipUser user = *DNA_struct_default_get(MovieClipUser);
char filepath[FILE_MAX];
@@ -1332,14 +1333,12 @@ static uchar *proxy_thread_next_frame(ProxyQueue *queue,
file = BLI_open(filepath, O_BINARY | O_RDONLY, 0);
if (file < 0) {
BLI_spin_unlock(&queue->spin);
return nullptr;
}
const size_t size = BLI_file_descriptor_size(file);
if (UNLIKELY(ELEM(size, 0, size_t(-1)))) {
close(file);
BLI_spin_unlock(&queue->spin);
return nullptr;
}
@@ -1347,7 +1346,6 @@ static uchar *proxy_thread_next_frame(ProxyQueue *queue,
if (BLI_read(file, mem, size) != size) {
close(file);
BLI_spin_unlock(&queue->spin);
MEM_freeN(mem);
return nullptr;
}
@@ -1361,8 +1359,6 @@ static uchar *proxy_thread_next_frame(ProxyQueue *queue,
*queue->do_update = true;
*queue->progress = float(queue->cfra - queue->sfra) / (queue->efra - queue->sfra);
}
BLI_spin_unlock(&queue->spin);
return mem;
}
@@ -1425,8 +1421,6 @@ static void do_sequence_proxy(void *pjv,
}
ProxyQueue queue;
BLI_spin_init(&queue.spin);
queue.cfra = sfra;
queue.sfra = sfra;
queue.efra = efra;
@@ -1464,7 +1458,6 @@ static void do_sequence_proxy(void *pjv,
}
}
BLI_spin_end(&queue.spin);
MEM_freeN(handles);
}

View File

@@ -75,6 +75,7 @@ set(LIB
PRIVATE bf::dna
PRIVATE bf::gpu
bf_imbuf_openimageio
PRIVATE bf::intern::atomic
PRIVATE bf::intern::guardedalloc
bf_intern_memutil
bf_intern_opencolorio

View File

@@ -239,9 +239,8 @@ struct ImBuf {
/** The absolute file path associated with this image. */
char filepath[IMB_FILEPATH_SIZE];
/* memory cache limiter */
/** reference counter for multiple users */
int refcounter;
int32_t refcounter;
/* some parameters to pass along for packing images */
/** Compressed image only used with PNG and EXR currently. */

View File

@@ -9,9 +9,6 @@
struct ImBuf;
void imb_refcounter_lock_init();
void imb_refcounter_lock_exit();
#ifndef WIN32
void imb_mmap_lock_init();
void imb_mmap_lock_exit();

View File

@@ -28,17 +28,7 @@
#include "GPU_texture.hh"
static SpinLock refcounter_spin;
void imb_refcounter_lock_init()
{
BLI_spin_init(&refcounter_spin);
}
void imb_refcounter_lock_exit()
{
BLI_spin_end(&refcounter_spin);
}
#include "atomic_ops.h"
#ifndef WIN32
static SpinLock mmap_spin;
@@ -250,17 +240,7 @@ void IMB_freeImBuf(ImBuf *ibuf)
return;
}
bool needs_free = false;
BLI_spin_lock(&refcounter_spin);
if (ibuf->refcounter > 0) {
ibuf->refcounter--;
}
else {
needs_free = true;
}
BLI_spin_unlock(&refcounter_spin);
bool needs_free = atomic_sub_and_fetch_int32(&ibuf->refcounter, 1) < 0;
if (needs_free) {
/* Include this check here as the path may be manipulated after creation. */
BLI_assert_msg(!(ibuf->filepath[0] == '/' && ibuf->filepath[1] == '/'),
@@ -277,9 +257,7 @@ void IMB_freeImBuf(ImBuf *ibuf)
void IMB_refImBuf(ImBuf *ibuf)
{
BLI_spin_lock(&refcounter_spin);
ibuf->refcounter++;
BLI_spin_unlock(&refcounter_spin);
atomic_add_and_fetch_int32(&ibuf->refcounter, 1);
}
ImBuf *IMB_makeSingleUser(ImBuf *ibuf)
@@ -288,9 +266,7 @@ ImBuf *IMB_makeSingleUser(ImBuf *ibuf)
return nullptr;
}
BLI_spin_lock(&refcounter_spin);
const bool is_single = (ibuf->refcounter == 0);
BLI_spin_unlock(&refcounter_spin);
const bool is_single = (atomic_load_int32(&ibuf->refcounter) == 0);
if (is_single) {
return ibuf;
}

View File

@@ -15,7 +15,6 @@
void IMB_init()
{
imb_refcounter_lock_init();
imb_mmap_lock_init();
imb_filetypes_init();
colormanagement_init();
@@ -26,5 +25,4 @@ void IMB_exit()
imb_filetypes_exit();
colormanagement_exit();
imb_mmap_lock_exit();
imb_refcounter_lock_exit();
}

View File

@@ -39,7 +39,6 @@
#include "BKE_appdir.hh"
#include "BKE_blender.hh"
#include "BKE_brush.hh"
#include "BKE_cachefile.hh"
#include "BKE_callbacks.hh"
#include "BKE_context.hh"
#include "BKE_cpp_types.hh"
@@ -421,7 +420,6 @@ int main(int argc,
BKE_cpp_types_init();
BKE_idtype_init();
BKE_cachefiles_init();
BKE_modifier_init();
BKE_shaderfx_init();
BKE_volumes_init();