Task: Use TBB as Task Scheduler

This patch enables TBB as the default task scheduler. TBB stands for Threading Building Blocks and is developed by Intel. The library contains several threading patterns. This patch maps Blender's BLI_task_* functions to their TBB counterparts. After this patch we can add more patterns. A promising one is TBB::graph, which could be used for the depsgraph, draw manager and compositor.
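For illustration only (not part of this patch), a minimal sketch of how a BLI_task_parallel_range style loop maps onto TBB; the function and variable names here are hypothetical:

/* Sum each row of a matrix in parallel, roughly the way a
 * BLI_task_parallel_range loop is driven by TBB after this patch. */
#include <tbb/blocked_range.h>
#include <tbb/parallel_for.h>

static void sum_rows(const float *values, float *totals, int num_rows, int row_len)
{
  tbb::parallel_for(tbb::blocked_range<int>(0, num_rows),
                    [&](const tbb::blocked_range<int> &range) {
                      for (int i = range.begin(); i != range.end(); i++) {
                        float sum = 0.0f;
                        for (int j = 0; j < row_len; j++) {
                          sum += values[i * row_len + j];
                        }
                        totals[i] = sum;
                      }
                    });
}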

Performance changes depend on the actual hardware. The patch was tested on a range of machines, from laptops to workstations, and we did not detect any performance regressions.
* Linux Xeon E5-2699 v4: FPS boost from 12 to 17 using Spring's 04_010_A.anim.blend.
* AMD Ryzen Threadripper 2990WX 32-Core: animation playback goes from 9.5-10.5 FPS to 13.0-14.0 FPS on Agent 327's 10_03_B.anim.blend.

Reviewed By: brecht, sergey

Differential Revision: https://developer.blender.org/D7475
Brecht Van Lommel
2020-04-30 07:59:23 +02:00
committed by Jeroen Bakker
parent a18ad3c3b6
commit d8a3f3595a
46 changed files with 825 additions and 1671 deletions


@@ -66,6 +66,9 @@ macro(BLENDER_SRC_GTEST_EX)
if(UNIX AND NOT APPLE)
target_link_libraries(${TARGET_NAME} bf_intern_libc_compat)
endif()
if(WITH_TBB)
target_link_libraries(${TARGET_NAME} ${TBB_LIBRARIES})
endif()
get_property(GENERATOR_IS_MULTI_CONFIG GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
if(GENERATOR_IS_MULTI_CONFIG)


@@ -49,6 +49,7 @@ struct PBVH;
struct PBVHNode;
struct SubdivCCG;
struct TaskParallelTLS;
struct TaskParallelSettings;
typedef struct PBVH PBVH;
typedef struct PBVHNode PBVHNode;
@@ -456,30 +457,10 @@ bool pbvh_has_face_sets(PBVH *bvh);
void pbvh_show_face_sets_set(PBVH *bvh, bool show_face_sets);
/* Parallelization */
typedef void (*PBVHParallelRangeFunc)(void *__restrict userdata,
const int iter,
const struct TaskParallelTLS *__restrict tls);
typedef void (*PBVHParallelReduceFunc)(const void *__restrict userdata,
void *__restrict chunk_join,
void *__restrict chunk);
typedef struct PBVHParallelSettings {
bool use_threading;
void *userdata_chunk;
size_t userdata_chunk_size;
PBVHParallelReduceFunc func_reduce;
} PBVHParallelSettings;
void BKE_pbvh_parallel_range_settings(struct PBVHParallelSettings *settings,
void BKE_pbvh_parallel_range_settings(struct TaskParallelSettings *settings,
bool use_threading,
int totnode);
void BKE_pbvh_parallel_range(const int start,
const int stop,
void *userdata,
PBVHParallelRangeFunc func,
const struct PBVHParallelSettings *settings);
struct MVert *BKE_pbvh_get_verts(const PBVH *bvh);
#ifdef __cplusplus
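After this change, PBVH callers fill the generic TaskParallelSettings through the remaining wrapper and call BLI_task_parallel_range directly. A minimal sketch, assuming a hypothetical callback and data struct:

/* Hypothetical usage of the new API; MyData and the callback are made up. */
typedef struct MyData {
  PBVHNode **nodes;
} MyData;

static void my_node_task_cb(void *__restrict userdata,
                            const int n,
                            const struct TaskParallelTLS *__restrict UNUSED(tls))
{
  MyData *data = (MyData *)userdata;
  /* Process data->nodes[n] here. */
  (void)data;
}

static void update_all_nodes(PBVHNode **nodes, int totnode)
{
  MyData data;
  data.nodes = nodes;
  TaskParallelSettings settings;
  BKE_pbvh_parallel_range_settings(&settings, true, totnode);
  BLI_task_parallel_range(0, totnode, &data, my_node_task_cb, &settings);
}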


@@ -197,7 +197,6 @@ set(SRC
intern/particle_system.c
intern/pbvh.c
intern/pbvh_bmesh.c
intern/pbvh_parallel.cc
intern/pointcache.c
intern/pointcloud.c
intern/report.c


@@ -251,9 +251,7 @@ finally:
pRes[3] = fSign;
}
static void emDM_calc_loop_tangents_thread(TaskPool *__restrict UNUSED(pool),
void *taskdata,
int UNUSED(threadid))
static void emDM_calc_loop_tangents_thread(TaskPool *__restrict UNUSED(pool), void *taskdata)
{
struct SGLSLEditMeshToTangent *mesh2tangent = taskdata;
/* new computation method */
@@ -362,9 +360,8 @@ void BKE_editmesh_loop_tangent_calc(BMEditMesh *em,
#endif
/* Calculation */
if (em->tottri != 0) {
TaskScheduler *scheduler = BLI_task_scheduler_get();
TaskPool *task_pool;
task_pool = BLI_task_pool_create(scheduler, NULL, TASK_PRIORITY_LOW);
task_pool = BLI_task_pool_create(NULL, TASK_PRIORITY_LOW);
tangent_mask_curr = 0;
/* Calculate tangent layers */


@@ -1300,7 +1300,7 @@ static void loop_split_worker_do(LoopSplitTaskDataCommon *common_data,
}
}
static void loop_split_worker(TaskPool *__restrict pool, void *taskdata, int UNUSED(threadid))
static void loop_split_worker(TaskPool *__restrict pool, void *taskdata)
{
LoopSplitTaskDataCommon *common_data = BLI_task_pool_user_data(pool);
LoopSplitTaskData *data = taskdata;
@@ -1704,11 +1704,7 @@ void BKE_mesh_normals_loop_split(const MVert *mverts,
loop_split_generator(NULL, &common_data);
}
else {
TaskScheduler *task_scheduler;
TaskPool *task_pool;
task_scheduler = BLI_task_scheduler_get();
task_pool = BLI_task_pool_create(task_scheduler, &common_data, TASK_PRIORITY_HIGH);
TaskPool *task_pool = BLI_task_pool_create(&common_data, TASK_PRIORITY_HIGH);
loop_split_generator(task_pool, &common_data);


@@ -452,9 +452,7 @@ finally:
pRes[3] = fSign;
}
static void DM_calc_loop_tangents_thread(TaskPool *__restrict UNUSED(pool),
void *taskdata,
int UNUSED(threadid))
static void DM_calc_loop_tangents_thread(TaskPool *__restrict UNUSED(pool), void *taskdata)
{
struct SGLSLMeshToTangent *mesh2tangent = taskdata;
/* new computation method */
@@ -658,9 +656,7 @@ void BKE_mesh_calc_loop_tangent_ex(const MVert *mvert,
/* Calculation */
if (looptri_len != 0) {
TaskScheduler *scheduler = BLI_task_scheduler_get();
TaskPool *task_pool;
task_pool = BLI_task_pool_create(scheduler, NULL, TASK_PRIORITY_LOW);
TaskPool *task_pool = BLI_task_pool_create(NULL, TASK_PRIORITY_LOW);
tangent_mask_curr = 0;
/* Calculate tangent layers */


@@ -453,9 +453,7 @@ static void ocean_compute_htilda(void *__restrict userdata,
}
}
static void ocean_compute_displacement_y(TaskPool *__restrict pool,
void *UNUSED(taskdata),
int UNUSED(threadid))
static void ocean_compute_displacement_y(TaskPool *__restrict pool, void *UNUSED(taskdata))
{
OceanSimulateData *osd = BLI_task_pool_user_data(pool);
const Ocean *o = osd->o;
@@ -463,9 +461,7 @@ static void ocean_compute_displacement_y(TaskPool *__restrict pool,
fftw_execute(o->_disp_y_plan);
}
static void ocean_compute_displacement_x(TaskPool *__restrict pool,
void *UNUSED(taskdata),
int UNUSED(threadid))
static void ocean_compute_displacement_x(TaskPool *__restrict pool, void *UNUSED(taskdata))
{
OceanSimulateData *osd = BLI_task_pool_user_data(pool);
const Ocean *o = osd->o;
@@ -494,9 +490,7 @@ static void ocean_compute_displacement_x(TaskPool *__restrict pool,
fftw_execute(o->_disp_x_plan);
}
static void ocean_compute_displacement_z(TaskPool *__restrict pool,
void *UNUSED(taskdata),
int UNUSED(threadid))
static void ocean_compute_displacement_z(TaskPool *__restrict pool, void *UNUSED(taskdata))
{
OceanSimulateData *osd = BLI_task_pool_user_data(pool);
const Ocean *o = osd->o;
@@ -525,9 +519,7 @@ static void ocean_compute_displacement_z(TaskPool *__restrict pool,
fftw_execute(o->_disp_z_plan);
}
static void ocean_compute_jacobian_jxx(TaskPool *__restrict pool,
void *UNUSED(taskdata),
int UNUSED(threadid))
static void ocean_compute_jacobian_jxx(TaskPool *__restrict pool, void *UNUSED(taskdata))
{
OceanSimulateData *osd = BLI_task_pool_user_data(pool);
const Ocean *o = osd->o;
@@ -560,9 +552,7 @@ static void ocean_compute_jacobian_jxx(TaskPool *__restrict pool,
}
}
static void ocean_compute_jacobian_jzz(TaskPool *__restrict pool,
void *UNUSED(taskdata),
int UNUSED(threadid))
static void ocean_compute_jacobian_jzz(TaskPool *__restrict pool, void *UNUSED(taskdata))
{
OceanSimulateData *osd = BLI_task_pool_user_data(pool);
const Ocean *o = osd->o;
@@ -595,9 +585,7 @@ static void ocean_compute_jacobian_jzz(TaskPool *__restrict pool,
}
}
static void ocean_compute_jacobian_jxz(TaskPool *__restrict pool,
void *UNUSED(taskdata),
int UNUSED(threadid))
static void ocean_compute_jacobian_jxz(TaskPool *__restrict pool, void *UNUSED(taskdata))
{
OceanSimulateData *osd = BLI_task_pool_user_data(pool);
const Ocean *o = osd->o;
@@ -624,9 +612,7 @@ static void ocean_compute_jacobian_jxz(TaskPool *__restrict pool,
fftw_execute(o->_Jxz_plan);
}
static void ocean_compute_normal_x(TaskPool *__restrict pool,
void *UNUSED(taskdata),
int UNUSED(threadid))
static void ocean_compute_normal_x(TaskPool *__restrict pool, void *UNUSED(taskdata))
{
OceanSimulateData *osd = BLI_task_pool_user_data(pool);
const Ocean *o = osd->o;
@@ -645,9 +631,7 @@ static void ocean_compute_normal_x(TaskPool *__restrict pool,
fftw_execute(o->_N_x_plan);
}
static void ocean_compute_normal_z(TaskPool *__restrict pool,
void *UNUSED(taskdata),
int UNUSED(threadid))
static void ocean_compute_normal_z(TaskPool *__restrict pool, void *UNUSED(taskdata))
{
OceanSimulateData *osd = BLI_task_pool_user_data(pool);
const Ocean *o = osd->o;
@@ -668,7 +652,6 @@ static void ocean_compute_normal_z(TaskPool *__restrict pool,
void BKE_ocean_simulate(struct Ocean *o, float t, float scale, float chop_amount)
{
TaskScheduler *scheduler = BLI_task_scheduler_get();
TaskPool *pool;
OceanSimulateData osd;
@@ -680,7 +663,7 @@ void BKE_ocean_simulate(struct Ocean *o, float t, float scale, float chop_amount
osd.scale = scale;
osd.chop_amount = chop_amount;
pool = BLI_task_pool_create(scheduler, &osd, TASK_PRIORITY_HIGH);
pool = BLI_task_pool_create(&osd, TASK_PRIORITY_HIGH);
BLI_rw_mutex_lock(&o->oceanmutex, THREAD_LOCK_WRITE);


@@ -2787,9 +2787,7 @@ static void psys_thread_create_path(ParticleTask *task,
}
}
static void exec_child_path_cache(TaskPool *__restrict UNUSED(pool),
void *taskdata,
int UNUSED(threadid))
static void exec_child_path_cache(TaskPool *__restrict UNUSED(pool), void *taskdata)
{
ParticleTask *task = taskdata;
ParticleThreadContext *ctx = task->ctx;
@@ -2810,7 +2808,6 @@ void psys_cache_child_paths(ParticleSimulationData *sim,
const bool editupdate,
const bool use_render_params)
{
TaskScheduler *task_scheduler;
TaskPool *task_pool;
ParticleThreadContext ctx;
ParticleTask *tasks_parent, *tasks_child;
@@ -2826,8 +2823,7 @@ void psys_cache_child_paths(ParticleSimulationData *sim,
return;
}
task_scheduler = BLI_task_scheduler_get();
task_pool = BLI_task_pool_create(task_scheduler, &ctx, TASK_PRIORITY_LOW);
task_pool = BLI_task_pool_create(&ctx, TASK_PRIORITY_LOW);
totchild = ctx.totchild;
totparent = ctx.totparent;
@@ -3377,7 +3373,6 @@ void psys_cache_edit_paths(Depsgraph *depsgraph,
TaskParallelSettings settings;
BLI_parallel_range_settings_defaults(&settings);
settings.scheduling_mode = TASK_SCHEDULING_DYNAMIC;
BLI_task_parallel_range(0, edit->totpoint, &iter_data, psys_cache_edit_paths_iter, &settings);
edit->totcached = totpart;


@@ -773,9 +773,7 @@ static void distribute_children_exec(ParticleTask *thread, ChildParticle *cpa, i
}
}
static void exec_distribute_parent(TaskPool *__restrict UNUSED(pool),
void *taskdata,
int UNUSED(threadid))
static void exec_distribute_parent(TaskPool *__restrict UNUSED(pool), void *taskdata)
{
ParticleTask *task = taskdata;
ParticleSystem *psys = task->ctx->sim.psys;
@@ -804,9 +802,7 @@ static void exec_distribute_parent(TaskPool *__restrict UNUSED(pool),
}
}
static void exec_distribute_child(TaskPool *__restrict UNUSED(pool),
void *taskdata,
int UNUSED(threadid))
static void exec_distribute_child(TaskPool *__restrict UNUSED(pool), void *taskdata)
{
ParticleTask *task = taskdata;
ParticleSystem *psys = task->ctx->sim.psys;
@@ -1324,7 +1320,6 @@ static void psys_task_init_distribute(ParticleTask *task, ParticleSimulationData
static void distribute_particles_on_dm(ParticleSimulationData *sim, int from)
{
TaskScheduler *task_scheduler;
TaskPool *task_pool;
ParticleThreadContext ctx;
ParticleTask *tasks;
@@ -1336,8 +1331,7 @@ static void distribute_particles_on_dm(ParticleSimulationData *sim, int from)
return;
}
task_scheduler = BLI_task_scheduler_get();
task_pool = BLI_task_pool_create(task_scheduler, &ctx, TASK_PRIORITY_LOW);
task_pool = BLI_task_pool_create(&ctx, TASK_PRIORITY_LOW);
totpart = (from == PART_FROM_CHILD ? sim->psys->totchild : sim->psys->totpart);
psys_tasks_create(&ctx, 0, totpart, &tasks, &numtasks);


@@ -1115,11 +1115,11 @@ static void pbvh_faces_update_normals(PBVH *bvh, PBVHNode **nodes, int totnode)
.vnors = vnors,
};
PBVHParallelSettings settings;
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, true, totnode);
BKE_pbvh_parallel_range(0, totnode, &data, pbvh_update_normals_accum_task_cb, &settings);
BKE_pbvh_parallel_range(0, totnode, &data, pbvh_update_normals_store_task_cb, &settings);
BLI_task_parallel_range(0, totnode, &data, pbvh_update_normals_accum_task_cb, &settings);
BLI_task_parallel_range(0, totnode, &data, pbvh_update_normals_store_task_cb, &settings);
MEM_freeN(vnors);
}
@@ -1169,9 +1169,9 @@ static void pbvh_update_mask_redraw(PBVH *bvh, PBVHNode **nodes, int totnode, in
.flag = flag,
};
PBVHParallelSettings settings;
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, true, totnode);
BKE_pbvh_parallel_range(0, totnode, &data, pbvh_update_mask_redraw_task_cb, &settings);
BLI_task_parallel_range(0, totnode, &data, pbvh_update_mask_redraw_task_cb, &settings);
}
static void pbvh_update_visibility_redraw_task_cb(void *__restrict userdata,
@@ -1207,9 +1207,9 @@ static void pbvh_update_visibility_redraw(PBVH *bvh, PBVHNode **nodes, int totno
.flag = flag,
};
PBVHParallelSettings settings;
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, true, totnode);
BKE_pbvh_parallel_range(0, totnode, &data, pbvh_update_visibility_redraw_task_cb, &settings);
BLI_task_parallel_range(0, totnode, &data, pbvh_update_visibility_redraw_task_cb, &settings);
}
static void pbvh_update_BB_redraw_task_cb(void *__restrict userdata,
@@ -1245,9 +1245,9 @@ void pbvh_update_BB_redraw(PBVH *bvh, PBVHNode **nodes, int totnode, int flag)
.flag = flag,
};
PBVHParallelSettings settings;
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, true, totnode);
BKE_pbvh_parallel_range(0, totnode, &data, pbvh_update_BB_redraw_task_cb, &settings);
BLI_task_parallel_range(0, totnode, &data, pbvh_update_BB_redraw_task_cb, &settings);
}
static int pbvh_get_buffers_update_flags(PBVH *bvh, bool show_vcol)
@@ -1365,9 +1365,9 @@ static void pbvh_update_draw_buffers(
.show_vcol = show_vcol,
};
PBVHParallelSettings settings;
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, true, totnode);
BKE_pbvh_parallel_range(0, totnode, &data, pbvh_update_draw_buffer_cb, &settings);
BLI_task_parallel_range(0, totnode, &data, pbvh_update_draw_buffer_cb, &settings);
}
static int pbvh_flush_bb(PBVH *bvh, PBVHNode *node, int flag)
@@ -1558,9 +1558,9 @@ static void pbvh_update_visibility(PBVH *bvh, PBVHNode **nodes, int totnode)
.nodes = nodes,
};
PBVHParallelSettings settings;
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, true, totnode);
BKE_pbvh_parallel_range(0, totnode, &data, pbvh_update_visibility_task_cb, &settings);
BLI_task_parallel_range(0, totnode, &data, pbvh_update_visibility_task_cb, &settings);
}
void BKE_pbvh_update_visibility(PBVH *bvh)
@@ -2994,7 +2994,7 @@ void BKE_pbvh_get_frustum_planes(PBVH *bvh, PBVHFrustumPlanes *planes)
}
}
void BKE_pbvh_parallel_range_settings(PBVHParallelSettings *settings,
void BKE_pbvh_parallel_range_settings(TaskParallelSettings *settings,
bool use_threading,
int totnode)
{


@@ -43,12 +43,9 @@ struct BLI_mempool;
* must be called from the main thread. All other scheduler and pool functions
* are thread-safe. */
typedef struct TaskScheduler TaskScheduler;
TaskScheduler *BLI_task_scheduler_create(int num_threads);
void BLI_task_scheduler_free(TaskScheduler *scheduler);
int BLI_task_scheduler_num_threads(TaskScheduler *scheduler);
void BLI_task_scheduler_init(void);
void BLI_task_scheduler_exit(void);
int BLI_task_scheduler_num_threads(void);
/* Task Pool
*
@@ -70,16 +67,14 @@ typedef enum TaskPriority {
} TaskPriority;
typedef struct TaskPool TaskPool;
typedef void (*TaskRunFunction)(TaskPool *__restrict pool, void *taskdata, int threadid);
typedef void (*TaskRunFunction)(TaskPool *__restrict pool, void *taskdata);
typedef void (*TaskFreeFunction)(TaskPool *__restrict pool, void *taskdata);
TaskPool *BLI_task_pool_create(TaskScheduler *scheduler, void *userdata, TaskPriority priority);
TaskPool *BLI_task_pool_create_background(TaskScheduler *scheduler,
void *userdata,
TaskPriority priority);
TaskPool *BLI_task_pool_create_suspended(TaskScheduler *scheduler,
void *userdata,
TaskPriority priority);
TaskPool *BLI_task_pool_create(void *userdata, TaskPriority priority);
TaskPool *BLI_task_pool_create_background(void *userdata, TaskPriority priority);
TaskPool *BLI_task_pool_create_suspended(void *userdata, TaskPriority priority);
TaskPool *BLI_task_pool_create_no_threads(void *userdata);
TaskPool *BLI_task_pool_create_background_serial(void *userdata, TaskPriority priority);
void BLI_task_pool_free(TaskPool *pool);
void BLI_task_pool_push(TaskPool *pool,
@@ -87,17 +82,9 @@ void BLI_task_pool_push(TaskPool *pool,
void *taskdata,
bool free_taskdata,
TaskFreeFunction freedata);
void BLI_task_pool_push_from_thread(TaskPool *pool,
TaskRunFunction run,
void *taskdata,
bool free_taskdata,
TaskFreeFunction freedata,
int thread_id);
/* work and wait until all tasks are done */
void BLI_task_pool_work_and_wait(TaskPool *pool);
/* work and wait until all tasks are done, then reset to the initial suspended state */
void BLI_task_pool_work_wait_and_reset(TaskPool *pool);
/* cancel all tasks, keep worker threads running */
void BLI_task_pool_cancel(TaskPool *pool);
@@ -110,36 +97,10 @@ void *BLI_task_pool_user_data(TaskPool *pool);
/* optional mutex to use from run function */
ThreadMutex *BLI_task_pool_user_mutex(TaskPool *pool);
/* Thread ID of thread that created the task pool. */
int BLI_task_pool_creator_thread_id(TaskPool *pool);
/* Delayed push, use that to reduce thread overhead by accumulating
* all new tasks into local queue first and pushing it to scheduler
* from within a single mutex lock.
*/
void BLI_task_pool_delayed_push_begin(TaskPool *pool, int thread_id);
void BLI_task_pool_delayed_push_end(TaskPool *pool, int thread_id);
/* Parallel for routines */
typedef enum eTaskSchedulingMode {
/* Task scheduler will divide overall work into equal chunks, scheduling
* even chunks to all worker threads.
* Least run time benefit, ideal for cases when each task requires equal
* amount of compute power.
*/
TASK_SCHEDULING_STATIC,
/* Task scheduler will schedule small amount of work to each worker thread.
* Has more run time overhead, but deals much better with cases when each
* part of the work requires totally different amount of compute power.
*/
TASK_SCHEDULING_DYNAMIC,
} eTaskSchedulingMode;
/* Per-thread specific data passed to the callback. */
typedef struct TaskParallelTLS {
/* Identifier of the thread who this data belongs to. */
int thread_id;
/* Copy of user-specified chunk, which is copied from the original chunk to all
* worker threads. This is similar to OpenMP's firstprivate.
*/
@@ -163,8 +124,6 @@ typedef struct TaskParallelSettings {
* is higher than a chunk size. As in, threading will always be performed.
*/
bool use_threading;
/* Scheduling mode to use for this parallel range invocation. */
eTaskSchedulingMode scheduling_mode;
/* Each instance of looping chunks will get a copy of this data
* (similar to OpenMP's firstprivate).
*/
@@ -199,7 +158,7 @@ void BLI_task_parallel_range(const int start,
const int stop,
void *userdata,
TaskParallelRangeFunc func,
TaskParallelSettings *settings);
const TaskParallelSettings *settings);
/* This data is shared between all tasks, its access needs thread lock or similar protection.
*/
@@ -254,11 +213,14 @@ BLI_INLINE void BLI_parallel_range_settings_defaults(TaskParallelSettings *setti
{
memset(settings, 0, sizeof(*settings));
settings->use_threading = true;
settings->scheduling_mode = TASK_SCHEDULING_STATIC;
/* Use default heuristic to define actual chunk size. */
settings->min_iter_per_thread = 0;
}
/* Don't use this, store any thread specific data in tls->userdata_chunk instead.
* Only here for code to be removed. */
int BLI_task_parallel_thread_id(const TaskParallelTLS *tls);
#ifdef __cplusplus
}
#endif
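A minimal sketch of the slimmed-down pool API declared above; the job struct and run function are hypothetical:

/* Hypothetical per-task data. */
typedef struct MyJob {
  int index;
} MyJob;

static void my_job_run(TaskPool *__restrict UNUSED(pool), void *taskdata)
{
  MyJob *job = (MyJob *)taskdata;
  /* ... do the work for job->index ... */
  (void)job;
}

static void run_jobs(MyJob *jobs, int num_jobs)
{
  /* No TaskScheduler argument anymore, and no thread id in the callback. */
  TaskPool *task_pool = BLI_task_pool_create(NULL, TASK_PRIORITY_HIGH);
  for (int i = 0; i < num_jobs; i++) {
    BLI_task_pool_push(task_pool, my_job_run, &jobs[i], false, NULL);
  }
  BLI_task_pool_work_and_wait(task_pool);
  BLI_task_pool_free(task_pool);
}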


@@ -47,8 +47,6 @@ struct TaskScheduler;
void BLI_threadapi_init(void);
void BLI_threadapi_exit(void);
struct TaskScheduler *BLI_task_scheduler_get(void);
void BLI_threadpool_init(struct ListBase *threadbase, void *(*do_thread)(void *), int tot);
int BLI_available_threads(struct ListBase *threadbase);
int BLI_threadpool_available_thread_index(struct ListBase *threadbase);


@@ -119,8 +119,10 @@ set(SRC
intern/string_utf8.c
intern/string_utils.c
intern/system.c
intern/task_pool.cc
intern/task_iterator.c
intern/task_pool.cc
intern/task_range.cc
intern/task_scheduler.cc
intern/threads.c
intern/time.c
intern/timecode.c
@@ -278,6 +280,14 @@ if(WITH_MEM_VALGRIND)
add_definitions(-DWITH_MEM_VALGRIND)
endif()
if(WITH_TBB)
add_definitions(-DWITH_TBB)
list(APPEND INC_SYS
${TBB_INCLUDE_DIRS}
)
endif()
if(WIN32)
list(APPEND INC
../../../intern/utfconv


@@ -17,7 +17,7 @@
/** \file
* \ingroup bli
*
* A generic task system which can be used for any task based subsystem.
* Parallel tasks over all elements in a container.
*/
#include <stdlib.h>
@@ -34,82 +34,12 @@
#include "atomic_ops.h"
/* Parallel range routines */
/**
*
* Main functions:
* - #BLI_task_parallel_range
* - #BLI_task_parallel_listbase (#ListBase - double linked list)
*
* TODO:
* - #BLI_task_parallel_foreach_link (#Link - single linked list)
* - #BLI_task_parallel_foreach_ghash/gset (#GHash/#GSet - hash & set)
* - #BLI_task_parallel_foreach_mempool (#BLI_mempool - iterate over mempools)
*/
/* Avoids using malloc for userdata_chunk in tasks, when small enough. */
#define MALLOCA(_size) ((_size) <= 8192) ? alloca((_size)) : MEM_mallocN((_size), __func__)
#define MALLOCA_FREE(_mem, _size) \
if (((_mem) != NULL) && ((_size) > 8192)) \
MEM_freeN((_mem))
/* Stores all needed data to perform a parallelized iteration,
* with a same operation (callback function).
* It can be chained with other tasks in a single-linked list way. */
typedef struct TaskParallelRangeState {
struct TaskParallelRangeState *next;
/* Start and end point of integer value iteration. */
int start, stop;
/* User-defined data, shared between all worker threads. */
void *userdata_shared;
/* User-defined callback function called for each value in [start, stop[ specified range. */
TaskParallelRangeFunc func;
/* Each instance of looping chunks will get a copy of this data
* (similar to OpenMP's firstprivate).
*/
void *initial_tls_memory; /* Pointer to actual user-defined 'tls' data. */
size_t tls_data_size; /* Size of that data. */
void *flatten_tls_storage; /* 'tls' copies of initial_tls_memory for each running task. */
/* Number of 'tls' copies in the array, i.e. number of worker threads. */
size_t num_elements_in_tls_storage;
/* Function called to join user data chunk into another, to reduce
* the result to the original userdata_chunk memory.
* The reduce functions should have no side effects, so that they
* can be run on any thread. */
TaskParallelReduceFunc func_reduce;
/* Function called to free data created by TaskParallelRangeFunc. */
TaskParallelFreeFunc func_free;
/* Current value of the iterator, shared between all threads (atomically updated). */
int iter_value;
int iter_chunk_num; /* Amount of iterations to process in a single step. */
} TaskParallelRangeState;
/* Stores all the parallel tasks for a single pool. */
typedef struct TaskParallelRangePool {
/* The workers' task pool. */
TaskPool *pool;
/* The number of worker tasks we need to create. */
int num_tasks;
/* The total number of iterations in all the added ranges. */
int num_total_iters;
/* The size (number of items) processed at once by a worker task. */
int chunk_size;
/* Linked list of range tasks to process. */
TaskParallelRangeState *parallel_range_states;
/* Current range task being processed, swapped atomically. */
TaskParallelRangeState *current_state;
/* Scheduling settings common to all tasks. */
TaskParallelSettings *settings;
} TaskParallelRangePool;
BLI_INLINE void task_parallel_calc_chunk_size(const TaskParallelSettings *settings,
const int tot_items,
int num_tasks,
@@ -154,232 +84,7 @@ BLI_INLINE void task_parallel_calc_chunk_size(const TaskParallelSettings *settin
}
BLI_assert(chunk_size > 0);
if (tot_items > 0) {
switch (settings->scheduling_mode) {
case TASK_SCHEDULING_STATIC:
*r_chunk_size = max_ii(chunk_size, tot_items / num_tasks);
break;
case TASK_SCHEDULING_DYNAMIC:
*r_chunk_size = chunk_size;
break;
}
}
else {
/* If total amount of items is unknown, we can only use dynamic scheduling. */
*r_chunk_size = chunk_size;
}
}
BLI_INLINE void task_parallel_range_calc_chunk_size(TaskParallelRangePool *range_pool)
{
int num_iters = 0;
int min_num_iters = INT_MAX;
for (TaskParallelRangeState *state = range_pool->parallel_range_states; state != NULL;
state = state->next) {
const int ni = state->stop - state->start;
num_iters += ni;
if (min_num_iters > ni) {
min_num_iters = ni;
}
}
range_pool->num_total_iters = num_iters;
/* Note: Passing min_num_iters here instead of num_iters kind of partially breaks the 'static'
* scheduling, but pooled range iterator is inherently non-static anyway, so adding a small level
* of dynamic scheduling here should be fine. */
task_parallel_calc_chunk_size(
range_pool->settings, min_num_iters, range_pool->num_tasks, &range_pool->chunk_size);
}
BLI_INLINE bool parallel_range_next_iter_get(TaskParallelRangePool *__restrict range_pool,
int *__restrict r_iter,
int *__restrict r_count,
TaskParallelRangeState **__restrict r_state)
{
/* We need an atomic op here as well to fetch the initial state, since some other thread might
* have already updated it. */
TaskParallelRangeState *current_state = atomic_cas_ptr(
(void **)&range_pool->current_state, NULL, NULL);
int previter = INT32_MAX;
while (current_state != NULL && previter >= current_state->stop) {
previter = atomic_fetch_and_add_int32(&current_state->iter_value, range_pool->chunk_size);
*r_iter = previter;
*r_count = max_ii(0, min_ii(range_pool->chunk_size, current_state->stop - previter));
if (previter >= current_state->stop) {
/* At this point the state we got is done, we need to go to the next one. In case some other
* thread already did it, then this does nothing, and we'll just get current valid state
* at start of the next loop. */
TaskParallelRangeState *current_state_from_atomic_cas = atomic_cas_ptr(
(void **)&range_pool->current_state, current_state, current_state->next);
if (current_state == current_state_from_atomic_cas) {
/* The atomic CAS operation was successful, we did update range_pool->current_state, so we
* can safely switch to next state. */
current_state = current_state->next;
}
else {
/* The atomic CAS operation failed, but we still got range_pool->current_state value out of
* it, just use it as our new current state. */
current_state = current_state_from_atomic_cas;
}
}
}
*r_state = current_state;
return (current_state != NULL && previter < current_state->stop);
}
static void parallel_range_func(TaskPool *__restrict pool, void *tls_data_idx, int thread_id)
{
TaskParallelRangePool *__restrict range_pool = BLI_task_pool_user_data(pool);
TaskParallelTLS tls = {
.thread_id = thread_id,
.userdata_chunk = NULL,
};
TaskParallelRangeState *state;
int iter, count;
while (parallel_range_next_iter_get(range_pool, &iter, &count, &state)) {
tls.userdata_chunk = (char *)state->flatten_tls_storage +
(((size_t)POINTER_AS_INT(tls_data_idx)) * state->tls_data_size);
for (int i = 0; i < count; i++) {
state->func(state->userdata_shared, iter + i, &tls);
}
}
}
static void parallel_range_single_thread(TaskParallelRangePool *range_pool)
{
for (TaskParallelRangeState *state = range_pool->parallel_range_states; state != NULL;
state = state->next) {
const int start = state->start;
const int stop = state->stop;
void *userdata = state->userdata_shared;
TaskParallelRangeFunc func = state->func;
void *initial_tls_memory = state->initial_tls_memory;
const size_t tls_data_size = state->tls_data_size;
const bool use_tls_data = (tls_data_size != 0) && (initial_tls_memory != NULL);
TaskParallelTLS tls = {
.thread_id = 0,
.userdata_chunk = initial_tls_memory,
};
for (int i = start; i < stop; i++) {
func(userdata, i, &tls);
}
if (use_tls_data && state->func_free != NULL) {
/* `func_free` should only free data that was created during execution of `func`. */
state->func_free(userdata, initial_tls_memory);
}
}
}
/**
* This function allows to parallelized for loops in a similar way to OpenMP's
* 'parallel for' statement.
*
* See public API doc of ParallelRangeSettings for description of all settings.
*/
void BLI_task_parallel_range(const int start,
const int stop,
void *userdata,
TaskParallelRangeFunc func,
TaskParallelSettings *settings)
{
if (start == stop) {
return;
}
BLI_assert(start < stop);
TaskParallelRangeState state = {
.next = NULL,
.start = start,
.stop = stop,
.userdata_shared = userdata,
.func = func,
.iter_value = start,
.initial_tls_memory = settings->userdata_chunk,
.tls_data_size = settings->userdata_chunk_size,
.func_free = settings->func_free,
};
TaskParallelRangePool range_pool = {
.pool = NULL, .parallel_range_states = &state, .current_state = NULL, .settings = settings};
int i, num_threads, num_tasks;
void *tls_data = settings->userdata_chunk;
const size_t tls_data_size = settings->userdata_chunk_size;
if (tls_data_size != 0) {
BLI_assert(tls_data != NULL);
}
const bool use_tls_data = (tls_data_size != 0) && (tls_data != NULL);
void *flatten_tls_storage = NULL;
/* If it's not enough data to be crunched, don't bother with tasks at all,
* do everything from the current thread.
*/
if (!settings->use_threading) {
parallel_range_single_thread(&range_pool);
return;
}
TaskScheduler *task_scheduler = BLI_task_scheduler_get();
num_threads = BLI_task_scheduler_num_threads(task_scheduler);
/* The idea here is to prevent creating a task for each of the loop iterations
* and instead have tasks which are evenly distributed across CPU cores and
* pull next iter to be crunched using the queue.
*/
range_pool.num_tasks = num_tasks = num_threads + 2;
task_parallel_range_calc_chunk_size(&range_pool);
range_pool.num_tasks = num_tasks = min_ii(num_tasks,
max_ii(1, (stop - start) / range_pool.chunk_size));
if (num_tasks == 1) {
parallel_range_single_thread(&range_pool);
return;
}
TaskPool *task_pool = range_pool.pool = BLI_task_pool_create_suspended(
task_scheduler, &range_pool, TASK_PRIORITY_HIGH);
range_pool.current_state = &state;
if (use_tls_data) {
state.flatten_tls_storage = flatten_tls_storage = MALLOCA(tls_data_size * (size_t)num_tasks);
state.tls_data_size = tls_data_size;
}
const int thread_id = BLI_task_pool_creator_thread_id(task_pool);
for (i = 0; i < num_tasks; i++) {
if (use_tls_data) {
void *userdata_chunk_local = (char *)flatten_tls_storage + (tls_data_size * (size_t)i);
memcpy(userdata_chunk_local, tls_data, tls_data_size);
}
/* Use this pool's pre-allocated tasks. */
BLI_task_pool_push_from_thread(
task_pool, parallel_range_func, POINTER_FROM_INT(i), false, NULL, thread_id);
}
BLI_task_pool_work_and_wait(task_pool);
BLI_task_pool_free(task_pool);
if (use_tls_data && (settings->func_free != NULL || settings->func_reduce != NULL)) {
for (i = 0; i < num_tasks; i++) {
void *userdata_chunk_local = (char *)flatten_tls_storage + (tls_data_size * (size_t)i);
if (settings->func_reduce) {
settings->func_reduce(userdata, tls_data, userdata_chunk_local);
}
if (settings->func_free) {
/* `func_free` should only free data that was created during execution of `func`. */
settings->func_free(userdata, userdata_chunk_local);
}
}
MALLOCA_FREE(flatten_tls_storage, tls_data_size * (size_t)num_tasks);
}
*r_chunk_size = chunk_size;
}
typedef struct TaskParallelIteratorState {
@@ -394,20 +99,10 @@ typedef struct TaskParallelIteratorState {
int tot_items;
} TaskParallelIteratorState;
BLI_INLINE void task_parallel_iterator_calc_chunk_size(const TaskParallelSettings *settings,
const int num_tasks,
TaskParallelIteratorState *state)
{
task_parallel_calc_chunk_size(
settings, state->tot_items, num_tasks, &state->iter_shared.chunk_size);
}
static void parallel_iterator_func_do(TaskParallelIteratorState *__restrict state,
void *userdata_chunk,
int threadid)
void *userdata_chunk)
{
TaskParallelTLS tls = {
.thread_id = threadid,
.userdata_chunk = userdata_chunk,
};
@@ -460,11 +155,11 @@ static void parallel_iterator_func_do(TaskParallelIteratorState *__restrict stat
MALLOCA_FREE(current_chunk_indices, indices_size);
}
static void parallel_iterator_func(TaskPool *__restrict pool, void *userdata_chunk, int threadid)
static void parallel_iterator_func(TaskPool *__restrict pool, void *userdata_chunk)
{
TaskParallelIteratorState *__restrict state = BLI_task_pool_user_data(pool);
parallel_iterator_func_do(state, userdata_chunk, threadid);
parallel_iterator_func_do(state, userdata_chunk);
}
static void task_parallel_iterator_no_threads(const TaskParallelSettings *settings,
@@ -483,7 +178,7 @@ static void task_parallel_iterator_no_threads(const TaskParallelSettings *settin
/* Also marking it as non-threaded for the iterator callback. */
state->iter_shared.spin_lock = NULL;
parallel_iterator_func_do(state, userdata_chunk, 0);
parallel_iterator_func_do(state, userdata_chunk);
if (use_userdata_chunk && settings->func_free != NULL) {
/* `func_free` should only free data that was created during execution of `func`. */
@@ -494,10 +189,10 @@ static void task_parallel_iterator_no_threads(const TaskParallelSettings *settin
static void task_parallel_iterator_do(const TaskParallelSettings *settings,
TaskParallelIteratorState *state)
{
TaskScheduler *task_scheduler = BLI_task_scheduler_get();
const int num_threads = BLI_task_scheduler_num_threads(task_scheduler);
const int num_threads = BLI_task_scheduler_num_threads();
task_parallel_iterator_calc_chunk_size(settings, num_threads, state);
task_parallel_calc_chunk_size(
settings, state->tot_items, num_threads, &state->iter_shared.chunk_size);
if (!settings->use_threading) {
task_parallel_iterator_no_threads(settings, state);
@@ -526,21 +221,19 @@ static void task_parallel_iterator_do(const TaskParallelSettings *settings,
void *userdata_chunk_array = NULL;
const bool use_userdata_chunk = (userdata_chunk_size != 0) && (userdata_chunk != NULL);
TaskPool *task_pool = BLI_task_pool_create_suspended(task_scheduler, state, TASK_PRIORITY_HIGH);
TaskPool *task_pool = BLI_task_pool_create(state, TASK_PRIORITY_HIGH);
if (use_userdata_chunk) {
userdata_chunk_array = MALLOCA(userdata_chunk_size * num_tasks);
}
const int thread_id = BLI_task_pool_creator_thread_id(task_pool);
for (size_t i = 0; i < num_tasks; i++) {
if (use_userdata_chunk) {
userdata_chunk_local = (char *)userdata_chunk_array + (userdata_chunk_size * i);
memcpy(userdata_chunk_local, userdata_chunk, userdata_chunk_size);
}
/* Use this pool's pre-allocated tasks. */
BLI_task_pool_push_from_thread(
task_pool, parallel_iterator_func, userdata_chunk_local, false, NULL, thread_id);
BLI_task_pool_push(task_pool, parallel_iterator_func, userdata_chunk_local, false, NULL);
}
BLI_task_pool_work_and_wait(task_pool);
@@ -656,7 +349,7 @@ typedef struct ParallelMempoolState {
TaskParallelMempoolFunc func;
} ParallelMempoolState;
static void parallel_mempool_func(TaskPool *__restrict pool, void *taskdata, int UNUSED(threadid))
static void parallel_mempool_func(TaskPool *__restrict pool, void *taskdata)
{
ParallelMempoolState *__restrict state = BLI_task_pool_user_data(pool);
BLI_mempool_iter *iter = taskdata;
@@ -684,7 +377,6 @@ void BLI_task_parallel_mempool(BLI_mempool *mempool,
TaskParallelMempoolFunc func,
const bool use_threading)
{
TaskScheduler *task_scheduler;
TaskPool *task_pool;
ParallelMempoolState state;
int i, num_threads, num_tasks;
@@ -704,9 +396,8 @@ void BLI_task_parallel_mempool(BLI_mempool *mempool,
return;
}
task_scheduler = BLI_task_scheduler_get();
task_pool = BLI_task_pool_create_suspended(task_scheduler, &state, TASK_PRIORITY_HIGH);
num_threads = BLI_task_scheduler_num_threads(task_scheduler);
task_pool = BLI_task_pool_create(&state, TASK_PRIORITY_HIGH);
num_threads = BLI_task_scheduler_num_threads();
/* The idea here is to prevent creating a task for each of the loop iterations
* and instead have tasks which are evenly distributed across CPU cores and
@@ -720,11 +411,9 @@ void BLI_task_parallel_mempool(BLI_mempool *mempool,
BLI_mempool_iter *mempool_iterators = BLI_mempool_iter_threadsafe_create(mempool,
(size_t)num_tasks);
const int thread_id = BLI_task_pool_creator_thread_id(task_pool);
for (i = 0; i < num_tasks; i++) {
/* Use this pool's pre-allocated tasks. */
BLI_task_pool_push_from_thread(
task_pool, parallel_mempool_func, &mempool_iterators[i], false, NULL, thread_id);
BLI_task_pool_push(task_pool, parallel_mempool_func, &mempool_iterators[i], false, NULL);
}
BLI_task_pool_work_and_wait(task_pool);


@@ -17,714 +17,321 @@
/** \file
* \ingroup bli
*
* A generic task system which can be used for any task based subsystem.
* Task pool to run tasks in parallel.
*/
#include <memory>
#include <stdlib.h>
#include <utility>
#include "MEM_guardedalloc.h"
#include "DNA_listBase.h"
#include "BLI_listbase.h"
#include "BLI_math.h"
#include "BLI_mempool.h"
#include "BLI_task.h"
#include "BLI_threads.h"
#include "atomic_ops.h"
/* Define this to enable some detailed statistic print. */
#undef DEBUG_STATS
/* Types */
/* Number of per-thread pre-allocated tasks.
*
* For more details see description of TaskMemPool.
*/
#define MEMPOOL_SIZE 256
/* Number of tasks which are pushed directly to local thread queue.
*
* This allows a thread to fetch the next task without locking the whole queue.
*/
#define LOCAL_QUEUE_SIZE 1
/* Number of tasks which are allowed to be scheduled in a delayed manner.
*
* This allows using fewer locks per graph node children schedule. More details
* can be found at TaskThreadLocalStorage::do_delayed_push.
*/
#define DELAYED_QUEUE_SIZE 4096
#ifndef NDEBUG
# define ASSERT_THREAD_ID(scheduler, thread_id) \
do { \
if (!BLI_thread_is_main()) { \
TaskThread *thread = (TaskThread *)pthread_getspecific(scheduler->tls_id_key); \
if (thread == NULL) { \
BLI_assert(thread_id == 0); \
} \
else { \
BLI_assert(thread_id == thread->id); \
} \
} \
else { \
BLI_assert(thread_id == 0); \
} \
} while (false)
#else
# define ASSERT_THREAD_ID(scheduler, thread_id)
#ifdef WITH_TBB
/* Quiet top level deprecation message, unrelated to API usage here. */
# define TBB_SUPPRESS_DEPRECATED_MESSAGES 1
# include <tbb/tbb.h>
#endif
typedef struct Task {
struct Task *next, *prev;
/* Task
*
* Unit of work to execute. This is a C++ class to work with TBB. */
class Task {
public:
TaskPool *pool;
TaskRunFunction run;
void *taskdata;
bool free_taskdata;
TaskFreeFunction freedata;
TaskPool *pool;
} Task;
/* This is a per-thread storage of pre-allocated tasks.
*
* The idea behind this is simple: reduce amount of malloc() calls when pushing
* new task to the pool. This is done by keeping memory from the tasks which
* were finished already, so instead of freeing that memory we put it to the
* pool for the later re-use.
*
* The tricky part here is to avoid any inter-thread synchronization, hence no
* lock must exist around this pool. The pool will become an owner of the pointer
* from freed task, and only corresponding thread will be able to use this pool
* (no memory stealing and such).
*
* This leads to the following use of the pool:
*
* - task_push() should provide the proper thread ID from which the task is
* being pushed.
*
* - Task allocation function which check corresponding memory pool and if there
* is any memory in there it'll mark memory as re-used, remove it from the pool
* and use that memory for the new task.
*
* At this moment task queue owns the memory.
*
* - When task is done and task_free() is called the memory will be put to the
* pool which corresponds to a thread which handled the task.
*/
typedef struct TaskMemPool {
/* Number of pre-allocated tasks in the pool. */
int num_tasks;
/* Pre-allocated task memory pointers. */
Task *tasks[MEMPOOL_SIZE];
} TaskMemPool;
Task(TaskPool *pool,
TaskRunFunction run,
void *taskdata,
bool free_taskdata,
TaskFreeFunction freedata)
: pool(pool), run(run), taskdata(taskdata), free_taskdata(free_taskdata), freedata(freedata)
{
}
#ifdef DEBUG_STATS
typedef struct TaskMemPoolStats {
/* Number of allocations. */
int num_alloc;
/* Number of avoided allocations (pointer was re-used from the pool). */
int num_reuse;
/* Amount of memory discarded due to pool saturation. */
int num_discard;
} TaskMemPoolStats;
~Task()
{
if (free_taskdata) {
if (freedata) {
freedata(pool, taskdata);
}
else {
MEM_freeN(taskdata);
}
}
}
/* Move constructor. */
Task(Task &&other)
: pool(other.pool),
run(other.run),
taskdata(other.taskdata),
free_taskdata(other.free_taskdata),
freedata(other.freedata)
{
other.pool = NULL;
other.run = NULL;
other.taskdata = NULL;
other.free_taskdata = false;
other.freedata = NULL;
}
/* Execute task. */
void operator()() const
{
run(pool, taskdata);
}
/* For performance, ensure we never copy the task and only move it. */
Task(const Task &other) = delete;
Task &operator=(const Task &other) = delete;
Task &operator=(Task &&other) = delete;
};
/* TBB Task Group.
*
* Subclass since there seems to be no other way to set priority. */
#ifdef WITH_TBB
class TBBTaskGroup : public tbb::task_group {
public:
TBBTaskGroup(TaskPriority priority)
{
switch (priority) {
case TASK_PRIORITY_LOW:
my_context.set_priority(tbb::priority_low);
break;
case TASK_PRIORITY_HIGH:
my_context.set_priority(tbb::priority_normal);
break;
}
}
~TBBTaskGroup()
{
}
};
#endif
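For reference, a minimal standalone sketch (independent of this patch) of how a plain tbb::task_group is driven; the subclass above only adds a priority on top of this behavior:

#include <tbb/task_group.h>

void task_group_demo()
{
  tbb::task_group group;
  group.run([] { /* task A */ });
  group.run([] { /* task B */ });
  /* wait() blocks until both tasks have finished; the calling thread may
   * also execute queued tasks itself while waiting. */
  group.wait();
}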
typedef struct TaskThreadLocalStorage {
/* Memory pool for faster task allocation.
* The idea is to re-use memory of finished/discarded tasks by this thread.
*/
TaskMemPool task_mempool;
/* Task Pool */
/* Local queue keeps thread alive by keeping small amount of tasks ready
* to be picked up without causing global thread locks for synchronization.
*/
int num_local_queue;
Task *local_queue[LOCAL_QUEUE_SIZE];
/* Thread can be marked for delayed task push. This is helpful when it's
* known that lots of subsequent task pushes will happen from the same thread
* without "interrupting" for task execution.
*
* We try to accumulate as much tasks as possible in a local queue without
* any locks first, and then we push all of them into a scheduler's queue
* from within a single mutex lock.
*/
bool do_delayed_push;
int num_delayed_queue;
Task *delayed_queue[DELAYED_QUEUE_SIZE];
} TaskThreadLocalStorage;
typedef enum TaskPoolType {
TASK_POOL_TBB,
TASK_POOL_TBB_SUSPENDED,
TASK_POOL_NO_THREADS,
TASK_POOL_BACKGROUND,
TASK_POOL_BACKGROUND_SERIAL,
} TaskPoolType;
struct TaskPool {
TaskScheduler *scheduler;
TaskPoolType type;
bool use_threads;
volatile size_t num;
ThreadMutex num_mutex;
ThreadCondition num_cond;
void *userdata;
ThreadMutex user_mutex;
void *userdata;
volatile bool do_cancel;
volatile bool do_work;
/* TBB task pool. */
#ifdef WITH_TBB
TBBTaskGroup tbb_group;
#endif
volatile bool is_suspended;
bool start_suspended;
ListBase suspended_queue;
size_t num_suspended;
BLI_mempool *suspended_mempool;
TaskPriority priority;
/* If set, this pool may never be work_and_wait'ed, which means TaskScheduler
* has to use its special background fallback thread in case we are in
* single-threaded situation.
*/
bool run_in_background;
/* This is the task scheduler's ID of the thread on which the pool was constructed.
* It will be used to access task TLS.
*/
int thread_id;
/* For the pools which are created from non-main thread which is not a
* scheduler worker thread we can't re-use any of scheduler's threads TLS
* and have to use our own one.
*/
bool use_local_tls;
TaskThreadLocalStorage local_tls;
#ifndef NDEBUG
pthread_t creator_thread_id;
#endif
#ifdef DEBUG_STATS
TaskMemPoolStats *mempool_stats;
#endif
/* Background task pool. */
ListBase background_threads;
ThreadQueue *background_queue;
volatile bool background_is_canceling;
};
struct TaskScheduler {
pthread_t *threads;
struct TaskThread *task_threads;
int num_threads;
bool background_thread_only;
/* TBB Task Pool.
*
* Task pool using the TBB scheduler for tasks. When building without TBB
* support or running Blender with -t 1, this reverts to single threaded.
*
* Tasks may be suspended until all are created, to make it possible to
* initialize data structures and create tasks in a single pass. */
ListBase queue;
ThreadMutex queue_mutex;
ThreadCondition queue_cond;
ThreadMutex startup_mutex;
ThreadCondition startup_cond;
volatile int num_thread_started;
volatile bool do_exit;
/* NOTE: In pthread's TLS we store the whole TaskThread structure. */
pthread_key_t tls_id_key;
};
typedef struct TaskThread {
TaskScheduler *scheduler;
int id;
TaskThreadLocalStorage tls;
} TaskThread;
/* Helper */
BLI_INLINE void task_data_free(Task *task, const int UNUSED(thread_id))
static void tbb_task_pool_create(TaskPool *pool, TaskPriority priority)
{
if (task->free_taskdata) {
if (task->freedata) {
task->freedata(task->pool, task->taskdata);
}
else {
MEM_freeN(task->taskdata);
}
if (pool->type == TASK_POOL_TBB_SUSPENDED) {
pool->is_suspended = true;
pool->suspended_mempool = BLI_mempool_create(sizeof(Task), 512, 512, BLI_MEMPOOL_ALLOW_ITER);
}
}
BLI_INLINE void initialize_task_tls(TaskThreadLocalStorage *tls)
{
memset(tls, 0, sizeof(TaskThreadLocalStorage));
}
BLI_INLINE TaskThreadLocalStorage *get_task_tls(TaskPool *pool, const int thread_id)
{
TaskScheduler *scheduler = pool->scheduler;
BLI_assert(thread_id >= 0);
BLI_assert(thread_id <= scheduler->num_threads);
if (pool->use_local_tls && thread_id == 0) {
BLI_assert(pool->thread_id == 0);
BLI_assert(!BLI_thread_is_main());
BLI_assert(pthread_equal(pthread_self(), pool->creator_thread_id));
return &pool->local_tls;
#ifdef WITH_TBB
if (pool->use_threads) {
new (&pool->tbb_group) TBBTaskGroup(priority);
}
if (thread_id == 0) {
BLI_assert(BLI_thread_is_main());
return &scheduler->task_threads[pool->thread_id].tls;
}
return &scheduler->task_threads[thread_id].tls;
}
BLI_INLINE void free_task_tls(TaskThreadLocalStorage *tls)
{
TaskMemPool *task_mempool = &tls->task_mempool;
for (int i = 0; i < task_mempool->num_tasks; i++) {
MEM_freeN(task_mempool->tasks[i]);
}
}
static Task *task_alloc(TaskPool *pool, const int thread_id)
{
BLI_assert(thread_id <= pool->scheduler->num_threads);
if (thread_id != -1) {
BLI_assert(thread_id >= 0);
BLI_assert(thread_id <= pool->scheduler->num_threads);
TaskThreadLocalStorage *tls = get_task_tls(pool, thread_id);
TaskMemPool *task_mempool = &tls->task_mempool;
/* Try to re-use task memory from a thread local storage. */
if (task_mempool->num_tasks > 0) {
--task_mempool->num_tasks;
/* Success! We've just avoided task allocation. */
#ifdef DEBUG_STATS
pool->mempool_stats[thread_id].num_reuse++;
#endif
return task_mempool->tasks[task_mempool->num_tasks];
}
/* We are doomed to allocate new task data. */
#ifdef DEBUG_STATS
pool->mempool_stats[thread_id].num_alloc++;
}
static void tbb_task_pool_run(TaskPool *pool, Task &&task)
{
if (pool->is_suspended) {
/* Suspended task that will be executed in work_and_wait(). */
Task *task_mem = (Task *)BLI_mempool_alloc(pool->suspended_mempool);
new (task_mem) Task(std::move(task));
#ifdef __GNUC__
/* Work around apparent compiler bug where task is not properly copied
* to task_mem. This appears unrelated to the use of placement new or
* move semantics, happens even writing to a plain C struct. Rather the
* call into TBB seems to have some indirect effect. */
std::atomic_thread_fence(std::memory_order_release);
#endif
}
return (Task *)MEM_mallocN(sizeof(Task), "New task");
}
static void task_free(TaskPool *pool, Task *task, const int thread_id)
{
task_data_free(task, thread_id);
BLI_assert(thread_id >= 0);
BLI_assert(thread_id <= pool->scheduler->num_threads);
if (thread_id == 0) {
BLI_assert(pool->use_local_tls || BLI_thread_is_main());
}
TaskThreadLocalStorage *tls = get_task_tls(pool, thread_id);
TaskMemPool *task_mempool = &tls->task_mempool;
if (task_mempool->num_tasks < MEMPOOL_SIZE - 1) {
/* Successfully allowed the task to be re-used later. */
task_mempool->tasks[task_mempool->num_tasks] = task;
++task_mempool->num_tasks;
#ifdef WITH_TBB
else if (pool->use_threads) {
/* Execute in TBB task group. */
pool->tbb_group.run(std::move(task));
}
#endif
else {
/* Local storage saturated, no other way than just discard
* the memory.
*
* TODO(sergey): We can perhaps store such pointer in a global
* scheduler pool, maybe it'll be faster than discarding and
* allocating again.
*/
MEM_freeN(task);
#ifdef DEBUG_STATS
pool->mempool_stats[thread_id].num_discard++;
/* Execute immediately. */
task();
}
}
static void tbb_task_pool_work_and_wait(TaskPool *pool)
{
/* Start any suspended task now. */
if (pool->suspended_mempool) {
pool->is_suspended = false;
BLI_mempool_iter iter;
BLI_mempool_iternew(pool->suspended_mempool, &iter);
while (Task *task = (Task *)BLI_mempool_iterstep(&iter)) {
tbb_task_pool_run(pool, std::move(*task));
}
BLI_mempool_clear(pool->suspended_mempool);
}
#ifdef WITH_TBB
if (pool->use_threads) {
/* This is called wait(), but internally it can actually do work. This
* matters because we don't want recursive usage of task pools to run
* out of threads and get stuck. */
pool->tbb_group.wait();
}
#endif
}
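A short sketch of the suspended-pool pattern implemented above, with a hypothetical run function: nothing executes while tasks are pushed, and everything starts in work_and_wait():

static void my_item_run(TaskPool *__restrict UNUSED(pool), void *taskdata)
{
  /* Hypothetical per-item work. */
  (void)taskdata;
}

static void process_items_suspended(void **items, int num_items)
{
  TaskPool *pool = BLI_task_pool_create_suspended(NULL, TASK_PRIORITY_HIGH);
  for (int i = 0; i < num_items; i++) {
    BLI_task_pool_push(pool, my_item_run, items[i], false, NULL);
  }
  /* All queued tasks start here, once everything has been created. */
  BLI_task_pool_work_and_wait(pool);
  BLI_task_pool_free(pool);
}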
static void tbb_task_pool_cancel(TaskPool *pool)
{
#ifdef WITH_TBB
if (pool->use_threads) {
pool->tbb_group.cancel();
pool->tbb_group.wait();
}
#endif
}
static bool tbb_task_pool_canceled(TaskPool *pool)
{
#ifdef WITH_TBB
if (pool->use_threads) {
return pool->tbb_group.is_canceling();
}
#endif
return false;
}
static void tbb_task_pool_free(TaskPool *pool)
{
#ifdef WITH_TBB
if (pool->use_threads) {
pool->tbb_group.~TBBTaskGroup();
}
#endif
if (pool->suspended_mempool) {
BLI_mempool_destroy(pool->suspended_mempool);
}
}
/* Task Scheduler */
/* Background Task Pool.
*
* Fallback for running background tasks when building without TBB. */
static void task_pool_num_decrease(TaskPool *pool, size_t done)
static void *background_task_run(void *userdata)
{
BLI_mutex_lock(&pool->num_mutex);
BLI_assert(pool->num >= done);
pool->num -= done;
if (pool->num == 0) {
BLI_condition_notify_all(&pool->num_cond);
TaskPool *pool = (TaskPool *)userdata;
while (Task *task = (Task *)BLI_thread_queue_pop(pool->background_queue)) {
(*task)();
task->~Task();
MEM_freeN(task);
}
BLI_mutex_unlock(&pool->num_mutex);
}
static void task_pool_num_increase(TaskPool *pool, size_t new_num)
{
BLI_mutex_lock(&pool->num_mutex);
pool->num += new_num;
BLI_condition_notify_all(&pool->num_cond);
BLI_mutex_unlock(&pool->num_mutex);
}
static bool task_scheduler_thread_wait_pop(TaskScheduler *scheduler, Task **task)
{
bool found_task = false;
BLI_mutex_lock(&scheduler->queue_mutex);
while (!scheduler->queue.first && !scheduler->do_exit) {
BLI_condition_wait(&scheduler->queue_cond, &scheduler->queue_mutex);
}
do {
Task *current_task;
/* Assuming we can only have a void queue in 'exit' case here seems logical
* (we should only be here after our worker thread has been woken up from a
* condition_wait(), which only happens after a new task was added to the queue),
* but it is wrong.
* Waiting on condition may wake up the thread even if condition is not signaled
* (spurious wake-ups), and some race condition may also empty the queue **after**
* condition has been signaled, but **before** awoken thread reaches this point...
* See http://stackoverflow.com/questions/8594591
*
* So we only abort here if do_exit is set.
*/
if (scheduler->do_exit) {
BLI_mutex_unlock(&scheduler->queue_mutex);
return false;
}
for (current_task = (Task *)scheduler->queue.first; current_task != NULL;
current_task = current_task->next) {
TaskPool *pool = current_task->pool;
if (scheduler->background_thread_only && !pool->run_in_background) {
continue;
}
*task = current_task;
found_task = true;
BLI_remlink(&scheduler->queue, *task);
break;
}
if (!found_task) {
BLI_condition_wait(&scheduler->queue_cond, &scheduler->queue_mutex);
}
} while (!found_task);
BLI_mutex_unlock(&scheduler->queue_mutex);
return true;
}
BLI_INLINE void handle_local_queue(TaskThreadLocalStorage *tls, const int thread_id)
{
BLI_assert(!tls->do_delayed_push);
while (tls->num_local_queue > 0) {
/* We pop task from queue before handling it so handler of the task can
* push next job to the local queue.
*/
tls->num_local_queue--;
Task *local_task = tls->local_queue[tls->num_local_queue];
/* TODO(sergey): Double-check work_and_wait() doesn't handle other's
* pool tasks.
*/
TaskPool *local_pool = local_task->pool;
local_task->run(local_pool, local_task->taskdata, thread_id);
task_free(local_pool, local_task, thread_id);
}
BLI_assert(!tls->do_delayed_push);
}
static void *task_scheduler_thread_run(void *thread_p)
{
TaskThread *thread = (TaskThread *)thread_p;
TaskThreadLocalStorage *tls = &thread->tls;
TaskScheduler *scheduler = thread->scheduler;
int thread_id = thread->id;
Task *task;
pthread_setspecific(scheduler->tls_id_key, thread);
/* signal the main thread when all threads have started */
BLI_mutex_lock(&scheduler->startup_mutex);
scheduler->num_thread_started++;
if (scheduler->num_thread_started == scheduler->num_threads) {
BLI_condition_notify_one(&scheduler->startup_cond);
}
BLI_mutex_unlock(&scheduler->startup_mutex);
/* keep popping off tasks */
while (task_scheduler_thread_wait_pop(scheduler, &task)) {
TaskPool *pool = task->pool;
/* run task */
BLI_assert(!tls->do_delayed_push);
task->run(pool, task->taskdata, thread_id);
BLI_assert(!tls->do_delayed_push);
/* delete task */
task_free(pool, task, thread_id);
/* Handle all tasks from local queue. */
handle_local_queue(tls, thread_id);
/* notify pool task was done */
task_pool_num_decrease(pool, 1);
}
return NULL;
}
TaskScheduler *BLI_task_scheduler_create(int num_threads)
static void background_task_pool_create(TaskPool *pool)
{
TaskScheduler *scheduler = (TaskScheduler *)MEM_callocN(sizeof(TaskScheduler), "TaskScheduler");
/* multiple places can use this task scheduler, sharing the same
* threads, so we keep track of the number of users. */
scheduler->do_exit = false;
BLI_listbase_clear(&scheduler->queue);
BLI_mutex_init(&scheduler->queue_mutex);
BLI_condition_init(&scheduler->queue_cond);
BLI_mutex_init(&scheduler->startup_mutex);
BLI_condition_init(&scheduler->startup_cond);
scheduler->num_thread_started = 0;
if (num_threads == 0) {
/* automatic number of threads will be main thread + num cores */
num_threads = BLI_system_thread_count();
}
/* main thread will also work, so we count it too */
num_threads -= 1;
/* Add background-only thread if needed. */
if (num_threads == 0) {
scheduler->background_thread_only = true;
num_threads = 1;
}
scheduler->task_threads = (TaskThread *)MEM_mallocN(sizeof(TaskThread) * (num_threads + 1),
"TaskScheduler task threads");
/* Initialize TLS for main thread. */
initialize_task_tls(&scheduler->task_threads[0].tls);
pthread_key_create(&scheduler->tls_id_key, NULL);
/* launch threads that will be waiting for work */
if (num_threads > 0) {
int i;
scheduler->num_threads = num_threads;
scheduler->threads = (pthread_t *)MEM_callocN(sizeof(pthread_t) * num_threads,
"TaskScheduler threads");
for (i = 0; i < num_threads; i++) {
TaskThread *thread = &scheduler->task_threads[i + 1];
thread->scheduler = scheduler;
thread->id = i + 1;
initialize_task_tls(&thread->tls);
if (pthread_create(&scheduler->threads[i], NULL, task_scheduler_thread_run, thread) != 0) {
fprintf(stderr, "TaskScheduler failed to launch thread %d/%d\n", i, num_threads);
}
}
}
/* Wait for all worker threads to start before returning to caller to prevent the case where
* threads are still starting and pthread_join is called, which causes a deadlock on pthreads4w.
*/
BLI_mutex_lock(&scheduler->startup_mutex);
/* NOTE: Use loop here to avoid false-positive everything-is-ready caused by spontaneous thread
* wake up. */
while (scheduler->num_thread_started != num_threads) {
BLI_condition_wait(&scheduler->startup_cond, &scheduler->startup_mutex);
}
BLI_mutex_unlock(&scheduler->startup_mutex);
return scheduler;
pool->background_queue = BLI_thread_queue_init();
BLI_threadpool_init(&pool->background_threads, background_task_run, 1);
BLI_threadpool_insert(&pool->background_threads, pool);
}
static void background_task_pool_run(TaskPool *pool, Task &&task)
{
  Task *task_mem = (Task *)MEM_mallocN(sizeof(Task), __func__);
  new (task_mem) Task(std::move(task));
  BLI_thread_queue_push(pool->background_queue, task_mem);
}
static void background_task_pool_work_and_wait(TaskPool *pool)
{
  /* Signal background thread to stop waiting for new tasks if none are
   * left, and wait for tasks and thread to finish. */
  BLI_thread_queue_nowait(pool->background_queue);
  BLI_thread_queue_wait_finish(pool->background_queue);
  BLI_threadpool_remove(&pool->background_threads, pool);
}
static void background_task_pool_cancel(TaskPool *pool)
{
  pool->background_is_canceling = true;

  /* Remove tasks not yet started by background thread. */
  BLI_thread_queue_nowait(pool->background_queue);
  while (Task *task = (Task *)BLI_thread_queue_pop(pool->background_queue)) {
    task->~Task();
    MEM_freeN(task);
  }

  /* Let background thread finish or cancel task it is working on. */
  BLI_threadpool_remove(&pool->background_threads, pool);
  pool->background_is_canceling = false;
}
static bool background_task_pool_canceled(TaskPool *pool)
{
  return pool->background_is_canceling;
}
static void background_task_pool_free(TaskPool *pool)
{
  background_task_pool_work_and_wait(pool);

  BLI_threadpool_end(&pool->background_threads);
  BLI_thread_queue_free(pool->background_queue);
}
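/* For context: `background_task_run` (the single worker's entry point, defined
 * earlier in the file and not shown in this hunk) essentially drains the FIFO
 * queue. A sketch of that shape, assuming Task exposes operator() to invoke
 * the stored run function -- not the verbatim implementation:
 *
 *   static void *background_task_run(void *userdata)
 *   {
 *     TaskPool *pool = (TaskPool *)userdata;
 *     // Blocks until a task arrives; returns NULL once
 *     // BLI_thread_queue_nowait() was called and the queue is empty.
 *     while (Task *task = (Task *)BLI_thread_queue_pop(pool->background_queue)) {
 *       (*task)();
 *       task->~Task();
 *       MEM_freeN(task);
 *     }
 *     return NULL;
 *   }
 */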
/* Task Pool */

static TaskPool *task_pool_create_ex(void *userdata, TaskPoolType type, TaskPriority priority)
{
  /* Ensure malloc will go fine from threads,
   *
   * This is needed because we could be in main thread here
   * and malloc could be non-thread-safe at this point because
   * no other jobs are running.
   */
  BLI_threaded_malloc_begin();

  const bool use_threads = BLI_task_scheduler_num_threads() > 1 && type != TASK_POOL_NO_THREADS;

  /* Background task pool uses regular TBB scheduling if available. Only when
   * building without TBB or running with -t 1 do we need to ensure these tasks
   * do not block the main thread. */
  if (type == TASK_POOL_BACKGROUND && use_threads) {
    type = TASK_POOL_TBB;
  }

  /* Allocate task pool. */
  TaskPool *pool = (TaskPool *)MEM_callocN(sizeof(TaskPool), "TaskPool");

  pool->type = type;
  pool->use_threads = use_threads;

  pool->userdata = userdata;
  BLI_mutex_init(&pool->user_mutex);

  switch (type) {
    case TASK_POOL_TBB:
    case TASK_POOL_TBB_SUSPENDED:
    case TASK_POOL_NO_THREADS:
      tbb_task_pool_create(pool, priority);
      break;
    case TASK_POOL_BACKGROUND:
    case TASK_POOL_BACKGROUND_SERIAL:
      background_task_pool_create(pool);
      break;
  }

  return pool;
}
/**
 * Create a normal task pool. Tasks will be executed as soon as they are added.
 */
TaskPool *BLI_task_pool_create(void *userdata, TaskPriority priority)
{
  return task_pool_create_ex(userdata, TASK_POOL_TBB, priority);
}
/**
 * Create a background task pool.
 * In multi-threaded context, there is no difference with #BLI_task_pool_create(),
 * but in single-threaded case it is ensured to have at least one worker thread to run on
 * (i.e. you do not have to call #BLI_task_pool_work_and_wait on it to be sure it will be
 * processed).
 *
 * \note Background pools are non-recursive (that is, you should not create other background
 * pools in tasks assigned to a background pool, they could end up never being executed,
 * since the 'fallback' background thread is already busy with parent task in
 * single-threaded context).
 */
TaskPool *BLI_task_pool_create_background(void *userdata, TaskPriority priority)
{
  return task_pool_create_ex(userdata, TASK_POOL_BACKGROUND, priority);
}
/**
 * Similar to BLI_task_pool_create(), but the pushed tasks are not worked on
 * until BLI_task_pool_work_and_wait() is called. This helps reduce threading
 * overhead when pushing a huge amount of small initial tasks from the main thread.
 */
TaskPool *BLI_task_pool_create_suspended(void *userdata, TaskPriority priority)
{
  return task_pool_create_ex(userdata, TASK_POOL_TBB_SUSPENDED, priority);
}
/**
 * Single threaded task pool that executes pushed tasks immediately, for
 * debugging purposes.
 */
TaskPool *BLI_task_pool_create_no_threads(void *userdata)
{
  return task_pool_create_ex(userdata, TASK_POOL_NO_THREADS, TASK_PRIORITY_HIGH);
}

/**
 * Task pool that executes one task after the other, possibly on different threads
 * but never in parallel.
 */
TaskPool *BLI_task_pool_create_background_serial(void *userdata, TaskPriority priority)
{
  return task_pool_create_ex(userdata, TASK_POOL_BACKGROUND_SERIAL, priority);
}
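/* A minimal usage sketch of the reworked pool API: the TaskScheduler argument
 * is gone from pool creation, and TaskRunFunction lost its thread_id
 * parameter. `MyData` and the function names below are hypothetical, for
 * illustration only: */
typedef struct MyData {
  int value;
} MyData;

static void my_task_func(TaskPool *__restrict pool, void *taskdata)
{
  MyData *data = (MyData *)taskdata;
  data->value *= 2; /* The actual work would go here. */
  (void)pool;
}

static void my_run_tasks(MyData *items, int num_items)
{
  TaskPool *task_pool = BLI_task_pool_create(NULL, TASK_PRIORITY_HIGH);
  for (int i = 0; i < num_items; i++) {
    BLI_task_pool_push(task_pool, my_task_func, &items[i], false, NULL);
  }
  BLI_task_pool_work_and_wait(task_pool);
  BLI_task_pool_free(task_pool);
}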
void BLI_task_pool_free(TaskPool *pool)
{
  BLI_task_pool_cancel(pool);

  switch (pool->type) {
    case TASK_POOL_TBB:
    case TASK_POOL_TBB_SUSPENDED:
    case TASK_POOL_NO_THREADS:
      tbb_task_pool_free(pool);
      break;
    case TASK_POOL_BACKGROUND:
    case TASK_POOL_BACKGROUND_SERIAL:
      background_task_pool_free(pool);
      break;
  }

  BLI_mutex_end(&pool->user_mutex);

  MEM_freeN(pool);

  BLI_threaded_malloc_end();
}
void BLI_task_pool_push(TaskPool *pool,
                        TaskRunFunction run,
                        void *taskdata,
                        bool free_taskdata,
                        TaskFreeFunction freedata)
{
  Task task(pool, run, taskdata, free_taskdata, freedata);

  switch (pool->type) {
    case TASK_POOL_TBB:
    case TASK_POOL_TBB_SUSPENDED:
    case TASK_POOL_NO_THREADS:
      tbb_task_pool_run(pool, std::move(task));
      break;
    case TASK_POOL_BACKGROUND:
    case TASK_POOL_BACKGROUND_SERIAL:
      background_task_pool_run(pool, std::move(task));
      break;
  }
}
void BLI_task_pool_work_and_wait(TaskPool *pool)
{
  switch (pool->type) {
    case TASK_POOL_TBB:
    case TASK_POOL_TBB_SUSPENDED:
    case TASK_POOL_NO_THREADS:
      tbb_task_pool_work_and_wait(pool);
      break;
    case TASK_POOL_BACKGROUND:
    case TASK_POOL_BACKGROUND_SERIAL:
      background_task_pool_work_and_wait(pool);
      break;
  }
}
void BLI_task_pool_cancel(TaskPool *pool)
{
  switch (pool->type) {
    case TASK_POOL_TBB:
    case TASK_POOL_TBB_SUSPENDED:
    case TASK_POOL_NO_THREADS:
      tbb_task_pool_cancel(pool);
      break;
    case TASK_POOL_BACKGROUND:
    case TASK_POOL_BACKGROUND_SERIAL:
      background_task_pool_cancel(pool);
      break;
  }
}
bool BLI_task_pool_canceled(TaskPool *pool)
{
  switch (pool->type) {
    case TASK_POOL_TBB:
    case TASK_POOL_TBB_SUSPENDED:
    case TASK_POOL_NO_THREADS:
      return tbb_task_pool_canceled(pool);
    case TASK_POOL_BACKGROUND:
    case TASK_POOL_BACKGROUND_SERIAL:
      return background_task_pool_canceled(pool);
  }
  /* Note the `!`: a bare string literal is always true and would make the
   * assert a no-op; this matches the `BLI_assert(!"...")` idiom used elsewhere. */
  BLI_assert(!"BLI_task_pool_canceled: Control flow should not come here!");
  return false;
}
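/* Cancellation remains cooperative: BLI_task_pool_cancel() stops tasks that
 * have not started yet and waits for running ones to return, so long-running
 * task bodies should poll the pool. A sketch (hypothetical task body): */
static void my_cancellable_task(TaskPool *__restrict pool, void *taskdata)
{
  float *accum = (float *)taskdata;
  for (int i = 0; i < 1000000; i++) {
    if (BLI_task_pool_canceled(pool)) {
      /* Return early; the canceling thread is blocked until we do. */
      return;
    }
    *accum += 1.0f; /* The actual work would go here. */
  }
}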
void *BLI_task_pool_user_data(TaskPool *pool)
{
  return pool->userdata;
}

ThreadMutex *BLI_task_pool_user_mutex(TaskPool *pool)
{
  return &pool->user_mutex;
}

View File

@@ -14,60 +14,72 @@
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/** \file
* \ingroup bli
*
* Task parallel range functions.
*/
#include <stdlib.h>
#include "MEM_guardedalloc.h"
#include "DNA_listBase.h"
#include "BLI_task.h"
#include "BLI_threads.h"
#include "BKE_pbvh.h"
#include "atomic_ops.h"
#ifdef WITH_TBB
/* Quiet top level deprecation message, unrelated to API usage here. */
# define TBB_SUPPRESS_DEPRECATED_MESSAGES 1
# include <tbb/tbb.h>
#endif
#ifdef WITH_TBB
/* Functor for running TBB parallel_for and parallel_reduce. */
struct RangeTask {
  TaskParallelRangeFunc func;
  void *userdata;
  const TaskParallelSettings *settings;

  void *userdata_chunk;

  /* Root constructor. */
  RangeTask(TaskParallelRangeFunc func, void *userdata, const TaskParallelSettings *settings)
      : func(func), userdata(userdata), settings(settings)
  {
    init_chunk(settings->userdata_chunk);
  }

  /* Copy constructor. */
  RangeTask(const RangeTask &other)
      : func(other.func), userdata(other.userdata), settings(other.settings)
  {
    init_chunk(settings->userdata_chunk);
  }

  /* Splitting constructor for parallel reduce. */
  RangeTask(RangeTask &other, tbb::split)
      : func(other.func), userdata(other.userdata), settings(other.settings)
  {
    init_chunk(settings->userdata_chunk);
  }

  ~RangeTask()
  {
    if (settings->func_free != NULL) {
      settings->func_free(userdata, userdata_chunk);
    }
    MEM_SAFE_FREE(userdata_chunk);
  }

  void init_chunk(void *from_chunk)
  {
    if (from_chunk) {
      userdata_chunk = MEM_mallocN(settings->userdata_chunk_size, "RangeTask");
      memcpy(userdata_chunk, from_chunk, settings->userdata_chunk_size);
    }
    else {
      userdata_chunk = NULL;
    }
  }

  void operator()(const tbb::blocked_range<int> &r) const
  {
    TaskParallelTLS tls;
    tls.userdata_chunk = userdata_chunk;
    for (int i = r.begin(); i != r.end(); ++i) {
      func(userdata, i, &tls);
    }
  }

  void join(const RangeTask &other)
  {
    settings->func_reduce(userdata, userdata_chunk, other.userdata_chunk);
  }
};
#endif
void BLI_task_parallel_range(const int start,
                             const int stop,
                             void *userdata,
                             TaskParallelRangeFunc func,
                             const TaskParallelSettings *settings)
{
#ifdef WITH_TBB
  /* Multithreading. */
  if (settings->use_threading && BLI_task_scheduler_num_threads() > 1) {
    RangeTask task(func, userdata, settings);
    const size_t grainsize = MAX2(settings->min_iter_per_thread, 1);
    const tbb::blocked_range<int> range(start, stop, grainsize);

    if (settings->func_reduce) {
      parallel_reduce(range, task);
      if (settings->userdata_chunk) {
        memcpy(settings->userdata_chunk, task.userdata_chunk, settings->userdata_chunk_size);
      }
    }
    else {
      parallel_for(range, task);
    }
    return;
  }
#endif

  /* Single threaded. Nothing to reduce as everything is accumulated into the
   * main userdata chunk directly. */
  TaskParallelTLS tls;
  tls.userdata_chunk = settings->userdata_chunk;
  for (int i = start; i < stop; i++) {
    func(userdata, i, &tls);
  }
  if (settings->func_free != NULL) {
    settings->func_free(userdata, settings->userdata_chunk);
  }
}
int BLI_task_parallel_thread_id(const TaskParallelTLS *UNUSED(tls))
{
#ifdef WITH_TBB
/* Get a unique thread ID for texture nodes. In the future we should get rid
* of the thread ID and change texture evaluation to not require per-thread
* storage that can't be efficiently allocated on the stack. */
static tbb::enumerable_thread_specific<int> tbb_thread_id(-1);
static int tbb_thread_id_counter = 0;
int &thread_id = tbb_thread_id.local();
if (thread_id == -1) {
thread_id = atomic_fetch_and_add_int32(&tbb_thread_id_counter, 1);
if (thread_id >= BLENDER_MAX_THREADS) {
BLI_assert(!"Maximum number of threads exceeded for sculpting");
thread_id = thread_id % BLENDER_MAX_THREADS;
}
}
return thread_id;
#else
return 0;
#endif
}
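/* A minimal sketch of the range API with per-thread accumulation, as the
 * RangeTask functor above supports it. `SumTLS` and the function names are
 * hypothetical, for illustration only: */
typedef struct SumTLS {
  int sum;
} SumTLS;

static void sum_range_func(void *__restrict userdata,
                           const int i,
                           const TaskParallelTLS *__restrict tls)
{
  const int *values = (const int *)userdata;
  SumTLS *chunk = (SumTLS *)tls->userdata_chunk;
  chunk->sum += values[i]; /* Accumulate into the per-thread chunk. */
}

static void sum_range_reduce(const void *__restrict UNUSED(userdata),
                             void *__restrict chunk_join,
                             void *__restrict chunk)
{
  /* Fold one thread's chunk into another when TBB joins split ranges. */
  ((SumTLS *)chunk_join)->sum += ((SumTLS *)chunk)->sum;
}

static int sum_values(int *values, int num_values)
{
  SumTLS sum = {0};
  TaskParallelSettings settings;
  BLI_parallel_range_settings_defaults(&settings);
  settings.userdata_chunk = &sum;
  settings.userdata_chunk_size = sizeof(SumTLS);
  settings.func_reduce = sum_range_reduce;
  BLI_task_parallel_range(0, num_values, values, sum_range_func, &settings);
  /* The reduced result is copied back into the main chunk. */
  return sum.sum;
}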

View File

@@ -0,0 +1,73 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/** \file
* \ingroup bli
*
* Task scheduler initialization.
*/
#include "MEM_guardedalloc.h"
#include "BLI_task.h"
#include "BLI_threads.h"
#ifdef WITH_TBB
/* Quiet top level deprecation message, unrelated to API usage here. */
# define TBB_SUPPRESS_DEPRECATED_MESSAGES 1
# include <tbb/tbb.h>
#endif
/* Task Scheduler */
static int task_scheduler_num_threads = 1;
#ifdef WITH_TBB
static tbb::global_control *task_scheduler_global_control = nullptr;
#endif
void BLI_task_scheduler_init()
{
#ifdef WITH_TBB
const int num_threads_override = BLI_system_num_threads_override_get();
if (num_threads_override > 0) {
/* Override number of threads. This setting is used within the lifetime
* of tbb::global_control, so we allocate it on the heap. */
task_scheduler_global_control = OBJECT_GUARDED_NEW(
tbb::global_control, tbb::global_control::max_allowed_parallelism, num_threads_override);
task_scheduler_num_threads = num_threads_override;
}
else {
/* Let TBB choose the number of threads. For (legacy) code that calls
* BLI_task_scheduler_num_threads() we provide the system thread count.
* Ideally such code should be rewritten not to use the number of threads
* at all. */
task_scheduler_num_threads = BLI_system_thread_count();
}
#endif
}
void BLI_task_scheduler_exit()
{
#ifdef WITH_TBB
OBJECT_GUARDED_DELETE(task_scheduler_global_control, tbb::global_control);
#endif
}
int BLI_task_scheduler_num_threads()
{
return task_scheduler_num_threads;
}
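/* A sketch of the expected init/exit pairing. The real call sites live in
 * Blender's creator/startup code and are an assumption here, not part of this
 * patch hunk: */
static void example_scheduler_lifecycle(void)
{
  BLI_threadapi_init();
  /* After command-line parsing, so that a `-t N` override is already known
   * via BLI_system_num_threads_override_get(). */
  BLI_task_scheduler_init();

  printf("worker threads: %d\n", BLI_task_scheduler_num_threads());

  BLI_task_scheduler_exit();
  BLI_threadapi_exit();
}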

View File

@@ -61,9 +61,6 @@ extern pthread_key_t gomp_tls_key;
static void *thread_tls_data;
#endif
/* ********** basic thread control API ************
*
* Many thread cases have an X amount of jobs, and only an Y amount of
@@ -157,27 +154,9 @@ void BLI_threadapi_init(void)
void BLI_threadapi_exit(void)
{
BLI_spin_end(&_malloc_lock);
}
/* tot = 0 only initializes malloc mutex in a safe way (see sequence.c)
* problem otherwise: scene render will kill of the mutex!
*/
@@ -839,11 +818,6 @@ void BLI_threaded_malloc_begin(void)
unsigned int level = atomic_fetch_and_add_u(&thread_levels, 1);
if (level == 0) {
MEM_set_lock_callback(BLI_lock_malloc_thread, BLI_unlock_malloc_thread);
}
}

View File

@@ -60,18 +60,17 @@ namespace {
struct DepsgraphEvalState;
void deg_task_run_func(TaskPool *pool, void *taskdata);
template<typename ScheduleFunction, typename... ScheduleFunctionArgs>
void schedule_children(DepsgraphEvalState *state,
OperationNode *node,
ScheduleFunction *schedule_function,
ScheduleFunctionArgs... schedule_function_args);
void schedule_node_to_pool(OperationNode *node, const int UNUSED(thread_id), TaskPool *pool)
{
BLI_task_pool_push(pool, deg_task_run_func, node, false, NULL);
}
/* Denotes which part of dependency graph is being evaluated. */
@@ -115,7 +114,7 @@ void evaluate_node(const DepsgraphEvalState *state, OperationNode *operation_nod
}
}
void deg_task_run_func(TaskPool *pool, void *taskdata)
{
void *userdata_v = BLI_task_pool_user_data(pool);
DepsgraphEvalState *state = (DepsgraphEvalState *)userdata_v;
@@ -125,9 +124,7 @@ void deg_task_run_func(TaskPool *pool, void *taskdata, int thread_id)
evaluate_node(state, operation_node);
/* Schedule children. */
schedule_children(state, operation_node, schedule_node_to_pool, pool);
}
bool check_operation_node_visible(OperationNode *op_node)
@@ -237,7 +234,6 @@ template<typename ScheduleFunction, typename... ScheduleFunctionArgs>
void schedule_node(DepsgraphEvalState *state,
OperationNode *node,
bool dec_parents,
ScheduleFunction *schedule_function,
ScheduleFunctionArgs... schedule_function_args)
{
@@ -270,11 +266,11 @@ void schedule_node(DepsgraphEvalState *state,
if (!is_scheduled) {
if (node->is_noop()) {
/* skip NOOP node, schedule children right away */
schedule_children(state, node, schedule_function, schedule_function_args...);
}
else {
/* children are scheduled once this task is completed */
schedule_function(node, 0, schedule_function_args...);
}
}
}
@@ -285,14 +281,13 @@ void schedule_graph(DepsgraphEvalState *state,
ScheduleFunctionArgs... schedule_function_args)
{
for (OperationNode *node : state->graph->operations) {
schedule_node(state, node, false, schedule_function, schedule_function_args...);
}
}
template<typename ScheduleFunction, typename... ScheduleFunctionArgs>
void schedule_children(DepsgraphEvalState *state,
OperationNode *node,
ScheduleFunction *schedule_function,
ScheduleFunctionArgs... schedule_function_args)
{
@@ -306,7 +301,6 @@ void schedule_children(DepsgraphEvalState *state,
schedule_node(state,
child,
(rel->flag & RELATION_FLAG_CYCLIC) == 0,
schedule_function,
schedule_function_args...);
}
@@ -329,7 +323,7 @@ void evaluate_graph_single_threaded(DepsgraphEvalState *state)
BLI_gsqueue_pop(evaluation_queue, &operation_node);
evaluate_node(state, operation_node);
schedule_children(state, operation_node, schedule_node_to_queue, evaluation_queue);
}
BLI_gsqueue_free(evaluation_queue);
@@ -353,6 +347,16 @@ void depsgraph_ensure_view_layer(Depsgraph *graph)
} // namespace
static TaskPool *deg_evaluate_task_pool_create(DepsgraphEvalState *state)
{
if (G.debug & G_DEBUG_DEPSGRAPH_NO_THREADS) {
return BLI_task_pool_create_no_threads(state);
}
else {
return BLI_task_pool_create_suspended(state, TASK_PRIORITY_HIGH);
}
}
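/* Sketch of how both evaluation stages now use this helper: the removed
 * BLI_task_pool_work_wait_and_reset() is replaced by creating and freeing a
 * fresh pool per stage, which keeps the many initial pushes from the main
 * thread cheap. Condensed (not verbatim) from deg_evaluate_on_refresh() below: */
static void deg_evaluate_stages_sketch(DepsgraphEvalState *state)
{
  const EvaluationStage stages[2] = {EvaluationStage::COPY_ON_WRITE,
                                     EvaluationStage::THREADED_EVALUATION};
  for (const EvaluationStage stage : stages) {
    state->stage = stage;
    TaskPool *task_pool = deg_evaluate_task_pool_create(state);
    schedule_graph(state, schedule_node_to_pool, task_pool);
    BLI_task_pool_work_and_wait(task_pool);
    BLI_task_pool_free(task_pool);
  }
}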
/**
* Evaluate all nodes tagged for updating,
* \warning This is usually done as part of main loop, but may also be
@@ -376,30 +380,20 @@ void deg_evaluate_on_refresh(Depsgraph *graph)
state.graph = graph;
state.do_stats = graph->debug.do_time_debug();
state.need_single_thread_pass = false;
/* Prepare all nodes for evaluation. */
initialize_execution(&state, graph);
/* Do actual evaluation now. */
/* First, process all Copy-On-Write nodes. */
state.stage = EvaluationStage::COPY_ON_WRITE;
TaskPool *task_pool = deg_evaluate_task_pool_create(&state);
schedule_graph(&state, schedule_node_to_pool, task_pool);
BLI_task_pool_work_and_wait(task_pool);
BLI_task_pool_free(task_pool);
/* After that, process all other nodes. */
state.stage = EvaluationStage::THREADED_EVALUATION;
task_pool = deg_evaluate_task_pool_create(&state);
schedule_graph(&state, schedule_node_to_pool, task_pool);
BLI_task_pool_work_and_wait(task_pool);
BLI_task_pool_free(task_pool);
@@ -417,9 +411,6 @@ void deg_evaluate_on_refresh(Depsgraph *graph)
}
/* Clear any uncleared tags - just in case. */
deg_graph_clear_tags(graph);
graph->is_evaluating = false;
graph->debug.end_graph_evaluation();

View File

@@ -4509,7 +4509,7 @@ BLI_INLINE void mesh_extract_iter(const MeshRenderData *mr,
}
}
static void extract_run(TaskPool *__restrict UNUSED(pool), void *taskdata)
{
ExtractTaskData *data = taskdata;
mesh_extract_iter(
@@ -4595,7 +4595,7 @@ static void extract_task_create(TaskPool *task_pool,
else {
/* Single threaded extraction. */
(*task_counter)++;
extract_run(NULL, taskdata);
MEM_freeN(taskdata);
}
}
@@ -4685,11 +4685,11 @@ void mesh_buffer_cache_create_requested(MeshBatchCache *cache,
double rdata_end = PIL_check_seconds_timer();
#endif
TaskPool *task_pool;
/* Create a suspended pool as the finalize method could be called too early.
* See `extract_run`. */
task_pool = BLI_task_pool_create_suspended(NULL, TASK_PRIORITY_HIGH);
size_t counters_size = (sizeof(mbc) / sizeof(void *)) * sizeof(int32_t);
int32_t *task_counters = MEM_callocN(counters_size, __func__);

View File

@@ -397,9 +397,7 @@ struct UMArrayData {
UndoMesh *um;
const UndoMesh *um_ref; /* can be NULL */
};
static void um_arraystore_compact_cb(TaskPool *__restrict UNUSED(pool), void *taskdata)
{
struct UMArrayData *um_data = taskdata;
um_arraystore_compact_with_info(um_data->um, um_data->um_ref);
@@ -541,9 +539,7 @@ static void *undomesh_from_editmesh(UndoMesh *um, BMEditMesh *em, Key *key)
# ifdef USE_ARRAY_STORE_THREAD
if (um_arraystore.task_pool == NULL) {
um_arraystore.task_pool = BLI_task_pool_create_background(NULL, TASK_PRIORITY_LOW);
}
struct UMArrayData *um_data = MEM_mallocN(sizeof(*um_data), __func__);

View File

@@ -849,7 +849,6 @@ static void foreach_mouse_hit_key(PEData *data, ForHitKeyMatFunc func, int selec
TaskParallelSettings settings;
BLI_parallel_range_settings_defaults(&settings);
BLI_task_parallel_range(0, edit->totpoint, &iter_data, foreach_mouse_hit_key_iter, &settings);
}
@@ -1229,7 +1228,6 @@ static void pe_deflect_emitter(Scene *scene, Object *ob, PTCacheEdit *edit)
TaskParallelSettings settings;
BLI_parallel_range_settings_defaults(&settings);
BLI_task_parallel_range(0, edit->totpoint, &iter_data, deflect_emitter_iter, &settings);
}
@@ -1278,7 +1276,6 @@ static void PE_apply_lengths(Scene *scene, PTCacheEdit *edit)
TaskParallelSettings settings;
BLI_parallel_range_settings_defaults(&settings);
BLI_task_parallel_range(0, edit->totpoint, &iter_data, apply_lengths_iter, &settings);
}
@@ -1353,7 +1350,6 @@ static void pe_iterate_lengths(Scene *scene, PTCacheEdit *edit)
TaskParallelSettings settings;
BLI_parallel_range_settings_defaults(&settings);
BLI_task_parallel_range(0, edit->totpoint, &iter_data, iterate_lengths_iter, &settings);
}
@@ -4112,7 +4108,7 @@ static void brush_add_count_iter(void *__restrict iter_data_v,
dmy = size;
if (tls->rng == NULL) {
tls->rng = BLI_rng_new_srandom(psys->seed + data->mval[0] + data->mval[1] +
BLI_task_parallel_thread_id(tls_v));
}
/* rejection sampling to get points in circle */
while (dmx * dmx + dmy * dmy > size2) {
@@ -4257,7 +4253,6 @@ static int brush_add(const bContext *C, PEData *data, short number)
TaskParallelSettings settings;
BLI_parallel_range_settings_defaults(&settings);
settings.userdata_chunk = &tls;
settings.userdata_chunk_size = sizeof(BrushAddCountIterTLSData);
settings.func_reduce = brush_add_count_iter_reduce;

View File

@@ -144,7 +144,6 @@ typedef struct OGLRender {
wmTimer *timer; /* use to check if running modal or not (invoke'd or exec'd)*/
void **movie_ctx_arr;
TaskPool *task_pool;
bool pool_ok;
bool is_animation;
@@ -856,22 +855,16 @@ static bool screen_opengl_render_init(bContext *C, wmOperator *op)
gather_frames_to_render(C, oglrender);
}
if (BKE_imtype_is_movie(scene->r.im_format.imtype)) {
oglrender->task_pool = BLI_task_pool_create_background_serial(oglrender, TASK_PRIORITY_LOW);
}
else {
oglrender->task_pool = BLI_task_pool_create(oglrender, TASK_PRIORITY_LOW);
}
oglrender->pool_ok = true;
BLI_spin_init(&oglrender->reports_lock);
}
else {
oglrender->task_pool = NULL;
}
oglrender->num_scheduled_frames = 0;
@@ -910,10 +903,6 @@ static void screen_opengl_render_end(bContext *C, OGLRender *oglrender)
}
BLI_task_pool_work_and_wait(oglrender->task_pool);
BLI_task_pool_free(oglrender->task_pool);
BLI_spin_end(&oglrender->reports_lock);
}
BLI_mutex_end(&oglrender->task_mutex);
@@ -1033,7 +1022,7 @@ typedef struct WriteTaskData {
Scene tmp_scene;
} WriteTaskData;
static void write_result_func(TaskPool *__restrict pool, void *task_data_v)
{
OGLRender *oglrender = (OGLRender *)BLI_task_pool_user_data(pool);
WriteTaskData *task_data = (WriteTaskData *)task_data_v;

View File

@@ -171,6 +171,8 @@ static void load_tex_task_cb_ex(void *__restrict userdata,
bool convert_to_linear = false;
struct ColorSpace *colorspace = NULL;
const int thread_id = BLI_task_parallel_thread_id(tls);
if (mtex->tex && mtex->tex->type == TEX_IMAGE && mtex->tex->ima) {
ImBuf *tex_ibuf = BKE_image_pool_acquire_ibuf(mtex->tex->ima, &mtex->tex->iuser, pool);
/* For consistency, sampling always returns color in linear space. */
@@ -214,8 +216,7 @@ static void load_tex_task_cb_ex(void *__restrict userdata,
if (col) {
float rgba[4];
paint_get_tex_pixel_col(mtex, x, y, rgba, pool, thread_id, convert_to_linear, colorspace);
buffer[index * 4] = rgba[0] * 255;
buffer[index * 4 + 1] = rgba[1] * 255;
@@ -223,7 +224,7 @@ static void load_tex_task_cb_ex(void *__restrict userdata,
buffer[index * 4 + 3] = rgba[3] * 255;
}
else {
float avg = paint_get_tex_pixel(mtex, x, y, pool, thread_id);
avg += br->texture_sample_bias;

View File

@@ -5168,9 +5168,7 @@ static void copy_original_alpha_channel(ProjPixel *pixel, bool is_floatbuf)
}
/* Run this for single and multi-threaded painting. */
static void do_projectpaint_thread(TaskPool *__restrict UNUSED(pool), void *ph_v)
{
/* First unpack args from the struct */
ProjPaintState *ps = ((ProjectHandle *)ph_v)->ps;
@@ -5605,7 +5603,6 @@ static bool project_paint_op(void *state, const float lastpos[2], const float po
bool touch_any = false;
ProjectHandle handles[BLENDER_MAX_THREADS];
TaskPool *task_pool = NULL;
int a, i;
@@ -5616,8 +5613,7 @@ static bool project_paint_op(void *state, const float lastpos[2], const float po
}
if (ps->thread_tot > 1) {
task_pool = BLI_task_pool_create_suspended(NULL, TASK_PRIORITY_HIGH);
}
image_pool = BKE_image_pool_new();
@@ -5661,7 +5657,7 @@ static bool project_paint_op(void *state, const float lastpos[2], const float po
BLI_task_pool_free(task_pool);
}
else {
do_projectpaint_thread(NULL, &handles[0]);
}
BKE_image_pool_free(image_pool);

View File

@@ -168,9 +168,9 @@ static int mask_flood_fill_exec(bContext *C, wmOperator *op)
.value = value,
};
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, (sd->flags & SCULPT_USE_OPENMP), totnode);
BLI_task_parallel_range(0, totnode, &data, mask_flood_fill_task_cb, &settings);
if (multires) {
multires_mark_as_modified(depsgraph, ob, MULTIRES_COORDS_MODIFIED);
@@ -343,9 +343,9 @@ bool ED_sculpt_mask_box_select(struct bContext *C, ViewContext *vc, const rcti *
.clip_planes_final = clip_planes_final,
};
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, (sd->flags & SCULPT_USE_OPENMP), totnode);
BLI_task_parallel_range(0, totnode, &data, mask_box_select_task_cb, &settings);
if (nodes) {
MEM_freeN(nodes);
@@ -532,9 +532,9 @@ static int paint_mask_gesture_lasso_exec(bContext *C, wmOperator *op)
data.task_data.mode = mode;
data.task_data.value = value;
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, (sd->flags & SCULPT_USE_OPENMP), totnode);
BLI_task_parallel_range(0, totnode, &data, mask_gesture_lasso_task_cb, &settings);
if (nodes) {
MEM_freeN(nodes);

View File

@@ -2183,9 +2183,9 @@ static void calculate_average_weight(SculptThreadedTaskData *data,
struct WPaintAverageAccum *accum = MEM_mallocN(sizeof(*accum) * totnode, __func__);
data->custom_data = accum;
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, (data->sd->flags & SCULPT_USE_OPENMP), totnode);
BLI_task_parallel_range(0, totnode, data, do_wpaint_brush_calc_average_weight_cb_ex, &settings);
uint accum_len = 0;
double accum_weight = 0.0;
@@ -2231,22 +2231,22 @@ static void wpaint_paint_leaves(bContext *C,
data.strength = BKE_brush_weight_get(scene, brush);
/* NOTE: current mirroring code cannot be run in parallel */
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, !(me->editflag & ME_EDIT_MIRROR_X), totnode);
switch ((eBrushWeightPaintTool)brush->weightpaint_tool) {
case WPAINT_TOOL_AVERAGE:
calculate_average_weight(&data, nodes, totnode);
BLI_task_parallel_range(0, totnode, &data, do_wpaint_brush_draw_task_cb_ex, &settings);
break;
case WPAINT_TOOL_SMEAR:
BLI_task_parallel_range(0, totnode, &data, do_wpaint_brush_smear_task_cb_ex, &settings);
break;
case WPAINT_TOOL_BLUR:
BLI_task_parallel_range(0, totnode, &data, do_wpaint_brush_blur_task_cb_ex, &settings);
break;
case WPAINT_TOOL_DRAW:
BLI_task_parallel_range(0, totnode, &data, do_wpaint_brush_draw_task_cb_ex, &settings);
break;
}
}
@@ -3252,9 +3252,9 @@ static void calculate_average_color(SculptThreadedTaskData *data,
struct VPaintAverageAccum *accum = MEM_mallocN(sizeof(*accum) * totnode, __func__);
data->custom_data = accum;
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, true, totnode);
BLI_task_parallel_range(0, totnode, data, do_vpaint_brush_calc_average_color_cb_ex, &settings);
uint accum_len = 0;
uint accum_value[3] = {0};
@@ -3298,21 +3298,21 @@ static void vpaint_paint_leaves(bContext *C,
.lcol = (uint *)me->mloopcol,
.me = me,
};
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, true, totnode);
switch ((eBrushVertexPaintTool)brush->vertexpaint_tool) {
case VPAINT_TOOL_AVERAGE:
calculate_average_color(&data, nodes, totnode);
BLI_task_parallel_range(0, totnode, &data, do_vpaint_brush_draw_task_cb_ex, &settings);
break;
case VPAINT_TOOL_BLUR:
BLI_task_parallel_range(0, totnode, &data, do_vpaint_brush_blur_task_cb_ex, &settings);
break;
case VPAINT_TOOL_SMEAR:
BLI_task_parallel_range(0, totnode, &data, do_vpaint_brush_smear_task_cb_ex, &settings);
break;
case VPAINT_TOOL_DRAW:
BLI_task_parallel_range(0, totnode, &data, do_vpaint_brush_draw_task_cb_ex, &settings);
break;
}
}
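/* The remaining sculpt/paint changes in this patch all follow the same
 * mechanical mapping (a summary, not additional code):
 *
 *   PBVHParallelSettings settings;   ->  TaskParallelSettings settings;
 *   BKE_pbvh_parallel_range_settings()   (unchanged, now fills the BLI type)
 *   BKE_pbvh_parallel_range(...)     ->  BLI_task_parallel_range(...)
 *   tls->thread_id                   ->  BLI_task_parallel_thread_id(tls)
 */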

View File

@@ -806,12 +806,12 @@ int SCULPT_nearest_vertex_get(
nvtd.nearest_vertex_index = -1;
nvtd.nearest_vertex_distance_squared = FLT_MAX;
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, (sd->flags & SCULPT_USE_OPENMP), totnode);
settings.func_reduce = nearest_vertex_get_reduce;
settings.userdata_chunk = &nvtd;
settings.userdata_chunk_size = sizeof(NearestVertexTLSData);
BLI_task_parallel_range(0, totnode, &task_data, do_nearest_vertex_get_task_cb, &settings);
MEM_SAFE_FREE(nodes);
@@ -1283,9 +1283,9 @@ static void paint_mesh_restore_co(Sculpt *sd, Object *ob)
.nodes = nodes,
};
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, (sd->flags & SCULPT_USE_OPENMP) && !ss->bm, totnode);
BLI_task_parallel_range(0, totnode, &data, paint_mesh_restore_co_task_cb, &settings);
MEM_SAFE_FREE(nodes);
}
@@ -1909,12 +1909,12 @@ static void calc_area_center(
AreaNormalCenterTLSData anctd = {{{0}}};
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, (sd->flags & SCULPT_USE_OPENMP), totnode);
settings.func_reduce = calc_area_normal_and_center_reduce;
settings.userdata_chunk = &anctd;
settings.userdata_chunk_size = sizeof(AreaNormalCenterTLSData);
BLI_task_parallel_range(0, totnode, &data, calc_area_normal_and_center_task_cb, &settings);
/* For flatten center. */
for (n = 0; n < ARRAY_SIZE(anctd.area_cos); n++) {
@@ -1968,12 +1968,12 @@ bool SCULPT_pbvh_calc_area_normal(const Brush *brush,
AreaNormalCenterTLSData anctd = {{{0}}};
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, use_threading, totnode);
settings.func_reduce = calc_area_normal_and_center_reduce;
settings.userdata_chunk = &anctd;
settings.userdata_chunk_size = sizeof(AreaNormalCenterTLSData);
BLI_task_parallel_range(0, totnode, &data, calc_area_normal_and_center_task_cb, &settings);
/* For area normal. */
for (int i = 0; i < ARRAY_SIZE(anctd.area_nos); i++) {
@@ -2009,12 +2009,12 @@ static void calc_area_normal_and_center(
AreaNormalCenterTLSData anctd = {{{0}}};
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, (sd->flags & SCULPT_USE_OPENMP), totnode);
settings.func_reduce = calc_area_normal_and_center_reduce;
settings.userdata_chunk = &anctd;
settings.userdata_chunk_size = sizeof(AreaNormalCenterTLSData);
BLI_task_parallel_range(0, totnode, &data, calc_area_normal_and_center_task_cb, &settings);
/* For flatten center. */
for (n = 0; n < ARRAY_SIZE(anctd.area_cos); n++) {
@@ -2618,22 +2618,17 @@ static void do_topology_rake_bmesh_task_cb_ex(void *__restrict userdata,
SculptBrushTest test;
SculptBrushTestFn sculpt_brush_test_sq_fn = SCULPT_brush_test_init_with_falloff_shape(
ss, &test, data->brush->falloff_shape);
const int thread_id = BLI_task_parallel_thread_id(tls);
PBVHVertexIter vd;
BKE_pbvh_vertex_iter_begin(ss->pbvh, data->nodes[n], vd, PBVH_ITER_UNIQUE)
{
if (sculpt_brush_test_sq_fn(&test, vd.co)) {
const float fade =
bstrength *
SCULPT_brush_strength_factor(
ss, brush, vd.co, sqrtf(test.dist), vd.no, vd.fno, *vd.mask, vd.index, thread_id) *
ss->cache->pressure;
float avg[3], val[3];
@@ -2675,10 +2670,10 @@ static void bmesh_topology_rake(
.nodes = nodes,
.strength = factor,
};
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, (sd->flags & SCULPT_USE_OPENMP), totnode);
BLI_task_parallel_range(0, totnode, &data, do_topology_rake_bmesh_task_cb_ex, &settings);
}
}
@@ -2696,12 +2691,13 @@ static void do_mask_brush_draw_task_cb_ex(void *__restrict userdata,
SculptBrushTest test;
SculptBrushTestFn sculpt_brush_test_sq_fn = SCULPT_brush_test_init_with_falloff_shape(
ss, &test, data->brush->falloff_shape);
const int thread_id = BLI_task_parallel_thread_id(tls);
BKE_pbvh_vertex_iter_begin(ss->pbvh, data->nodes[n], vd, PBVH_ITER_UNIQUE)
{
if (sculpt_brush_test_sq_fn(&test, vd.co)) {
const float fade = SCULPT_brush_strength_factor(
ss, brush, vd.co, sqrtf(test.dist), vd.no, vd.fno, 0.0f, vd.index, thread_id);
if (bstrength > 0.0f) {
(*vd.mask) += fade * bstrength * (1.0f - *vd.mask);
@@ -2731,9 +2727,9 @@ static void do_mask_brush_draw(Sculpt *sd, Object *ob, PBVHNode **nodes, int tot
.nodes = nodes,
};
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, (sd->flags & SCULPT_USE_OPENMP), totnode);
BLI_task_parallel_range(0, totnode, &data, do_mask_brush_draw_task_cb_ex, &settings);
}
static void do_mask_brush(Sculpt *sd, Object *ob, PBVHNode **nodes, int totnode)
@@ -2768,6 +2764,7 @@ static void do_draw_brush_task_cb_ex(void *__restrict userdata,
SculptBrushTest test;
SculptBrushTestFn sculpt_brush_test_sq_fn = SCULPT_brush_test_init_with_falloff_shape(
ss, &test, data->brush->falloff_shape);
const int thread_id = BLI_task_parallel_thread_id(tls);
BKE_pbvh_vertex_iter_begin(ss->pbvh, data->nodes[n], vd, PBVH_ITER_UNIQUE)
{
@@ -2781,7 +2778,7 @@ static void do_draw_brush_task_cb_ex(void *__restrict userdata,
vd.fno,
vd.mask ? *vd.mask : 0.0f,
vd.index,
thread_id);
mul_v3_v3fl(proxy[vd.i], offset, fade);
@@ -2818,9 +2815,9 @@ static void do_draw_brush(Sculpt *sd, Object *ob, PBVHNode **nodes, int totnode)
.offset = offset,
};
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, (sd->flags & SCULPT_USE_OPENMP), totnode);
BLI_task_parallel_range(0, totnode, &data, do_draw_brush_task_cb_ex, &settings);
}
static void do_draw_sharp_brush_task_cb_ex(void *__restrict userdata,
@@ -2843,6 +2840,7 @@ static void do_draw_sharp_brush_task_cb_ex(void *__restrict userdata,
SculptBrushTest test;
SculptBrushTestFn sculpt_brush_test_sq_fn = SCULPT_brush_test_init_with_falloff_shape(
ss, &test, data->brush->falloff_shape);
const int thread_id = BLI_task_parallel_thread_id(tls);
BKE_pbvh_vertex_iter_begin(ss->pbvh, data->nodes[n], vd, PBVH_ITER_UNIQUE)
{
@@ -2857,7 +2855,7 @@ static void do_draw_sharp_brush_task_cb_ex(void *__restrict userdata,
NULL,
vd.mask ? *vd.mask : 0.0f,
vd.index,
thread_id);
mul_v3_v3fl(proxy[vd.i], offset, fade);
@@ -2894,9 +2892,9 @@ static void do_draw_sharp_brush(Sculpt *sd, Object *ob, PBVHNode **nodes, int to
.offset = offset,
};
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, (sd->flags & SCULPT_USE_OPENMP), totnode);
BLI_task_parallel_range(0, totnode, &data, do_draw_sharp_brush_task_cb_ex, &settings);
}
/* -------------------------------------------------------------------- */
@@ -2923,6 +2921,7 @@ static void do_topology_slide_task_cb_ex(void *__restrict userdata,
SculptBrushTest test;
SculptBrushTestFn sculpt_brush_test_sq_fn = SCULPT_brush_test_init_with_falloff_shape(
ss, &test, data->brush->falloff_shape);
const int thread_id = BLI_task_parallel_thread_id(tls);
BKE_pbvh_vertex_iter_begin(ss->pbvh, data->nodes[n], vd, PBVH_ITER_UNIQUE)
{
@@ -2936,7 +2935,7 @@ static void do_topology_slide_task_cb_ex(void *__restrict userdata,
NULL,
vd.mask ? *vd.mask : 0.0f,
vd.index,
thread_id);
float current_disp[3];
float current_disp_norm[3];
float final_disp[3];
@@ -3037,6 +3036,7 @@ static void do_topology_relax_task_cb_ex(void *__restrict userdata,
SculptBrushTest test;
SculptBrushTestFn sculpt_brush_test_sq_fn = SCULPT_brush_test_init_with_falloff_shape(
ss, &test, data->brush->falloff_shape);
const int thread_id = BLI_task_parallel_thread_id(tls);
BKE_pbvh_vertex_iter_begin(ss->pbvh, data->nodes[n], vd, PBVH_ITER_UNIQUE)
{
@@ -3050,7 +3050,7 @@ static void do_topology_relax_task_cb_ex(void *__restrict userdata,
NULL,
vd.mask ? *vd.mask : 0.0f,
vd.index,
thread_id);
SCULPT_relax_vertex(ss, &vd, fade * bstrength, false, vd.co);
if (vd.mvert) {
@@ -3079,15 +3079,15 @@ static void do_slide_relax_brush(Sculpt *sd, Object *ob, PBVHNode **nodes, int t
.nodes = nodes,
};
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, (sd->flags & SCULPT_USE_OPENMP), totnode);
if (ss->cache->alt_smooth) {
for (int i = 0; i < 4; i++) {
BLI_task_parallel_range(0, totnode, &data, do_topology_relax_task_cb_ex, &settings);
}
}
else {
BLI_task_parallel_range(0, totnode, &data, do_topology_slide_task_cb_ex, &settings);
}
}
@@ -3200,6 +3200,7 @@ static void do_crease_brush_task_cb_ex(void *__restrict userdata,
SculptBrushTest test;
SculptBrushTestFn sculpt_brush_test_sq_fn = SCULPT_brush_test_init_with_falloff_shape(
ss, &test, data->brush->falloff_shape);
const int thread_id = BLI_task_parallel_thread_id(tls);
BKE_pbvh_vertex_iter_begin(ss->pbvh, data->nodes[n], vd, PBVH_ITER_UNIQUE)
{
@@ -3213,7 +3214,7 @@ static void do_crease_brush_task_cb_ex(void *__restrict userdata,
vd.fno,
vd.mask ? *vd.mask : 0.0f,
vd.index,
thread_id);
float val1[3];
float val2[3];
@@ -3288,9 +3289,9 @@ static void do_crease_brush(Sculpt *sd, Object *ob, PBVHNode **nodes, int totnod
.flippedbstrength = flippedbstrength,
};
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, (sd->flags & SCULPT_USE_OPENMP), totnode);
BLI_task_parallel_range(0, totnode, &data, do_crease_brush_task_cb_ex, &settings);
}
static void do_pinch_brush_task_cb_ex(void *__restrict userdata,
@@ -3311,6 +3312,7 @@ static void do_pinch_brush_task_cb_ex(void *__restrict userdata,
SculptBrushTest test;
SculptBrushTestFn sculpt_brush_test_sq_fn = SCULPT_brush_test_init_with_falloff_shape(
ss, &test, data->brush->falloff_shape);
const int thread_id = BLI_task_parallel_thread_id(tls);
float x_object_space[3];
float z_object_space[3];
@@ -3328,7 +3330,7 @@ static void do_pinch_brush_task_cb_ex(void *__restrict userdata,
vd.fno,
vd.mask ? *vd.mask : 0.0f,
vd.index,
thread_id);
float disp_center[3];
float x_disp[3];
float z_disp[3];
@@ -3401,9 +3403,9 @@ static void do_pinch_brush(Sculpt *sd, Object *ob, PBVHNode **nodes, int totnode
.stroke_xz = stroke_xz,
};
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, (sd->flags & SCULPT_USE_OPENMP), totnode);
BLI_task_parallel_range(0, totnode, &data, do_pinch_brush_task_cb_ex, &settings);
}
static void do_grab_brush_task_cb_ex(void *__restrict userdata,
@@ -3427,6 +3429,7 @@ static void do_grab_brush_task_cb_ex(void *__restrict userdata,
SculptBrushTest test;
SculptBrushTestFn sculpt_brush_test_sq_fn = SCULPT_brush_test_init_with_falloff_shape(
ss, &test, data->brush->falloff_shape);
const int thread_id = BLI_task_parallel_thread_id(tls);
BKE_pbvh_vertex_iter_begin(ss->pbvh, data->nodes[n], vd, PBVH_ITER_UNIQUE)
{
@@ -3441,7 +3444,7 @@ static void do_grab_brush_task_cb_ex(void *__restrict userdata,
NULL,
vd.mask ? *vd.mask : 0.0f,
vd.index,
thread_id);
mul_v3_v3fl(proxy[vd.i], grab_delta, fade);
@@ -3473,9 +3476,9 @@ static void do_grab_brush(Sculpt *sd, Object *ob, PBVHNode **nodes, int totnode)
.grab_delta = grab_delta,
};
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, (sd->flags & SCULPT_USE_OPENMP), totnode);
BLI_task_parallel_range(0, totnode, &data, do_grab_brush_task_cb_ex, &settings);
}
static void do_elastic_deform_brush_task_cb_ex(void *__restrict userdata,
@@ -3582,9 +3585,9 @@ static void do_elastic_deform_brush(Sculpt *sd, Object *ob, PBVHNode **nodes, in
.grab_delta = grab_delta,
};
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, (sd->flags & SCULPT_USE_OPENMP), totnode);
BLI_task_parallel_range(0, totnode, &data, do_elastic_deform_brush_task_cb_ex, &settings);
}
ePaintSymmetryAreas SCULPT_get_vertex_symm_area(const float co[3])
@@ -3744,6 +3747,7 @@ static void do_nudge_brush_task_cb_ex(void *__restrict userdata,
SculptBrushTest test;
SculptBrushTestFn sculpt_brush_test_sq_fn = SCULPT_brush_test_init_with_falloff_shape(
ss, &test, data->brush->falloff_shape);
const int thread_id = BLI_task_parallel_thread_id(tls);
BKE_pbvh_vertex_iter_begin(ss->pbvh, data->nodes[n], vd, PBVH_ITER_UNIQUE)
{
@@ -3756,7 +3760,7 @@ static void do_nudge_brush_task_cb_ex(void *__restrict userdata,
vd.fno,
vd.mask ? *vd.mask : 0.0f,
vd.index,
tls->thread_id);
thread_id);
mul_v3_v3fl(proxy[vd.i], cono, fade);
@@ -3788,9 +3792,9 @@ static void do_nudge_brush(Sculpt *sd, Object *ob, PBVHNode **nodes, int totnode
.cono = cono,
};
PBVHParallelSettings settings;
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, (sd->flags & SCULPT_USE_OPENMP), totnode);
BKE_pbvh_parallel_range(0, totnode, &data, do_nudge_brush_task_cb_ex, &settings);
BLI_task_parallel_range(0, totnode, &data, do_nudge_brush_task_cb_ex, &settings);
}
static void do_snake_hook_brush_task_cb_ex(void *__restrict userdata,
@@ -3817,6 +3821,7 @@ static void do_snake_hook_brush_task_cb_ex(void *__restrict userdata,
SculptBrushTest test;
SculptBrushTestFn sculpt_brush_test_sq_fn = SCULPT_brush_test_init_with_falloff_shape(
ss, &test, data->brush->falloff_shape);
const int thread_id = BLI_task_parallel_thread_id(tls);
BKE_pbvh_vertex_iter_begin(ss->pbvh, data->nodes[n], vd, PBVH_ITER_UNIQUE)
{
@@ -3829,7 +3834,7 @@ static void do_snake_hook_brush_task_cb_ex(void *__restrict userdata,
vd.fno,
vd.mask ? *vd.mask : 0.0f,
vd.index,
tls->thread_id);
thread_id);
mul_v3_v3fl(proxy[vd.i], grab_delta, fade);
@@ -3909,9 +3914,9 @@ static void do_snake_hook_brush(Sculpt *sd, Object *ob, PBVHNode **nodes, int to
.grab_delta = grab_delta,
};
PBVHParallelSettings settings;
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, (sd->flags & SCULPT_USE_OPENMP), totnode);
BKE_pbvh_parallel_range(0, totnode, &data, do_snake_hook_brush_task_cb_ex, &settings);
BLI_task_parallel_range(0, totnode, &data, do_snake_hook_brush_task_cb_ex, &settings);
}
static void do_thumb_brush_task_cb_ex(void *__restrict userdata,
@@ -3935,6 +3940,7 @@ static void do_thumb_brush_task_cb_ex(void *__restrict userdata,
SculptBrushTest test;
SculptBrushTestFn sculpt_brush_test_sq_fn = SCULPT_brush_test_init_with_falloff_shape(
ss, &test, data->brush->falloff_shape);
const int thread_id = BLI_task_parallel_thread_id(tls);
BKE_pbvh_vertex_iter_begin(ss->pbvh, data->nodes[n], vd, PBVH_ITER_UNIQUE)
{
@@ -3949,7 +3955,7 @@ static void do_thumb_brush_task_cb_ex(void *__restrict userdata,
NULL,
vd.mask ? *vd.mask : 0.0f,
vd.index,
tls->thread_id);
thread_id);
mul_v3_v3fl(proxy[vd.i], cono, fade);
@@ -3981,9 +3987,9 @@ static void do_thumb_brush(Sculpt *sd, Object *ob, PBVHNode **nodes, int totnode
.cono = cono,
};
PBVHParallelSettings settings;
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, (sd->flags & SCULPT_USE_OPENMP), totnode);
BKE_pbvh_parallel_range(0, totnode, &data, do_thumb_brush_task_cb_ex, &settings);
BLI_task_parallel_range(0, totnode, &data, do_thumb_brush_task_cb_ex, &settings);
}
static void do_rotate_brush_task_cb_ex(void *__restrict userdata,
@@ -4007,6 +4013,7 @@ static void do_rotate_brush_task_cb_ex(void *__restrict userdata,
SculptBrushTest test;
SculptBrushTestFn sculpt_brush_test_sq_fn = SCULPT_brush_test_init_with_falloff_shape(
ss, &test, data->brush->falloff_shape);
const int thread_id = BLI_task_parallel_thread_id(tls);
BKE_pbvh_vertex_iter_begin(ss->pbvh, data->nodes[n], vd, PBVH_ITER_UNIQUE)
{
@@ -4022,7 +4029,7 @@ static void do_rotate_brush_task_cb_ex(void *__restrict userdata,
NULL,
vd.mask ? *vd.mask : 0.0f,
vd.index,
tls->thread_id);
thread_id);
sub_v3_v3v3(vec, orig_data.co, ss->cache->location);
axis_angle_normalized_to_mat3(rot, ss->cache->sculpt_normal_symm, angle * fade);
@@ -4054,9 +4061,9 @@ static void do_rotate_brush(Sculpt *sd, Object *ob, PBVHNode **nodes, int totnod
.angle = angle,
};
PBVHParallelSettings settings;
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, (sd->flags & SCULPT_USE_OPENMP), totnode);
BKE_pbvh_parallel_range(0, totnode, &data, do_rotate_brush_task_cb_ex, &settings);
BLI_task_parallel_range(0, totnode, &data, do_rotate_brush_task_cb_ex, &settings);
}
static void do_layer_brush_task_cb_ex(void *__restrict userdata,
@@ -4078,6 +4085,7 @@ static void do_layer_brush_task_cb_ex(void *__restrict userdata,
SculptBrushTest test;
SculptBrushTestFn sculpt_brush_test_sq_fn = SCULPT_brush_test_init_with_falloff_shape(
ss, &test, data->brush->falloff_shape);
const int thread_id = BLI_task_parallel_thread_id(tls);
BKE_pbvh_vertex_iter_begin(ss->pbvh, data->nodes[n], vd, PBVH_ITER_UNIQUE)
{
@@ -4092,7 +4100,7 @@ static void do_layer_brush_task_cb_ex(void *__restrict userdata,
vd.fno,
vd.mask ? *vd.mask : 0.0f,
vd.index,
tls->thread_id);
thread_id);
const int vi = vd.index;
float *disp_factor;
@@ -4170,9 +4178,9 @@ static void do_layer_brush(Sculpt *sd, Object *ob, PBVHNode **nodes, int totnode
.nodes = nodes,
};
PBVHParallelSettings settings;
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, (sd->flags & SCULPT_USE_OPENMP), totnode);
BKE_pbvh_parallel_range(0, totnode, &data, do_layer_brush_task_cb_ex, &settings);
BLI_task_parallel_range(0, totnode, &data, do_layer_brush_task_cb_ex, &settings);
}
static void do_inflate_brush_task_cb_ex(void *__restrict userdata,
@@ -4192,6 +4200,7 @@ static void do_inflate_brush_task_cb_ex(void *__restrict userdata,
SculptBrushTest test;
SculptBrushTestFn sculpt_brush_test_sq_fn = SCULPT_brush_test_init_with_falloff_shape(
ss, &test, data->brush->falloff_shape);
const int thread_id = BLI_task_parallel_thread_id(tls);
BKE_pbvh_vertex_iter_begin(ss->pbvh, data->nodes[n], vd, PBVH_ITER_UNIQUE)
{
@@ -4204,7 +4213,7 @@ static void do_inflate_brush_task_cb_ex(void *__restrict userdata,
vd.fno,
vd.mask ? *vd.mask : 0.0f,
vd.index,
tls->thread_id);
thread_id);
float val[3];
if (vd.fno) {
@@ -4236,9 +4245,9 @@ static void do_inflate_brush(Sculpt *sd, Object *ob, PBVHNode **nodes, int totno
.nodes = nodes,
};
PBVHParallelSettings settings;
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, (sd->flags & SCULPT_USE_OPENMP), totnode);
BKE_pbvh_parallel_range(0, totnode, &data, do_inflate_brush_task_cb_ex, &settings);
BLI_task_parallel_range(0, totnode, &data, do_inflate_brush_task_cb_ex, &settings);
}
int SCULPT_plane_trim(const StrokeCache *cache, const Brush *brush, const float val[3])
@@ -4294,6 +4303,7 @@ static void do_flatten_brush_task_cb_ex(void *__restrict userdata,
SculptBrushTest test;
SculptBrushTestFn sculpt_brush_test_sq_fn = SCULPT_brush_test_init_with_falloff_shape(
ss, &test, data->brush->falloff_shape);
const int thread_id = BLI_task_parallel_thread_id(tls);
plane_from_point_normal_v3(test.plane_tool, area_co, area_no);
@@ -4316,7 +4326,7 @@ static void do_flatten_brush_task_cb_ex(void *__restrict userdata,
vd.fno,
vd.mask ? *vd.mask : 0.0f,
vd.index,
tls->thread_id);
thread_id);
mul_v3_v3fl(proxy[vd.i], val, fade);
@@ -4360,9 +4370,9 @@ static void do_flatten_brush(Sculpt *sd, Object *ob, PBVHNode **nodes, int totno
.area_co = area_co,
};
PBVHParallelSettings settings;
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, (sd->flags & SCULPT_USE_OPENMP), totnode);
BKE_pbvh_parallel_range(0, totnode, &data, do_flatten_brush_task_cb_ex, &settings);
BLI_task_parallel_range(0, totnode, &data, do_flatten_brush_task_cb_ex, &settings);
}
/* -------------------------------------------------------------------- */
@@ -4448,6 +4458,7 @@ static void do_clay_brush_task_cb_ex(void *__restrict userdata,
SculptBrushTest test;
SculptBrushTestFn sculpt_brush_test_sq_fn = SCULPT_brush_test_init_with_falloff_shape(
ss, &test, data->brush->falloff_shape);
const int thread_id = BLI_task_parallel_thread_id(tls);
plane_from_point_normal_v3(test.plane_tool, area_co, area_no);
@@ -4468,7 +4479,7 @@ static void do_clay_brush_task_cb_ex(void *__restrict userdata,
vd.fno,
vd.mask ? *vd.mask : 0.0f,
vd.index,
tls->thread_id);
thread_id);
mul_v3_v3fl(proxy[vd.i], val, fade);
@@ -4510,13 +4521,13 @@ static void do_clay_brush(Sculpt *sd, Object *ob, PBVHNode **nodes, int totnode)
ClaySampleData csd = {{0}};
PBVHParallelSettings sample_settings;
TaskParallelSettings sample_settings;
BKE_pbvh_parallel_range_settings(&sample_settings, (sd->flags & SCULPT_USE_OPENMP), totnode);
sample_settings.func_reduce = calc_clay_surface_reduce;
sample_settings.userdata_chunk = &csd;
sample_settings.userdata_chunk_size = sizeof(ClaySampleData);
BKE_pbvh_parallel_range(0, totnode, &sample_data, calc_clay_surface_task_cb, &sample_settings);
BLI_task_parallel_range(0, totnode, &sample_data, calc_clay_surface_task_cb, &sample_settings);
float d_offset = (csd.plane_dist[0] + csd.plane_dist[1]);
d_offset = min_ff(radius, d_offset);
@@ -4541,9 +4552,9 @@ static void do_clay_brush(Sculpt *sd, Object *ob, PBVHNode **nodes, int totnode)
.area_co = area_co,
};
PBVHParallelSettings settings;
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, (sd->flags & SCULPT_USE_OPENMP), totnode);
BKE_pbvh_parallel_range(0, totnode, &data, do_clay_brush_task_cb_ex, &settings);
BLI_task_parallel_range(0, totnode, &data, do_clay_brush_task_cb_ex, &settings);
}
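The clay brush also exercises the reduction path: func_reduce, userdata_chunk and userdata_chunk_size are now plain members of TaskParallelSettings rather than of the removed PBVHParallelSettings. A hedged sketch of that reduce pattern, computing a min/max over an array (FloatMinMax and all function names here are illustrative):

#include <float.h>

#include "BLI_math_base.h"
#include "BLI_task.h"
#include "BLI_utildefines.h"

typedef struct FloatMinMax {
  float min, max;
} FloatMinMax;

static void minmax_task_cb(void *__restrict userdata,
                           const int n,
                           const TaskParallelTLS *__restrict tls)
{
  const float *values = userdata;
  FloatMinMax *chunk = tls->userdata_chunk; /* Per-thread copy of the chunk. */
  chunk->min = min_ff(chunk->min, values[n]);
  chunk->max = max_ff(chunk->max, values[n]);
}

static void minmax_reduce(const void *__restrict UNUSED(userdata),
                          void *__restrict chunk_join,
                          void *__restrict chunk)
{
  /* Joins two per-thread chunks as the range finishes. */
  FloatMinMax *join = chunk_join;
  const FloatMinMax *other = chunk;
  join->min = min_ff(join->min, other->min);
  join->max = max_ff(join->max, other->max);
}

static void minmax_run(const float *values, int totvalues, FloatMinMax *r_result)
{
  r_result->min = FLT_MAX;
  r_result->max = -FLT_MAX;

  TaskParallelSettings settings;
  BLI_parallel_range_settings_defaults(&settings);
  settings.func_reduce = minmax_reduce;
  settings.userdata_chunk = r_result;
  settings.userdata_chunk_size = sizeof(FloatMinMax);
  BLI_task_parallel_range(0, totvalues, (void *)values, minmax_task_cb, &settings);
  /* The reduced result lands in *r_result, which is what the clay sample
   * and dirty-mask code above relies on. */
}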
static void do_clay_strips_brush_task_cb_ex(void *__restrict userdata,
@@ -4567,6 +4578,7 @@ static void do_clay_strips_brush_task_cb_ex(void *__restrict userdata,
SCULPT_brush_test_init(ss, &test);
plane_from_point_normal_v3(test.plane_tool, area_co, area_no_sp);
const int thread_id = BLI_task_parallel_thread_id(tls);
BKE_pbvh_vertex_iter_begin(ss->pbvh, data->nodes[n], vd, PBVH_ITER_UNIQUE)
{
@@ -4590,7 +4602,7 @@ static void do_clay_strips_brush_task_cb_ex(void *__restrict userdata,
vd.fno,
vd.mask ? *vd.mask : 0.0f,
vd.index,
tls->thread_id);
thread_id);
mul_v3_v3fl(proxy[vd.i], val, fade);
@@ -4674,9 +4686,9 @@ static void do_clay_strips_brush(Sculpt *sd, Object *ob, PBVHNode **nodes, int t
.mat = mat,
};
PBVHParallelSettings settings;
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, (sd->flags & SCULPT_USE_OPENMP), totnode);
BKE_pbvh_parallel_range(0, totnode, &data, do_clay_strips_brush_task_cb_ex, &settings);
BLI_task_parallel_range(0, totnode, &data, do_clay_strips_brush_task_cb_ex, &settings);
}
static void do_fill_brush_task_cb_ex(void *__restrict userdata,
@@ -4698,6 +4710,7 @@ static void do_fill_brush_task_cb_ex(void *__restrict userdata,
SculptBrushTest test;
SculptBrushTestFn sculpt_brush_test_sq_fn = SCULPT_brush_test_init_with_falloff_shape(
ss, &test, data->brush->falloff_shape);
const int thread_id = BLI_task_parallel_thread_id(tls);
plane_from_point_normal_v3(test.plane_tool, area_co, area_no);
@@ -4721,7 +4734,7 @@ static void do_fill_brush_task_cb_ex(void *__restrict userdata,
vd.fno,
vd.mask ? *vd.mask : 0.0f,
vd.index,
tls->thread_id);
thread_id);
mul_v3_v3fl(proxy[vd.i], val, fade);
@@ -4767,9 +4780,9 @@ static void do_fill_brush(Sculpt *sd, Object *ob, PBVHNode **nodes, int totnode)
.area_co = area_co,
};
PBVHParallelSettings settings;
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, (sd->flags & SCULPT_USE_OPENMP), totnode);
BKE_pbvh_parallel_range(0, totnode, &data, do_fill_brush_task_cb_ex, &settings);
BLI_task_parallel_range(0, totnode, &data, do_fill_brush_task_cb_ex, &settings);
}
static void do_scrape_brush_task_cb_ex(void *__restrict userdata,
@@ -4791,6 +4804,7 @@ static void do_scrape_brush_task_cb_ex(void *__restrict userdata,
SculptBrushTest test;
SculptBrushTestFn sculpt_brush_test_sq_fn = SCULPT_brush_test_init_with_falloff_shape(
ss, &test, data->brush->falloff_shape);
const int thread_id = BLI_task_parallel_thread_id(tls);
plane_from_point_normal_v3(test.plane_tool, area_co, area_no);
BKE_pbvh_vertex_iter_begin(ss->pbvh, data->nodes[n], vd, PBVH_ITER_UNIQUE)
@@ -4813,7 +4827,7 @@ static void do_scrape_brush_task_cb_ex(void *__restrict userdata,
vd.fno,
vd.mask ? *vd.mask : 0.0f,
vd.index,
tls->thread_id);
thread_id);
mul_v3_v3fl(proxy[vd.i], val, fade);
@@ -4859,9 +4873,9 @@ static void do_scrape_brush(Sculpt *sd, Object *ob, PBVHNode **nodes, int totnod
.area_co = area_co,
};
PBVHParallelSettings settings;
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, (sd->flags & SCULPT_USE_OPENMP), totnode);
BKE_pbvh_parallel_range(0, totnode, &data, do_scrape_brush_task_cb_ex, &settings);
BLI_task_parallel_range(0, totnode, &data, do_scrape_brush_task_cb_ex, &settings);
}
/* -------------------------------------------------------------------- */
@@ -4889,6 +4903,7 @@ static void do_clay_thumb_brush_task_cb_ex(void *__restrict userdata,
SculptBrushTest test;
SculptBrushTestFn sculpt_brush_test_sq_fn = SCULPT_brush_test_init_with_falloff_shape(
ss, &test, data->brush->falloff_shape);
const int thread_id = BLI_task_parallel_thread_id(tls);
float plane_tilt[4];
float normal_tilt[3];
@@ -4929,7 +4944,7 @@ static void do_clay_thumb_brush_task_cb_ex(void *__restrict userdata,
vd.fno,
vd.mask ? *vd.mask : 0.0f,
vd.index,
tls->thread_id);
thread_id);
mul_v3_v3fl(proxy[vd.i], val, fade);
@@ -5031,9 +5046,9 @@ static void do_clay_thumb_brush(Sculpt *sd, Object *ob, PBVHNode **nodes, int to
.clay_strength = clay_strength,
};
PBVHParallelSettings settings;
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, (sd->flags & SCULPT_USE_OPENMP), totnode);
BKE_pbvh_parallel_range(0, totnode, &data, do_clay_thumb_brush_task_cb_ex, &settings);
BLI_task_parallel_range(0, totnode, &data, do_clay_thumb_brush_task_cb_ex, &settings);
}
/** \} */
@@ -5055,6 +5070,7 @@ static void do_gravity_task_cb_ex(void *__restrict userdata,
SculptBrushTest test;
SculptBrushTestFn sculpt_brush_test_sq_fn = SCULPT_brush_test_init_with_falloff_shape(
ss, &test, data->brush->falloff_shape);
const int thread_id = BLI_task_parallel_thread_id(tls);
BKE_pbvh_vertex_iter_begin(ss->pbvh, data->nodes[n], vd, PBVH_ITER_UNIQUE)
{
@@ -5067,7 +5083,7 @@ static void do_gravity_task_cb_ex(void *__restrict userdata,
vd.fno,
vd.mask ? *vd.mask : 0.0f,
vd.index,
tls->thread_id);
thread_id);
mul_v3_v3fl(proxy[vd.i], offset, fade);
@@ -5102,9 +5118,9 @@ static void do_gravity(Sculpt *sd, Object *ob, PBVHNode **nodes, int totnode, fl
.offset = offset,
};
PBVHParallelSettings settings;
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, (sd->flags & SCULPT_USE_OPENMP), totnode);
BKE_pbvh_parallel_range(0, totnode, &data, do_gravity_task_cb_ex, &settings);
BLI_task_parallel_range(0, totnode, &data, do_gravity_task_cb_ex, &settings);
}
void SCULPT_vertcos_to_key(Object *ob, KeyBlock *kb, const float (*vertCos)[3])
@@ -5294,9 +5310,9 @@ static void do_brush_action(Sculpt *sd, Object *ob, Brush *brush, UnifiedPaintSe
.nodes = nodes,
};
PBVHParallelSettings settings;
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, (sd->flags & SCULPT_USE_OPENMP), totnode);
BKE_pbvh_parallel_range(0, totnode, &task_data, do_brush_action_task_cb, &settings);
BLI_task_parallel_range(0, totnode, &task_data, do_brush_action_task_cb, &settings);
/* Draw Face Sets in draw mode makes a single undo push; in alt-smooth mode it deforms the
* vertices and uses regular coords undo. */
@@ -5557,9 +5573,9 @@ static void sculpt_combine_proxies(Sculpt *sd, Object *ob)
.nodes = nodes,
};
PBVHParallelSettings settings;
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, (sd->flags & SCULPT_USE_OPENMP), totnode);
BKE_pbvh_parallel_range(0, totnode, &data, sculpt_combine_proxies_task_cb, &settings);
BLI_task_parallel_range(0, totnode, &data, sculpt_combine_proxies_task_cb, &settings);
}
MEM_SAFE_FREE(nodes);
@@ -5645,9 +5661,9 @@ void SCULPT_flush_stroke_deform(Sculpt *sd, Object *ob, bool is_proxy_used)
.vertCos = vertCos,
};
PBVHParallelSettings settings;
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, (sd->flags & SCULPT_USE_OPENMP), totnode);
BKE_pbvh_parallel_range(0, totnode, &data, SCULPT_flush_stroke_deform_task_cb, &settings);
BLI_task_parallel_range(0, totnode, &data, SCULPT_flush_stroke_deform_task_cb, &settings);
if (vertCos) {
SCULPT_vertcos_to_key(ob, ss->shapekey_active, vertCos);

View File

@@ -221,6 +221,7 @@ static void do_cloth_brush_apply_forces_task_cb_ex(void *__restrict userdata,
SculptBrushTest test;
SculptBrushTestFn sculpt_brush_test_sq_fn = SCULPT_brush_test_init_with_falloff_shape(
ss, &test, data->brush->falloff_shape);
const int thread_id = BLI_task_parallel_thread_id(tls);
/* For Pinch Perpendicular Deform Type. */
float x_object_space[3];
@@ -269,7 +270,7 @@ static void do_cloth_brush_apply_forces_task_cb_ex(void *__restrict userdata,
vd.fno,
vd.mask ? *vd.mask : 0.0f,
vd.index,
tls->thread_id);
thread_id);
float brush_disp[3];
float normal[3];
@@ -412,7 +413,7 @@ static void cloth_brush_build_nodes_constraints(Sculpt *sd,
* storing the constraints per node. */
/* Currently all constraints are added to the same global array, which can't be accessed from
* different threads. */
PBVHParallelSettings settings;
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, false, totnode);
SculptThreadedTaskData build_constraints_data = {
@@ -421,7 +422,7 @@ static void cloth_brush_build_nodes_constraints(Sculpt *sd,
.brush = brush,
.nodes = nodes,
};
BKE_pbvh_parallel_range(
BLI_task_parallel_range(
0, totnode, &build_constraints_data, do_cloth_brush_build_constraints_task_cb_ex, &settings);
}
@@ -490,9 +491,9 @@ static void cloth_brush_do_simulation_step(Sculpt *sd, Object *ob, PBVHNode **no
.cloth_time_step = CLOTH_SIMULATION_TIME_STEP,
};
PBVHParallelSettings settings;
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, (sd->flags & SCULPT_USE_OPENMP), totnode);
BKE_pbvh_parallel_range(
BLI_task_parallel_range(
0, totnode, &solve_simulation_data, do_cloth_brush_solve_simulation_task_cb_ex, &settings);
}
@@ -565,9 +566,9 @@ static void cloth_brush_apply_brush_foces(Sculpt *sd, Object *ob, PBVHNode **nod
}
}
PBVHParallelSettings settings;
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, (sd->flags & SCULPT_USE_OPENMP), totnode);
BKE_pbvh_parallel_range(
BLI_task_parallel_range(
0, totnode, &apply_forces_data, do_cloth_brush_apply_forces_task_cb_ex, &settings);
}

View File

@@ -87,6 +87,7 @@ static void do_draw_face_sets_brush_task_cb_ex(void *__restrict userdata,
SculptBrushTest test;
SculptBrushTestFn sculpt_brush_test_sq_fn = SCULPT_brush_test_init_with_falloff_shape(
ss, &test, data->brush->falloff_shape);
const int thread_id = BLI_task_parallel_thread_id(tls);
BKE_pbvh_vertex_iter_begin(ss->pbvh, data->nodes[n], vd, PBVH_ITER_UNIQUE)
{
@@ -107,7 +108,7 @@ static void do_draw_face_sets_brush_task_cb_ex(void *__restrict userdata,
vd.fno,
vd.mask ? *vd.mask : 0.0f,
vd.index,
tls->thread_id);
thread_id);
if (fade > 0.05f && ss->face_sets[vert_map->indices[j]] > 0) {
ss->face_sets[vert_map->indices[j]] = abs(ss->cache->paint_face_set);
@@ -127,7 +128,7 @@ static void do_draw_face_sets_brush_task_cb_ex(void *__restrict userdata,
vd.fno,
vd.mask ? *vd.mask : 0.0f,
vd.index,
tls->thread_id);
thread_id);
if (fade > 0.05f) {
SCULPT_vertex_face_set_set(ss, vd.index, ss->cache->paint_face_set);
@@ -160,6 +161,8 @@ static void do_relax_face_sets_brush_task_cb_ex(void *__restrict userdata,
bstrength *= 2.0f;
}
const int thread_id = BLI_task_parallel_thread_id(tls);
BKE_pbvh_vertex_iter_begin(ss->pbvh, data->nodes[n], vd, PBVH_ITER_UNIQUE)
{
if (sculpt_brush_test_sq_fn(&test, vd.co)) {
@@ -172,7 +175,7 @@ static void do_relax_face_sets_brush_task_cb_ex(void *__restrict userdata,
vd.fno,
vd.mask ? *vd.mask : 0.0f,
vd.index,
tls->thread_id);
thread_id);
SCULPT_relax_vertex(ss, &vd, fade * bstrength, relax_face_sets, vd.co);
if (vd.mvert) {
@@ -211,15 +214,15 @@ void SCULPT_do_draw_face_sets_brush(Sculpt *sd, Object *ob, PBVHNode **nodes, in
.nodes = nodes,
};
PBVHParallelSettings settings;
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, (sd->flags & SCULPT_USE_OPENMP), totnode);
if (ss->cache->alt_smooth) {
for (int i = 0; i < 4; i++) {
BKE_pbvh_parallel_range(0, totnode, &data, do_relax_face_sets_brush_task_cb_ex, &settings);
BLI_task_parallel_range(0, totnode, &data, do_relax_face_sets_brush_task_cb_ex, &settings);
}
}
else {
BKE_pbvh_parallel_range(0, totnode, &data, do_draw_face_sets_brush_task_cb_ex, &settings);
BLI_task_parallel_range(0, totnode, &data, do_draw_face_sets_brush_task_cb_ex, &settings);
}
}

View File

@@ -246,9 +246,9 @@ static int sculpt_mask_filter_exec(bContext *C, wmOperator *op)
.prev_mask = prev_mask,
};
PBVHParallelSettings settings;
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, (sd->flags & SCULPT_USE_OPENMP), totnode);
BKE_pbvh_parallel_range(0, totnode, &data, mask_filter_task_cb, &settings);
BLI_task_parallel_range(0, totnode, &data, mask_filter_task_cb, &settings);
if (ELEM(filter_type, MASK_FILTER_GROW, MASK_FILTER_SHRINK)) {
MEM_freeN(prev_mask);
@@ -275,9 +275,9 @@ void SCULPT_mask_filter_smooth_apply(
};
for (int i = 0; i < smooth_iterations; i++) {
PBVHParallelSettings settings;
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, (sd->flags & SCULPT_USE_OPENMP), totnode);
BKE_pbvh_parallel_range(0, totnode, &data, mask_filter_task_cb, &settings);
BLI_task_parallel_range(0, totnode, &data, mask_filter_task_cb, &settings);
}
}
@@ -458,17 +458,17 @@ static int sculpt_dirty_mask_exec(bContext *C, wmOperator *op)
.max = -FLT_MAX,
};
PBVHParallelSettings settings;
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, (sd->flags & SCULPT_USE_OPENMP), totnode);
settings.func_reduce = dirty_mask_compute_range_reduce;
settings.userdata_chunk = &range;
settings.userdata_chunk_size = sizeof(DirtyMaskRangeData);
BKE_pbvh_parallel_range(0, totnode, &data, dirty_mask_compute_range_task_cb, &settings);
BLI_task_parallel_range(0, totnode, &data, dirty_mask_compute_range_task_cb, &settings);
data.dirty_mask_min = range.min;
data.dirty_mask_max = range.max;
BKE_pbvh_parallel_range(0, totnode, &data, dirty_mask_apply_task_cb, &settings);
BLI_task_parallel_range(0, totnode, &data, dirty_mask_apply_task_cb, &settings);
MEM_SAFE_FREE(nodes);

View File

@@ -112,10 +112,10 @@ void SCULPT_filter_cache_init(Object *ob, Sculpt *sd)
.nodes = ss->filter_cache->nodes,
};
PBVHParallelSettings settings;
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(
&settings, (sd->flags & SCULPT_USE_OPENMP), ss->filter_cache->totnode);
BKE_pbvh_parallel_range(
BLI_task_parallel_range(
0, ss->filter_cache->totnode, &data, filter_cache_init_task_cb, &settings);
}
@@ -496,13 +496,13 @@ static int sculpt_mesh_filter_modal(bContext *C, wmOperator *op, const wmEvent *
.filter_strength = filter_strength,
};
PBVHParallelSettings settings;
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(
&settings, (sd->flags & SCULPT_USE_OPENMP), ss->filter_cache->totnode);
BKE_pbvh_parallel_range(0, ss->filter_cache->totnode, &data, mesh_filter_task_cb, &settings);
BLI_task_parallel_range(0, ss->filter_cache->totnode, &data, mesh_filter_task_cb, &settings);
if (filter_type == MESH_FILTER_SURFACE_SMOOTH) {
BKE_pbvh_parallel_range(0,
BLI_task_parallel_range(0,
ss->filter_cache->totnode,
&data,
mesh_filter_surface_smooth_displace_task_cb,

View File

@@ -288,10 +288,10 @@ static int sculpt_mask_expand_modal(bContext *C, wmOperator *op, const wmEvent *
.mask_expand_keep_prev_mask = RNA_boolean_get(op->ptr, "keep_previous_mask"),
.mask_expand_create_face_set = RNA_boolean_get(op->ptr, "create_face_set"),
};
PBVHParallelSettings settings;
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(
&settings, (sd->flags & SCULPT_USE_OPENMP), ss->filter_cache->totnode);
BKE_pbvh_parallel_range(0, ss->filter_cache->totnode, &data, sculpt_expand_task_cb, &settings);
BLI_task_parallel_range(0, ss->filter_cache->totnode, &data, sculpt_expand_task_cb, &settings);
ss->filter_cache->mask_update_current_it = mask_expand_update_it;
}
@@ -458,10 +458,10 @@ static int sculpt_mask_expand_invoke(bContext *C, wmOperator *op, const wmEvent
.mask_expand_keep_prev_mask = RNA_boolean_get(op->ptr, "keep_previous_mask"),
.mask_expand_create_face_set = RNA_boolean_get(op->ptr, "create_face_set"),
};
PBVHParallelSettings settings;
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(
&settings, (sd->flags & SCULPT_USE_OPENMP), ss->filter_cache->totnode);
BKE_pbvh_parallel_range(0, ss->filter_cache->totnode, &data, sculpt_expand_task_cb, &settings);
BLI_task_parallel_range(0, ss->filter_cache->totnode, &data, sculpt_expand_task_cb, &settings);
const char *status_str = TIP_(
"Move the mouse to expand the mask from the active vertex. LMB: confirm mask, ESC/RMB: "

View File

@@ -79,6 +79,7 @@ static void calc_multiplane_scrape_surface_task_cb(void *__restrict userdata,
SculptBrushTest test;
SculptBrushTestFn sculpt_brush_test_sq_fn = SCULPT_brush_test_init_with_falloff_shape(
ss, &test, brush->falloff_shape);
const int thread_id = BLI_task_parallel_thread_id(tls);
/* Apply the brush normal radius to the test before sampling. */
float test_radius = sqrtf(test.radius_squared);
@@ -107,7 +108,7 @@ static void calc_multiplane_scrape_surface_task_cb(void *__restrict userdata,
vd.fno,
vd.mask ? *vd.mask : 0.0f,
vd.index,
tls->thread_id);
thread_id);
/* Sample the normal and area of the +X and -X axis individually. */
if (local_co[0] > 0.0f) {
@@ -163,6 +164,7 @@ static void do_multiplane_scrape_brush_task_cb_ex(void *__restrict userdata,
SculptBrushTest test;
SculptBrushTestFn sculpt_brush_test_sq_fn = SCULPT_brush_test_init_with_falloff_shape(
ss, &test, data->brush->falloff_shape);
const int thread_id = BLI_task_parallel_thread_id(tls);
BKE_pbvh_vertex_iter_begin(ss->pbvh, data->nodes[n], vd, PBVH_ITER_UNIQUE)
{
@@ -208,7 +210,7 @@ static void do_multiplane_scrape_brush_task_cb_ex(void *__restrict userdata,
vd.fno,
vd.mask ? *vd.mask : 0.0f,
vd.index,
tls->thread_id);
thread_id);
mul_v3_v3fl(proxy[vd.i], val, fade);
@@ -301,13 +303,13 @@ void SCULPT_do_multiplane_scrape_brush(Sculpt *sd, Object *ob, PBVHNode **nodes,
MultiplaneScrapeSampleData mssd = {{{0}}};
PBVHParallelSettings sample_settings;
TaskParallelSettings sample_settings;
BKE_pbvh_parallel_range_settings(&sample_settings, (sd->flags & SCULPT_USE_OPENMP), totnode);
sample_settings.func_reduce = calc_multiplane_scrape_surface_reduce;
sample_settings.userdata_chunk = &mssd;
sample_settings.userdata_chunk_size = sizeof(MultiplaneScrapeSampleData);
BKE_pbvh_parallel_range(
BLI_task_parallel_range(
0, totnode, &sample_data, calc_multiplane_scrape_surface_task_cb, &sample_settings);
float sampled_plane_normals[2][3];
@@ -392,9 +394,9 @@ void SCULPT_do_multiplane_scrape_brush(Sculpt *sd, Object *ob, PBVHNode **nodes,
normalize_v3(plane_no);
plane_from_point_normal_v3(data.multiplane_scrape_planes[0], area_co, plane_no);
PBVHParallelSettings settings;
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, (sd->flags & SCULPT_USE_OPENMP), totnode);
BKE_pbvh_parallel_range(0, totnode, &data, do_multiplane_scrape_brush_task_cb_ex, &settings);
BLI_task_parallel_range(0, totnode, &data, do_multiplane_scrape_brush_task_cb_ex, &settings);
}
void SCULPT_multiplane_scrape_preview_draw(const uint gpuattr,

View File

@@ -263,7 +263,7 @@ static void sculpt_pose_grow_pose_factor(Sculpt *sd,
};
data.pose_initial_co = pose_target;
PBVHParallelSettings settings;
TaskParallelSettings settings;
PoseGrowFactorTLSData gftd;
gftd.pos_count = 0;
zero_v3(gftd.pos_avg);
@@ -279,7 +279,7 @@ static void sculpt_pose_grow_pose_factor(Sculpt *sd,
zero_v3(gftd.pos_avg);
gftd.pos_count = 0;
memcpy(data.prev_mask, pose_factor, SCULPT_vertex_count_get(ss) * sizeof(float));
BKE_pbvh_parallel_range(0, totnode, &data, pose_brush_grow_factor_task_cb_ex, &settings);
BLI_task_parallel_range(0, totnode, &data, pose_brush_grow_factor_task_cb_ex, &settings);
if (gftd.pos_count != 0) {
mul_v3_fl(gftd.pos_avg, 1.0f / (float)gftd.pos_count);
@@ -793,9 +793,9 @@ void SCULPT_pose_brush_init(Sculpt *sd, Object *ob, SculptSession *ss, Brush *br
for (int ik = 0; ik < ss->cache->pose_ik_chain->tot_segments; ik++) {
data.pose_factor = ss->cache->pose_ik_chain->segments[ik].weights;
for (int i = 0; i < br->pose_smooth_iterations; i++) {
PBVHParallelSettings settings;
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, (sd->flags & SCULPT_USE_OPENMP), totnode);
BKE_pbvh_parallel_range(0, totnode, &data, pose_brush_init_task_cb_ex, &settings);
BLI_task_parallel_range(0, totnode, &data, pose_brush_init_task_cb_ex, &settings);
}
}
@@ -885,9 +885,9 @@ void SCULPT_do_pose_brush(Sculpt *sd, Object *ob, PBVHNode **nodes, int totnode)
.grab_delta = grab_delta,
};
PBVHParallelSettings settings;
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, (sd->flags & SCULPT_USE_OPENMP), totnode);
BKE_pbvh_parallel_range(0, totnode, &data, do_pose_brush_task_cb_ex, &settings);
BLI_task_parallel_range(0, totnode, &data, do_pose_brush_task_cb_ex, &settings);
}
void SCULPT_pose_ik_chain_free(SculptPoseIKChain *ik_chain)

View File

@@ -245,6 +245,8 @@ static void do_smooth_brush_mesh_task_cb_ex(void *__restrict userdata,
SculptBrushTestFn sculpt_brush_test_sq_fn = SCULPT_brush_test_init_with_falloff_shape(
ss, &test, data->brush->falloff_shape);
const int thread_id = BLI_task_parallel_thread_id(tls);
BKE_pbvh_vertex_iter_begin(ss->pbvh, data->nodes[n], vd, PBVH_ITER_UNIQUE)
{
if (sculpt_brush_test_sq_fn(&test, vd.co)) {
@@ -257,7 +259,7 @@ static void do_smooth_brush_mesh_task_cb_ex(void *__restrict userdata,
vd.fno,
smooth_mask ? 0.0f : (vd.mask ? *vd.mask : 0.0f),
vd.index,
tls->thread_id);
thread_id);
if (smooth_mask) {
float val = SCULPT_neighbor_mask_average(ss, vd.vert_indices[vd.i]) - *vd.mask;
val *= fade * bstrength;
@@ -301,6 +303,7 @@ static void do_smooth_brush_bmesh_task_cb_ex(void *__restrict userdata,
SculptBrushTest test;
SculptBrushTestFn sculpt_brush_test_sq_fn = SCULPT_brush_test_init_with_falloff_shape(
ss, &test, data->brush->falloff_shape);
const int thread_id = BLI_task_parallel_thread_id(tls);
BKE_pbvh_vertex_iter_begin(ss->pbvh, data->nodes[n], vd, PBVH_ITER_UNIQUE)
{
@@ -313,7 +316,7 @@ static void do_smooth_brush_bmesh_task_cb_ex(void *__restrict userdata,
vd.fno,
smooth_mask ? 0.0f : *vd.mask,
vd.index,
tls->thread_id);
thread_id);
if (smooth_mask) {
float val = SCULPT_neighbor_mask_average(ss, vd.index) - *vd.mask;
val *= fade * bstrength;
@@ -358,6 +361,8 @@ static void do_smooth_brush_multires_task_cb_ex(void *__restrict userdata,
SculptBrushTestFn sculpt_brush_test_sq_fn = SCULPT_brush_test_init_with_falloff_shape(
ss, &test, data->brush->falloff_shape);
const int thread_id = BLI_task_parallel_thread_id(tls);
BKE_pbvh_vertex_iter_begin(ss->pbvh, data->nodes[n], vd, PBVH_ITER_UNIQUE)
{
if (sculpt_brush_test_sq_fn(&test, vd.co)) {
@@ -370,7 +375,7 @@ static void do_smooth_brush_multires_task_cb_ex(void *__restrict userdata,
vd.fno,
smooth_mask ? 0.0f : (vd.mask ? *vd.mask : 0.0f),
vd.index,
tls->thread_id);
thread_id);
if (smooth_mask) {
float val = SCULPT_neighbor_mask_average(ss, vd.index) - *vd.mask;
val *= fade * bstrength;
@@ -427,18 +432,18 @@ void SCULPT_smooth(Sculpt *sd,
.strength = strength,
};
PBVHParallelSettings settings;
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, (sd->flags & SCULPT_USE_OPENMP), totnode);
switch (type) {
case PBVH_GRIDS:
BKE_pbvh_parallel_range(0, totnode, &data, do_smooth_brush_multires_task_cb_ex, &settings);
BLI_task_parallel_range(0, totnode, &data, do_smooth_brush_multires_task_cb_ex, &settings);
break;
case PBVH_FACES:
BKE_pbvh_parallel_range(0, totnode, &data, do_smooth_brush_mesh_task_cb_ex, &settings);
BLI_task_parallel_range(0, totnode, &data, do_smooth_brush_mesh_task_cb_ex, &settings);
break;
case PBVH_BMESH:
BKE_pbvh_parallel_range(0, totnode, &data, do_smooth_brush_bmesh_task_cb_ex, &settings);
BLI_task_parallel_range(0, totnode, &data, do_smooth_brush_bmesh_task_cb_ex, &settings);
break;
}
}
@@ -512,6 +517,7 @@ static void SCULPT_do_surface_smooth_brush_laplacian_task_cb_ex(
SculptBrushTest test;
SculptBrushTestFn sculpt_brush_test_sq_fn = SCULPT_brush_test_init_with_falloff_shape(
ss, &test, data->brush->falloff_shape);
const int thread_id = BLI_task_parallel_thread_id(tls);
SCULPT_orig_vert_data_init(&orig_data, data->ob, data->nodes[n]);
@@ -522,7 +528,7 @@ static void SCULPT_do_surface_smooth_brush_laplacian_task_cb_ex(
const float fade =
bstrength *
SCULPT_brush_strength_factor(
ss, brush, vd.co, sqrtf(test.dist), vd.no, vd.fno, 0.0f, vd.index, tls->thread_id);
ss, brush, vd.co, sqrtf(test.dist), vd.no, vd.fno, 0.0f, vd.index, thread_id);
float disp[3];
SCULPT_surface_smooth_laplacian_step(ss,
@@ -555,6 +561,7 @@ static void SCULPT_do_surface_smooth_brush_displace_task_cb_ex(
SculptBrushTest test;
SculptBrushTestFn sculpt_brush_test_sq_fn = SCULPT_brush_test_init_with_falloff_shape(
ss, &test, data->brush->falloff_shape);
const int thread_id = BLI_task_parallel_thread_id(tls);
BKE_pbvh_vertex_iter_begin(ss->pbvh, data->nodes[n], vd, PBVH_ITER_UNIQUE)
{
@@ -562,7 +569,7 @@ static void SCULPT_do_surface_smooth_brush_displace_task_cb_ex(
const float fade =
bstrength *
SCULPT_brush_strength_factor(
ss, brush, vd.co, sqrtf(test.dist), vd.no, vd.fno, 0.0f, vd.index, tls->thread_id);
ss, brush, vd.co, sqrtf(test.dist), vd.no, vd.fno, 0.0f, vd.index, thread_id);
SCULPT_surface_smooth_displace_step(
ss, vd.co, ss->cache->surface_smooth_laplacian_disp, vd.index, beta, fade);
}
@@ -590,12 +597,12 @@ void SCULPT_do_surface_smooth_brush(Sculpt *sd, Object *ob, PBVHNode **nodes, in
.nodes = nodes,
};
PBVHParallelSettings settings;
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, (sd->flags & SCULPT_USE_OPENMP), totnode);
for (int i = 0; i < brush->surface_smooth_iterations; i++) {
BKE_pbvh_parallel_range(
BLI_task_parallel_range(
0, totnode, &data, SCULPT_do_surface_smooth_brush_laplacian_task_cb_ex, &settings);
BKE_pbvh_parallel_range(
BLI_task_parallel_range(
0, totnode, &data, SCULPT_do_surface_smooth_brush_displace_task_cb_ex, &settings);
}
}

View File

@@ -177,10 +177,10 @@ void ED_sculpt_update_modal_transform(struct bContext *C)
mul_m4_m4m4(data.transform_mats[i], pivot_mat, data.transform_mats[i]);
}
PBVHParallelSettings settings;
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(
&settings, (sd->flags & SCULPT_USE_OPENMP), ss->filter_cache->totnode);
BKE_pbvh_parallel_range(
BLI_task_parallel_range(
0, ss->filter_cache->totnode, &data, sculpt_transform_task_cb, &settings);
if (ss->deform_modifiers_active || ss->shapekey_active) {

View File

@@ -415,9 +415,9 @@ static void sculpt_undo_bmesh_restore_generic(bContext *C,
BKE_pbvh_search_gather(ss->pbvh, NULL, NULL, &nodes, &totnode);
PBVHParallelSettings settings;
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, (sd->flags & SCULPT_USE_OPENMP), totnode);
BKE_pbvh_parallel_range(
BLI_task_parallel_range(
0, totnode, nodes, sculpt_undo_bmesh_restore_generic_task_cb, &settings);
if (nodes) {

View File

@@ -885,7 +885,7 @@ static uchar *prefetch_thread_next_frame(PrefetchQueue *queue,
return mem;
}
static void prefetch_task_func(TaskPool *__restrict pool, void *task_data, int UNUSED(threadid))
static void prefetch_task_func(TaskPool *__restrict pool, void *task_data)
{
PrefetchQueue *queue = (PrefetchQueue *)BLI_task_pool_user_data(pool);
MovieClip *clip = (MovieClip *)task_data;
@@ -942,9 +942,8 @@ static void start_prefetch_threads(MovieClip *clip,
float *progress)
{
PrefetchQueue queue;
TaskScheduler *task_scheduler = BLI_task_scheduler_get();
TaskPool *task_pool;
int i, tot_thread = BLI_task_scheduler_num_threads(task_scheduler);
int i, tot_thread = BLI_task_scheduler_num_threads();
/* initialize queue */
BLI_spin_init(&queue.spin);
@@ -961,7 +960,7 @@ static void start_prefetch_threads(MovieClip *clip,
queue.do_update = do_update;
queue.progress = progress;
task_pool = BLI_task_pool_create(task_scheduler, &queue, TASK_PRIORITY_LOW);
task_pool = BLI_task_pool_create(&queue, TASK_PRIORITY_LOW);
for (i = 0; i < tot_thread; i++) {
BLI_task_pool_push(task_pool, prefetch_task_func, clip, false, NULL);
}
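Outside the parallel ranges, the TaskPool API changes shape in the same direction: pools are created from user data plus a priority with no TaskScheduler handle, the worker callback loses its threadid parameter, and BLI_task_scheduler_num_threads() takes no argument. The background and suspended pool constructors further down change identically. A minimal sketch of the updated pool usage (WorkQueue, handle_item and run_pool are assumed names):

#include "BLI_math_base.h"
#include "BLI_task.h"

typedef struct WorkQueue WorkQueue;              /* Illustrative user-data type. */
void handle_item(WorkQueue *queue, void *item);  /* Illustrative worker body. */

static void work_func(TaskPool *__restrict pool, void *task_data)
{
  /* Fetch the shared queue from the pool's user data, as before. */
  WorkQueue *queue = BLI_task_pool_user_data(pool);
  handle_item(queue, task_data);
}

static void run_pool(WorkQueue *queue, void **items, int tot_items)
{
  const int tot_thread = BLI_task_scheduler_num_threads(); /* No scheduler arg. */
  TaskPool *task_pool = BLI_task_pool_create(queue, TASK_PRIORITY_LOW);

  for (int i = 0; i < min_ii(tot_items, tot_thread); i++) {
    BLI_task_pool_push(task_pool, work_func, items[i], false, NULL);
  }

  BLI_task_pool_work_and_wait(task_pool);
  BLI_task_pool_free(task_pool);
}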

View File

@@ -1367,7 +1367,7 @@ static uchar *proxy_thread_next_frame(ProxyQueue *queue,
return mem;
}
static void proxy_task_func(TaskPool *__restrict pool, void *task_data, int UNUSED(threadid))
static void proxy_task_func(TaskPool *__restrict pool, void *task_data)
{
ProxyThread *data = (ProxyThread *)task_data;
ProxyQueue *queue = (ProxyQueue *)BLI_task_pool_user_data(pool);
@@ -1413,11 +1413,10 @@ static void do_sequence_proxy(void *pjv,
ProxyJob *pj = pjv;
MovieClip *clip = pj->clip;
Scene *scene = pj->scene;
TaskScheduler *task_scheduler = BLI_task_scheduler_get();
TaskPool *task_pool;
int sfra = SFRA, efra = EFRA;
ProxyThread *handles;
int i, tot_thread = BLI_task_scheduler_num_threads(task_scheduler);
int i, tot_thread = BLI_task_scheduler_num_threads();
int width, height;
ProxyQueue queue;
@@ -1434,7 +1433,7 @@ static void do_sequence_proxy(void *pjv,
queue.do_update = do_update;
queue.progress = progress;
task_pool = BLI_task_pool_create(task_scheduler, &queue, TASK_PRIORITY_LOW);
task_pool = BLI_task_pool_create(&queue, TASK_PRIORITY_LOW);
handles = MEM_callocN(sizeof(ProxyThread) * tot_thread, "proxy threaded handles");
for (i = 0; i < tot_thread; i++) {
ProxyThread *handle = &handles[i];

View File

@@ -1264,9 +1264,7 @@ static void filelist_intern_free(FileListIntern *filelist_intern)
MEM_SAFE_FREE(filelist_intern->filtered);
}
static void filelist_cache_preview_runf(TaskPool *__restrict pool,
void *taskdata,
int UNUSED(threadid))
static void filelist_cache_preview_runf(TaskPool *__restrict pool, void *taskdata)
{
FileListEntryCache *cache = BLI_task_pool_user_data(pool);
FileListEntryPreviewTaskData *preview_taskdata = taskdata;
@@ -1325,9 +1323,7 @@ static void filelist_cache_preview_freef(TaskPool *__restrict UNUSED(pool), void
static void filelist_cache_preview_ensure_running(FileListEntryCache *cache)
{
if (!cache->previews_pool) {
TaskScheduler *scheduler = BLI_task_scheduler_get();
cache->previews_pool = BLI_task_pool_create_background(scheduler, cache, TASK_PRIORITY_LOW);
cache->previews_pool = BLI_task_pool_create_background(cache, TASK_PRIORITY_LOW);
cache->previews_done = BLI_thread_queue_init();
IMB_thumb_locks_acquire();

View File

@@ -338,7 +338,7 @@ void nearest_interpolation(ImBuf *in, ImBuf *out, float x, float y, int xout, in
/*********************** Threaded image processing *************************/
static void processor_apply_func(TaskPool *__restrict pool, void *taskdata, int UNUSED(threadid))
static void processor_apply_func(TaskPool *__restrict pool, void *taskdata)
{
void (*do_thread)(void *) = (void (*)(void *))BLI_task_pool_user_data(pool);
do_thread(taskdata);
@@ -353,14 +353,13 @@ void IMB_processor_apply_threaded(
{
const int lines_per_task = 64;
TaskScheduler *task_scheduler = BLI_task_scheduler_get();
TaskPool *task_pool;
void *handles;
int total_tasks = (buffer_lines + lines_per_task - 1) / lines_per_task;
int i, start_line;
task_pool = BLI_task_pool_create(task_scheduler, do_thread, TASK_PRIORITY_LOW);
task_pool = BLI_task_pool_create(do_thread, TASK_PRIORITY_LOW);
handles = MEM_callocN(handle_size * total_tasks, "processor apply threaded handles");
@@ -399,9 +398,7 @@ typedef struct ScanlineGlobalData {
int total_scanlines;
} ScanlineGlobalData;
static void processor_apply_scanline_func(TaskPool *__restrict pool,
void *taskdata,
int UNUSED(threadid))
static void processor_apply_scanline_func(TaskPool *__restrict pool, void *taskdata)
{
ScanlineGlobalData *data = BLI_task_pool_user_data(pool);
int start_scanline = POINTER_AS_INT(taskdata);
@@ -420,8 +417,7 @@ void IMB_processor_apply_threaded_scanlines(int total_scanlines,
data.scanlines_per_task = scanlines_per_task;
data.total_scanlines = total_scanlines;
const int total_tasks = (total_scanlines + scanlines_per_task - 1) / scanlines_per_task;
TaskScheduler *task_scheduler = BLI_task_scheduler_get();
TaskPool *task_pool = BLI_task_pool_create(task_scheduler, &data, TASK_PRIORITY_LOW);
TaskPool *task_pool = BLI_task_pool_create(&data, TASK_PRIORITY_LOW);
for (int i = 0, start_line = 0; i < total_tasks; i++) {
BLI_task_pool_push(
task_pool, processor_apply_scanline_func, POINTER_FROM_INT(start_line), false, NULL);

View File

@@ -44,6 +44,7 @@
#include "BLI_listbase.h"
#include "BLI_path_util.h"
#include "BLI_string.h"
#include "BLI_task.h"
#include "BLI_threads.h"
#include "BLI_timer.h"
#include "BLI_utildefines.h"
@@ -648,6 +649,7 @@ void WM_exit_ex(bContext *C, const bool do_python)
DNA_sdna_current_free();
BLI_threadapi_exit();
BLI_task_scheduler_exit();
/* No need to call this early, rather do it late so that other
* pieces of Blender using sound may exit cleanly, see also T50676. */

View File

@@ -43,6 +43,7 @@
#include "BLI_args.h"
#include "BLI_string.h"
#include "BLI_system.h"
#include "BLI_task.h"
#include "BLI_threads.h"
#include "BLI_utildefines.h"
@@ -401,6 +402,9 @@ int main(int argc,
G.factory_startup = true;
#endif
/* After parsing number of threads argument. */
BLI_task_scheduler_init();
#ifdef WITH_FFMPEG
IMB_ffmpeg_init();
#endif
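These two hunks pair up: BLI_task_scheduler_init() now runs once in main(), after the number-of-threads argument has been parsed, and BLI_task_scheduler_exit() mirrors it in WM_exit_ex(). A sketch of the lifecycle, with everything between the two calls standing in for the application run:

#include "BLI_task.h"

int main(int argc, const char **argv)
{
  (void)argc;
  (void)argv;

  /* ... parse arguments, including any thread-count override ... */

  BLI_task_scheduler_init(); /* Brings up the TBB-backed scheduler once. */

  /* ... the application runs; BLI_task_* facilities are available here ... */

  BLI_task_scheduler_exit(); /* Torn down late; Blender calls this from WM_exit_ex(). */
  return 0;
}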

View File

@@ -63,7 +63,7 @@ struct IndexedNode {
int index;
};
void concurrent_insert(TaskPool *__restrict pool, void *taskdata, int /*threadid*/)
void concurrent_insert(TaskPool *__restrict pool, void *taskdata)
{
LockfreeLinkList *list = (LockfreeLinkList *)BLI_task_pool_user_data(pool);
CHECK_NOTNULL(list);
@@ -76,14 +76,12 @@ void concurrent_insert(TaskPool *__restrict pool, void *taskdata, int /*threadid
TEST(LockfreeLinkList, InsertMultipleConcurrent)
{
static const int num_threads = 512;
static const int num_nodes = 655360;
/* Initialize list. */
LockfreeLinkList list;
BLI_linklist_lockfree_init(&list);
/* Initialize task scheduler and pool. */
TaskScheduler *scheduler = BLI_task_scheduler_create(num_threads);
TaskPool *pool = BLI_task_pool_create_suspended(scheduler, &list, TASK_PRIORITY_HIGH);
TaskPool *pool = BLI_task_pool_create_suspended(&list, TASK_PRIORITY_HIGH);
/* Push tasks to the pool. */
for (int i = 0; i < num_nodes; ++i) {
BLI_task_pool_push(pool, concurrent_insert, POINTER_FROM_INT(i), false, NULL);
@@ -112,5 +110,4 @@ TEST(LockfreeLinkList, InsertMultipleConcurrent)
/* Cleanup data. */
BLI_linklist_lockfree_free(&list, MEM_freeN);
BLI_task_pool_free(pool);
BLI_task_scheduler_free(scheduler);
}

View File

@@ -54,7 +54,7 @@ TEST(task, RangeIter)
BLI_task_parallel_range(0, NUM_ITEMS, data, task_range_iter_func, &settings);
/* Those checks should ensure us all items of the listbase were processed once, and only once -
/* Those checks should ensure us all items of the listbase were processed once, and only once
* as expected. */
int expected_sum = 0;