2023-08-16 00:20:26 +10:00
|
|
|
/* SPDX-FileCopyrightText: 2023 Blender Authors
|
2023-05-31 16:19:06 +02:00
|
|
|
*
|
|
|
|
|
* SPDX-License-Identifier: GPL-2.0-or-later */
|
2013-10-12 14:08:59 +00:00
|
|
|
|
2019-02-18 08:08:12 +11:00
|
|
|
/** \file
|
|
|
|
|
* \ingroup bli
|
2014-01-19 23:14:24 +11:00
|
|
|
*
|
2020-04-30 07:59:23 +02:00
|
|
|
* Parallel tasks over all elements in a container.
|
2014-01-19 23:14:24 +11:00
|
|
|
*/
|
|
|
|
|
|
2013-10-12 14:08:59 +00:00
|
|
|
#include <stdlib.h>
|
|
|
|
|
|
|
|
|
|
#include "MEM_guardedalloc.h"
|
|
|
|
|
|
2016-05-13 11:03:04 +02:00
|
|
|
#include "DNA_listBase.h"
|
|
|
|
|
|
2013-10-12 14:08:59 +00:00
|
|
|
#include "BLI_listbase.h"
|
Cleanup: reduce amount of math-related includes
Using ClangBuildAnalyzer on the whole Blender build, it was pointing
out that BLI_math.h is the heaviest "header hub" (i.e. non tiny file
that is included a lot).
However, there's very little (actually zero) source files in Blender
that need "all the math" (base, colors, vectors, matrices,
quaternions, intersection, interpolation, statistics, solvers and
time). A common use case is source files needing just vectors, or
just vectors & matrices, or just colors etc. Actually, 181 files
were including the whole math thing without needing it at all.
This change removes BLI_math.h completely, and instead in all the
places that need it, includes BLI_math_vector.h or BLI_math_color.h
and so on.
Change from that:
- BLI_math_color.h was included 1399 times -> now 408 (took 114.0sec
to parse -> now 36.3sec)
- BLI_simd.h 1403 -> 418 (109.7sec -> 34.9sec).
Full rebuild of Blender (Apple M1, Xcode, RelWithDebInfo) is not
affected much (342sec -> 334sec). Most of benefit would be when
someone's changing BLI_simd.h or BLI_math_color.h or similar files,
that now there's 3x fewer files result in a recompile.
Pull Request #110944
2023-08-09 11:39:20 +03:00
|
|
|
#include "BLI_math_base.h"
|
2017-11-23 21:14:43 +01:00
|
|
|
#include "BLI_mempool.h"
|
2021-06-09 22:49:45 +10:00
|
|
|
#include "BLI_mempool_private.h"
|
2013-10-12 14:08:59 +00:00
|
|
|
#include "BLI_task.h"
|
|
|
|
|
#include "BLI_threads.h"
|
|
|
|
|
|
2014-12-02 15:23:58 +05:00
|
|
|
#include "atomic_ops.h"
|
|
|
|
|
|
2021-06-10 02:01:32 +10:00
|
|
|
/* -------------------------------------------------------------------- */
|
|
|
|
|
/** \name Macros
|
|
|
|
|
* \{ */
|
|
|
|
|
|
2015-11-25 11:01:59 +01:00
|
|
|
/* Allows to avoid using malloc for userdata_chunk in tasks, when small enough.
 * Allocations of at most 8192 bytes come from the stack (released automatically when the
 * calling function returns); larger ones fall back to the guarded heap allocator and must be
 * released with #MALLOCA_FREE using the same size. */
/* NOTE: the whole expansion is parenthesized so the ternary behaves as a single
 * expression regardless of the call-site context. */
#define MALLOCA(_size) (((_size) <= 8192) ? alloca(_size) : MEM_mallocN((_size), __func__))
/* Free memory obtained from #MALLOCA: only heap allocations (size > 8192) need an explicit
 * free, stack allocations from alloca() are a no-op here.
 * Wrapped in `do { } while (0)` so the macro is a single statement and composes safely
 * with un-braced `if`/`else` at call sites. */
#define MALLOCA_FREE(_mem, _size) \
  do { \
    if (((_mem) != NULL) && ((_size) > 8192)) { \
      MEM_freeN(_mem); \
    } \
  } while (0)
|
2015-11-25 11:01:59 +01:00
|
|
|
|
2021-06-10 02:01:32 +10:00
|
|
|
/** \} */
|
|
|
|
|
|
|
|
|
|
/* -------------------------------------------------------------------- */
|
|
|
|
|
/** \name Generic Iteration
|
|
|
|
|
* \{ */
|
|
|
|
|
|
BLI_task: Add new generic `BLI_task_parallel_iterator()`.
This new function is part of the 'parallel for loops' functions. It
takes an iterator callback to generate items to be processed, in
addition to the usual 'process' func callback.
This allows to use common code from BLI_task for a wide range of custom
iteratiors, whithout having to re-invent the wheel of the whole tasks &
data chuncks handling.
This supports all settings features from `BLI_task_parallel_range()`,
including dynamic and static (if total number of items is knwon)
scheduling, TLS data and its finalize callback, etc.
One question here is whether we should provide usercode with a spinlock
by default, or enforce it to always handle its own sync mechanism.
I kept it, since imho it will be needed very often, and generating one
is pretty cheap even if unused...
----------
Additionaly, this commit converts (currently unused)
`BLI_task_parallel_listbase()` to use that generic code. This was done
mostly as proof of concept, but performance-wise it shows some
interesting data, roughly:
- Very light processing (that should not be threaded anyway) is several
times slower, which is expected due to more overhead in loop management
code.
- Heavier processing can be up to 10% quicker (probably thanks to the
switch from dynamic to static scheduling, which reduces a lot locking
to fill-in the per-tasks chunks of data). Similar speed-up in
non-threaded case comes as a surprise though, not sure what can
explain that.
While this conversion is not really needed, imho we should keep it
(instead of existing code for that function), it's easier to have
complex handling logic in as few places as possible, for maintaining and
for improving it.
Note: That work was initially done to allow for D5372 to be possible... Unfortunately that one proved to be not better than orig code on performances point of view.
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D5371
2019-10-30 12:23:45 +01:00
|
|
|
BLI_INLINE void task_parallel_calc_chunk_size(const TaskParallelSettings *settings,
|
2022-03-30 17:26:42 +11:00
|
|
|
const int items_num,
|
|
|
|
|
int tasks_num,
|
BLI_task: Add new generic `BLI_task_parallel_iterator()`.
This new function is part of the 'parallel for loops' functions. It
takes an iterator callback to generate items to be processed, in
addition to the usual 'process' func callback.
This allows to use common code from BLI_task for a wide range of custom
iteratiors, whithout having to re-invent the wheel of the whole tasks &
data chuncks handling.
This supports all settings features from `BLI_task_parallel_range()`,
including dynamic and static (if total number of items is knwon)
scheduling, TLS data and its finalize callback, etc.
One question here is whether we should provide usercode with a spinlock
by default, or enforce it to always handle its own sync mechanism.
I kept it, since imho it will be needed very often, and generating one
is pretty cheap even if unused...
----------
Additionaly, this commit converts (currently unused)
`BLI_task_parallel_listbase()` to use that generic code. This was done
mostly as proof of concept, but performance-wise it shows some
interesting data, roughly:
- Very light processing (that should not be threaded anyway) is several
times slower, which is expected due to more overhead in loop management
code.
- Heavier processing can be up to 10% quicker (probably thanks to the
switch from dynamic to static scheduling, which reduces a lot locking
to fill-in the per-tasks chunks of data). Similar speed-up in
non-threaded case comes as a surprise though, not sure what can
explain that.
While this conversion is not really needed, imho we should keep it
(instead of existing code for that function), it's easier to have
complex handling logic in as few places as possible, for maintaining and
for improving it.
Note: That work was initially done to allow for D5372 to be possible... Unfortunately that one proved to be not better than orig code on performances point of view.
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D5371
2019-10-30 12:23:45 +01:00
|
|
|
int *r_chunk_size)
|
2019-07-30 14:36:59 +02:00
|
|
|
{
|
|
|
|
|
int chunk_size = 0;
|
|
|
|
|
|
BLI_task: Add new generic `BLI_task_parallel_iterator()`.
This new function is part of the 'parallel for loops' functions. It
takes an iterator callback to generate items to be processed, in
addition to the usual 'process' func callback.
This allows to use common code from BLI_task for a wide range of custom
iteratiors, whithout having to re-invent the wheel of the whole tasks &
data chuncks handling.
This supports all settings features from `BLI_task_parallel_range()`,
including dynamic and static (if total number of items is knwon)
scheduling, TLS data and its finalize callback, etc.
One question here is whether we should provide usercode with a spinlock
by default, or enforce it to always handle its own sync mechanism.
I kept it, since imho it will be needed very often, and generating one
is pretty cheap even if unused...
----------
Additionaly, this commit converts (currently unused)
`BLI_task_parallel_listbase()` to use that generic code. This was done
mostly as proof of concept, but performance-wise it shows some
interesting data, roughly:
- Very light processing (that should not be threaded anyway) is several
times slower, which is expected due to more overhead in loop management
code.
- Heavier processing can be up to 10% quicker (probably thanks to the
switch from dynamic to static scheduling, which reduces a lot locking
to fill-in the per-tasks chunks of data). Similar speed-up in
non-threaded case comes as a surprise though, not sure what can
explain that.
While this conversion is not really needed, imho we should keep it
(instead of existing code for that function), it's easier to have
complex handling logic in as few places as possible, for maintaining and
for improving it.
Note: That work was initially done to allow for D5372 to be possible... Unfortunately that one proved to be not better than orig code on performances point of view.
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D5371
2019-10-30 12:23:45 +01:00
|
|
|
if (!settings->use_threading) {
|
|
|
|
|
/* Some users of this helper will still need a valid chunk size in case processing is not
|
|
|
|
|
* threaded. We can use a bigger one than in default threaded case then. */
|
|
|
|
|
chunk_size = 1024;
|
2022-03-30 17:26:42 +11:00
|
|
|
tasks_num = 1;
|
BLI_task: Add new generic `BLI_task_parallel_iterator()`.
This new function is part of the 'parallel for loops' functions. It
takes an iterator callback to generate items to be processed, in
addition to the usual 'process' func callback.
This allows to use common code from BLI_task for a wide range of custom
iteratiors, whithout having to re-invent the wheel of the whole tasks &
data chuncks handling.
This supports all settings features from `BLI_task_parallel_range()`,
including dynamic and static (if total number of items is knwon)
scheduling, TLS data and its finalize callback, etc.
One question here is whether we should provide usercode with a spinlock
by default, or enforce it to always handle its own sync mechanism.
I kept it, since imho it will be needed very often, and generating one
is pretty cheap even if unused...
----------
Additionaly, this commit converts (currently unused)
`BLI_task_parallel_listbase()` to use that generic code. This was done
mostly as proof of concept, but performance-wise it shows some
interesting data, roughly:
- Very light processing (that should not be threaded anyway) is several
times slower, which is expected due to more overhead in loop management
code.
- Heavier processing can be up to 10% quicker (probably thanks to the
switch from dynamic to static scheduling, which reduces a lot locking
to fill-in the per-tasks chunks of data). Similar speed-up in
non-threaded case comes as a surprise though, not sure what can
explain that.
While this conversion is not really needed, imho we should keep it
(instead of existing code for that function), it's easier to have
complex handling logic in as few places as possible, for maintaining and
for improving it.
Note: That work was initially done to allow for D5372 to be possible... Unfortunately that one proved to be not better than orig code on performances point of view.
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D5371
2019-10-30 12:23:45 +01:00
|
|
|
}
|
|
|
|
|
else if (settings->min_iter_per_thread > 0) {
|
2019-07-30 14:36:59 +02:00
|
|
|
/* Already set by user, no need to do anything here. */
|
|
|
|
|
chunk_size = settings->min_iter_per_thread;
|
|
|
|
|
}
|
|
|
|
|
else {
|
2019-09-18 17:35:00 +02:00
|
|
|
/* Multiplier used in heuristics below to define "optimal" chunk size.
|
|
|
|
|
* The idea here is to increase the chunk size to compensate for a rather measurable threading
|
2019-07-30 14:36:59 +02:00
|
|
|
* overhead caused by fetching tasks. With too many CPU threads we are starting
|
2019-09-18 17:35:00 +02:00
|
|
|
* to spend too much time in those overheads.
|
2022-03-30 17:26:42 +11:00
|
|
|
* First values are: 1 if tasks_num < 16;
|
|
|
|
|
* else 2 if tasks_num < 32;
|
|
|
|
|
* else 3 if tasks_num < 48;
|
|
|
|
|
* else 4 if tasks_num < 64;
|
2019-09-18 17:35:00 +02:00
|
|
|
* etc.
|
2021-07-03 23:08:40 +10:00
|
|
|
* NOTE: If we wanted to keep the 'power of two' multiplier, we'd need something like:
|
2022-03-30 17:26:42 +11:00
|
|
|
* 1 << max_ii(0, (int)(sizeof(int) * 8) - 1 - bitscan_reverse_i(tasks_num) - 3)
|
2019-09-18 17:35:00 +02:00
|
|
|
*/
|
2022-03-30 17:26:42 +11:00
|
|
|
const int tasks_num_factor = max_ii(1, tasks_num >> 3);
|
2019-09-18 17:35:00 +02:00
|
|
|
|
|
|
|
|
/* We could make that 'base' 32 number configurable in TaskParallelSettings too, or maybe just
|
|
|
|
|
* always use that heuristic using TaskParallelSettings.min_iter_per_thread as basis? */
|
2022-03-30 17:26:42 +11:00
|
|
|
chunk_size = 32 * tasks_num_factor;
|
2019-09-18 17:35:00 +02:00
|
|
|
|
|
|
|
|
/* Basic heuristic to avoid threading on low amount of items.
|
|
|
|
|
* We could make that limit configurable in settings too. */
|
2022-03-30 17:26:42 +11:00
|
|
|
if (items_num > 0 && items_num < max_ii(256, chunk_size * 2)) {
|
|
|
|
|
chunk_size = items_num;
|
2019-07-30 14:36:59 +02:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
BLI_assert(chunk_size > 0);
|
2020-04-30 07:59:23 +02:00
|
|
|
*r_chunk_size = chunk_size;
|
BLI_task: Add pooled threaded index range iterator, Take II.
This code allows to push a set of different operations all based on
iterations over a range of indices, and then process them all at once
over multiple threads.
This commit also adds unit tests for both old un-pooled, and new pooled
task_parallel_range family of functions, as well as some basic
performances tests.
This is mainly interesting for relatively low amount of individual
tasks, as expected.
E.g. performance tests on a 32 threads machine, for a set of 10
different tasks, shows following improvements when using pooled version
instead of ten sequential calls to BLI_task_parallel_range():
| Num Items | Sequential | Pooled | Speed-up |
| --------- | ---------- | ------- | -------- |
| 10K | 365 us | 138 us | 2.5 x |
| 100K | 877 us | 530 us | 1.66 x |
| 1000K | 5521 us | 4625 us | 1.25 x |
Differential Revision: https://developer.blender.org/D6189
Note: Compared to previous commit yesterday, this reworks atomic handling in
parallel iter code, and fixes a dummy double-free bug.
Now we should only use the two critical values for synchronization from
atomic calls results, which is the proper way to do things.
Reading a value after an atomic operation does not guarantee you will
get the latest value in all cases (especially on Windows release builds
it seems).
2019-11-26 14:26:47 +01:00
|
|
|
}
|
|
|
|
|
|
BLI_task: Add new generic `BLI_task_parallel_iterator()`.
This new function is part of the 'parallel for loops' functions. It
takes an iterator callback to generate items to be processed, in
addition to the usual 'process' func callback.
This allows to use common code from BLI_task for a wide range of custom
iteratiors, whithout having to re-invent the wheel of the whole tasks &
data chuncks handling.
This supports all settings features from `BLI_task_parallel_range()`,
including dynamic and static (if total number of items is knwon)
scheduling, TLS data and its finalize callback, etc.
One question here is whether we should provide usercode with a spinlock
by default, or enforce it to always handle its own sync mechanism.
I kept it, since imho it will be needed very often, and generating one
is pretty cheap even if unused...
----------
Additionaly, this commit converts (currently unused)
`BLI_task_parallel_listbase()` to use that generic code. This was done
mostly as proof of concept, but performance-wise it shows some
interesting data, roughly:
- Very light processing (that should not be threaded anyway) is several
times slower, which is expected due to more overhead in loop management
code.
- Heavier processing can be up to 10% quicker (probably thanks to the
switch from dynamic to static scheduling, which reduces a lot locking
to fill-in the per-tasks chunks of data). Similar speed-up in
non-threaded case comes as a surprise though, not sure what can
explain that.
While this conversion is not really needed, imho we should keep it
(instead of existing code for that function), it's easier to have
complex handling logic in as few places as possible, for maintaining and
for improving it.
Note: That work was initially done to allow for D5372 to be possible... Unfortunately that one proved to be not better than orig code on performances point of view.
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D5371
2019-10-30 12:23:45 +01:00
|
|
|
/* Shared state driving a `BLI_task_parallel_iterator()`-style run: the user callbacks plus
 * the synchronization data tasks use to pull chunks of items from the iterator. */
typedef struct TaskParallelIteratorState {
  /* Opaque user data, passed to both the iterator and the process callbacks. */
  void *userdata;
  /* Generator callback producing the next (item, index) pair and the 'finished' flag. */
  TaskParallelIteratorIterFunc iter_func;
  /* Process callback, invoked once per generated item. */
  TaskParallelIteratorFunc func;

  /* *** Data used to 'acquire' chunks of items from the iterator. *** */
  /* Common data also passed to the generator callback. */
  TaskParallelIteratorStateShared iter_shared;
  /* Total number of items. If unknown, set it to a negative number. */
  int items_num;
} TaskParallelIteratorState;
|
|
|
|
|
|
|
|
|
|
/**
 * Core worker loop of the parallel iterator.
 *
 * Repeatedly 'acquires' a chunk of (item, index) pairs from the shared iterator state —
 * taking the shared spin-lock while doing so, when one is provided — then releases the lock
 * and processes the acquired chunk with the user's `func` callback, until the iterator
 * reports it is finished.
 *
 * \param state: Shared iterator state (callbacks, chunk size, synchronization data).
 * \param userdata_chunk: Per-task TLS buffer, forwarded to the callbacks through `tls`.
 */
static void parallel_iterator_func_do(TaskParallelIteratorState *__restrict state,
                                      void *userdata_chunk)
{
  TaskParallelTLS tls = {
      .userdata_chunk = userdata_chunk,
  };

  /* Per-call scratch buffers holding one chunk's worth of items and their indices. */
  void **current_chunk_items;
  int *current_chunk_indices;
  int current_chunk_size;

  const size_t items_size = sizeof(*current_chunk_items) * (size_t)state->iter_shared.chunk_size;
  const size_t indices_size = sizeof(*current_chunk_indices) *
                              (size_t)state->iter_shared.chunk_size;

  /* Stack-allocated for small chunk sizes, heap otherwise (see MALLOCA). */
  current_chunk_items = MALLOCA(items_size);
  current_chunk_indices = MALLOCA(indices_size);
  current_chunk_size = 0;

  for (bool do_abort = false; !do_abort;) {
    /* The spin-lock may be NULL when the run is single-threaded; only lock when shared. */
    if (state->iter_shared.spin_lock != NULL) {
      BLI_spin_lock(state->iter_shared.spin_lock);
    }

    /* Get current status. */
    int index = state->iter_shared.next_index;
    void *item = state->iter_shared.next_item;
    int i;

    /* 'Acquire' a chunk of items from the iterator function. */
    for (i = 0; i < state->iter_shared.chunk_size && !state->iter_shared.is_finished; i++) {
      current_chunk_indices[i] = index;
      current_chunk_items[i] = item;
      state->iter_func(state->userdata, &tls, &item, &index, &state->iter_shared.is_finished);
    }

    /* Update current status, so the next acquiring task resumes where this one stopped. */
    state->iter_shared.next_index = index;
    state->iter_shared.next_item = item;
    current_chunk_size = i;

    do_abort = state->iter_shared.is_finished;

    if (state->iter_shared.spin_lock != NULL) {
      BLI_spin_unlock(state->iter_shared.spin_lock);
    }

    /* Process the acquired chunk outside of the lock, so other tasks can acquire items. */
    for (i = 0; i < current_chunk_size; ++i) {
      state->func(state->userdata, current_chunk_items[i], current_chunk_indices[i], &tls);
    }
  }

  MALLOCA_FREE(current_chunk_items, items_size);
  MALLOCA_FREE(current_chunk_indices, indices_size);
}
|
|
|
|
|
|
2020-04-30 07:59:23 +02:00
|
|
|
/* TaskPool entry point: fetch the shared iterator state stored as the pool's user data and
 * run the common iteration loop with this task's TLS chunk. */
static void parallel_iterator_func(TaskPool *__restrict pool, void *userdata_chunk)
{
  TaskParallelIteratorState *__restrict iter_state = BLI_task_pool_user_data(pool);
  parallel_iterator_func_do(iter_state, userdata_chunk);
}
|
|
|
|
|
|
|
|
|
|
static void task_parallel_iterator_no_threads(const TaskParallelSettings *settings,
|
|
|
|
|
TaskParallelIteratorState *state)
|
|
|
|
|
{
|
|
|
|
|
/* Prepare user's TLS data. */
|
|
|
|
|
void *userdata_chunk = settings->userdata_chunk;
|
2021-07-29 10:35:12 -03:00
|
|
|
if (userdata_chunk) {
|
2021-07-15 14:43:25 +10:00
|
|
|
if (settings->func_init != NULL) {
|
2021-07-29 10:35:12 -03:00
|
|
|
settings->func_init(state->userdata, userdata_chunk);
|
2021-07-15 14:43:25 +10:00
|
|
|
}
|
BLI_task: Add new generic `BLI_task_parallel_iterator()`.
This new function is part of the 'parallel for loops' functions. It
takes an iterator callback to generate items to be processed, in
addition to the usual 'process' func callback.
This allows to use common code from BLI_task for a wide range of custom
iteratiors, whithout having to re-invent the wheel of the whole tasks &
data chuncks handling.
This supports all settings features from `BLI_task_parallel_range()`,
including dynamic and static (if total number of items is knwon)
scheduling, TLS data and its finalize callback, etc.
One question here is whether we should provide usercode with a spinlock
by default, or enforce it to always handle its own sync mechanism.
I kept it, since imho it will be needed very often, and generating one
is pretty cheap even if unused...
----------
Additionaly, this commit converts (currently unused)
`BLI_task_parallel_listbase()` to use that generic code. This was done
mostly as proof of concept, but performance-wise it shows some
interesting data, roughly:
- Very light processing (that should not be threaded anyway) is several
times slower, which is expected due to more overhead in loop management
code.
- Heavier processing can be up to 10% quicker (probably thanks to the
switch from dynamic to static scheduling, which reduces a lot locking
to fill-in the per-tasks chunks of data). Similar speed-up in
non-threaded case comes as a surprise though, not sure what can
explain that.
While this conversion is not really needed, imho we should keep it
(instead of existing code for that function), it's easier to have
complex handling logic in as few places as possible, for maintaining and
for improving it.
Note: That work was initially done to allow for D5372 to be possible... Unfortunately that one proved to be not better than orig code on performances point of view.
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D5371
2019-10-30 12:23:45 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Also marking it as non-threaded for the iterator callback. */
|
|
|
|
|
state->iter_shared.spin_lock = NULL;
|
|
|
|
|
|
2020-04-30 07:59:23 +02:00
|
|
|
parallel_iterator_func_do(state, userdata_chunk);
|
BLI_task: Add new generic `BLI_task_parallel_iterator()`.
This new function is part of the 'parallel for loops' functions. It
takes an iterator callback to generate items to be processed, in
addition to the usual 'process' func callback.
This allows to use common code from BLI_task for a wide range of custom
iteratiors, whithout having to re-invent the wheel of the whole tasks &
data chuncks handling.
This supports all settings features from `BLI_task_parallel_range()`,
including dynamic and static (if total number of items is knwon)
scheduling, TLS data and its finalize callback, etc.
One question here is whether we should provide usercode with a spinlock
by default, or enforce it to always handle its own sync mechanism.
I kept it, since imho it will be needed very often, and generating one
is pretty cheap even if unused...
----------
Additionaly, this commit converts (currently unused)
`BLI_task_parallel_listbase()` to use that generic code. This was done
mostly as proof of concept, but performance-wise it shows some
interesting data, roughly:
- Very light processing (that should not be threaded anyway) is several
times slower, which is expected due to more overhead in loop management
code.
- Heavier processing can be up to 10% quicker (probably thanks to the
switch from dynamic to static scheduling, which reduces a lot locking
to fill-in the per-tasks chunks of data). Similar speed-up in
non-threaded case comes as a surprise though, not sure what can
explain that.
While this conversion is not really needed, imho we should keep it
(instead of existing code for that function), it's easier to have
complex handling logic in as few places as possible, for maintaining and
for improving it.
Note: That work was initially done to allow for D5372 to be possible... Unfortunately that one proved to be not better than orig code on performances point of view.
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D5371
2019-10-30 12:23:45 +01:00
|
|
|
|
2021-07-29 10:35:12 -03:00
|
|
|
if (userdata_chunk) {
|
2021-06-09 20:17:03 +10:00
|
|
|
if (settings->func_free != NULL) {
|
|
|
|
|
/* `func_free` should only free data that was created during execution of `func`. */
|
2021-07-29 10:35:12 -03:00
|
|
|
settings->func_free(state->userdata, userdata_chunk);
|
2021-06-09 20:17:03 +10:00
|
|
|
}
|
2016-05-13 11:03:04 +02:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
BLI_task: Add new generic `BLI_task_parallel_iterator()`.
This new function is part of the 'parallel for loops' functions. It
takes an iterator callback to generate items to be processed, in
addition to the usual 'process' func callback.
This allows to use common code from BLI_task for a wide range of custom
iteratiors, whithout having to re-invent the wheel of the whole tasks &
data chuncks handling.
This supports all settings features from `BLI_task_parallel_range()`,
including dynamic and static (if total number of items is knwon)
scheduling, TLS data and its finalize callback, etc.
One question here is whether we should provide usercode with a spinlock
by default, or enforce it to always handle its own sync mechanism.
I kept it, since imho it will be needed very often, and generating one
is pretty cheap even if unused...
----------
Additionaly, this commit converts (currently unused)
`BLI_task_parallel_listbase()` to use that generic code. This was done
mostly as proof of concept, but performance-wise it shows some
interesting data, roughly:
- Very light processing (that should not be threaded anyway) is several
times slower, which is expected due to more overhead in loop management
code.
- Heavier processing can be up to 10% quicker (probably thanks to the
switch from dynamic to static scheduling, which reduces a lot locking
to fill-in the per-tasks chunks of data). Similar speed-up in
non-threaded case comes as a surprise though, not sure what can
explain that.
While this conversion is not really needed, imho we should keep it
(instead of existing code for that function), it's easier to have
complex handling logic in as few places as possible, for maintaining and
for improving it.
Note: That work was initially done to allow for D5372 to be possible... Unfortunately that one proved to be not better than orig code on performances point of view.
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D5371
2019-10-30 12:23:45 +01:00
|
|
|
static void task_parallel_iterator_do(const TaskParallelSettings *settings,
|
|
|
|
|
TaskParallelIteratorState *state)
|
2018-11-20 12:17:03 +01:00
|
|
|
{
|
2022-03-30 17:26:42 +11:00
|
|
|
const int threads_num = BLI_task_scheduler_num_threads();
|
BLI_task: Add new generic `BLI_task_parallel_iterator()`.
This new function is part of the 'parallel for loops' functions. It
takes an iterator callback to generate items to be processed, in
addition to the usual 'process' func callback.
This allows to use common code from BLI_task for a wide range of custom
iteratiors, whithout having to re-invent the wheel of the whole tasks &
data chuncks handling.
This supports all settings features from `BLI_task_parallel_range()`,
including dynamic and static (if total number of items is knwon)
scheduling, TLS data and its finalize callback, etc.
One question here is whether we should provide usercode with a spinlock
by default, or enforce it to always handle its own sync mechanism.
I kept it, since imho it will be needed very often, and generating one
is pretty cheap even if unused...
----------
Additionaly, this commit converts (currently unused)
`BLI_task_parallel_listbase()` to use that generic code. This was done
mostly as proof of concept, but performance-wise it shows some
interesting data, roughly:
- Very light processing (that should not be threaded anyway) is several
times slower, which is expected due to more overhead in loop management
code.
- Heavier processing can be up to 10% quicker (probably thanks to the
switch from dynamic to static scheduling, which reduces a lot locking
to fill-in the per-tasks chunks of data). Similar speed-up in
non-threaded case comes as a surprise though, not sure what can
explain that.
While this conversion is not really needed, imho we should keep it
(instead of existing code for that function), it's easier to have
complex handling logic in as few places as possible, for maintaining and
for improving it.
Note: That work was initially done to allow for D5372 to be possible... Unfortunately that one proved to be not better than orig code on performances point of view.
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D5371
2019-10-30 12:23:45 +01:00
|
|
|
|
2020-04-30 07:59:23 +02:00
|
|
|
task_parallel_calc_chunk_size(
|
2022-03-30 17:26:42 +11:00
|
|
|
settings, state->items_num, threads_num, &state->iter_shared.chunk_size);
|
BLI_task: Add new generic `BLI_task_parallel_iterator()`.
This new function is part of the 'parallel for loops' functions. It
takes an iterator callback to generate items to be processed, in
addition to the usual 'process' func callback.
This allows to use common code from BLI_task for a wide range of custom
iteratiors, whithout having to re-invent the wheel of the whole tasks &
data chuncks handling.
This supports all settings features from `BLI_task_parallel_range()`,
including dynamic and static (if total number of items is knwon)
scheduling, TLS data and its finalize callback, etc.
One question here is whether we should provide usercode with a spinlock
by default, or enforce it to always handle its own sync mechanism.
I kept it, since imho it will be needed very often, and generating one
is pretty cheap even if unused...
----------
Additionaly, this commit converts (currently unused)
`BLI_task_parallel_listbase()` to use that generic code. This was done
mostly as proof of concept, but performance-wise it shows some
interesting data, roughly:
- Very light processing (that should not be threaded anyway) is several
times slower, which is expected due to more overhead in loop management
code.
- Heavier processing can be up to 10% quicker (probably thanks to the
switch from dynamic to static scheduling, which reduces a lot locking
to fill-in the per-tasks chunks of data). Similar speed-up in
non-threaded case comes as a surprise though, not sure what can
explain that.
While this conversion is not really needed, imho we should keep it
(instead of existing code for that function), it's easier to have
complex handling logic in as few places as possible, for maintaining and
for improving it.
Note: That work was initially done to allow for D5372 to be possible... Unfortunately that one proved to be not better than orig code on performances point of view.
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D5371
2019-10-30 12:23:45 +01:00
|
|
|
|
|
|
|
|
if (!settings->use_threading) {
|
|
|
|
|
task_parallel_iterator_no_threads(settings, state);
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
const int chunk_size = state->iter_shared.chunk_size;
|
2022-03-30 17:26:42 +11:00
|
|
|
const int items_num = state->items_num;
|
|
|
|
|
const size_t tasks_num = items_num >= 0 ?
|
|
|
|
|
(size_t)min_ii(threads_num, state->items_num / chunk_size) :
|
|
|
|
|
(size_t)threads_num;
|
BLI_task: Add new generic `BLI_task_parallel_iterator()`.
This new function is part of the 'parallel for loops' functions. It
takes an iterator callback to generate items to be processed, in
addition to the usual 'process' func callback.
This allows to use common code from BLI_task for a wide range of custom
iteratiors, whithout having to re-invent the wheel of the whole tasks &
data chuncks handling.
This supports all settings features from `BLI_task_parallel_range()`,
including dynamic and static (if total number of items is knwon)
scheduling, TLS data and its finalize callback, etc.
One question here is whether we should provide usercode with a spinlock
by default, or enforce it to always handle its own sync mechanism.
I kept it, since imho it will be needed very often, and generating one
is pretty cheap even if unused...
----------
Additionaly, this commit converts (currently unused)
`BLI_task_parallel_listbase()` to use that generic code. This was done
mostly as proof of concept, but performance-wise it shows some
interesting data, roughly:
- Very light processing (that should not be threaded anyway) is several
times slower, which is expected due to more overhead in loop management
code.
- Heavier processing can be up to 10% quicker (probably thanks to the
switch from dynamic to static scheduling, which reduces a lot locking
to fill-in the per-tasks chunks of data). Similar speed-up in
non-threaded case comes as a surprise though, not sure what can
explain that.
While this conversion is not really needed, imho we should keep it
(instead of existing code for that function), it's easier to have
complex handling logic in as few places as possible, for maintaining and
for improving it.
Note: That work was initially done to allow for D5372 to be possible... Unfortunately that one proved to be not better than orig code on performances point of view.
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D5371
2019-10-30 12:23:45 +01:00
|
|
|
|
2022-03-30 17:26:42 +11:00
|
|
|
BLI_assert(tasks_num > 0);
|
|
|
|
|
if (tasks_num == 1) {
|
BLI_task: Add new generic `BLI_task_parallel_iterator()`.
This new function is part of the 'parallel for loops' functions. It
takes an iterator callback to generate items to be processed, in
addition to the usual 'process' func callback.
This allows to use common code from BLI_task for a wide range of custom
iteratiors, whithout having to re-invent the wheel of the whole tasks &
data chuncks handling.
This supports all settings features from `BLI_task_parallel_range()`,
including dynamic and static (if total number of items is knwon)
scheduling, TLS data and its finalize callback, etc.
One question here is whether we should provide usercode with a spinlock
by default, or enforce it to always handle its own sync mechanism.
I kept it, since imho it will be needed very often, and generating one
is pretty cheap even if unused...
----------
Additionaly, this commit converts (currently unused)
`BLI_task_parallel_listbase()` to use that generic code. This was done
mostly as proof of concept, but performance-wise it shows some
interesting data, roughly:
- Very light processing (that should not be threaded anyway) is several
times slower, which is expected due to more overhead in loop management
code.
- Heavier processing can be up to 10% quicker (probably thanks to the
switch from dynamic to static scheduling, which reduces a lot locking
to fill-in the per-tasks chunks of data). Similar speed-up in
non-threaded case comes as a surprise though, not sure what can
explain that.
While this conversion is not really needed, imho we should keep it
(instead of existing code for that function), it's easier to have
complex handling logic in as few places as possible, for maintaining and
for improving it.
Note: That work was initially done to allow for D5372 to be possible... Unfortunately that one proved to be not better than orig code on performances point of view.
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D5371
2019-10-30 12:23:45 +01:00
|
|
|
task_parallel_iterator_no_threads(settings, state);
|
|
|
|
|
return;
|
2018-11-20 12:17:03 +01:00
|
|
|
}
|
BLI_task: Add new generic `BLI_task_parallel_iterator()`.
This new function is part of the 'parallel for loops' functions. It
takes an iterator callback to generate items to be processed, in
addition to the usual 'process' func callback.
This allows to use common code from BLI_task for a wide range of custom
iteratiors, whithout having to re-invent the wheel of the whole tasks &
data chuncks handling.
This supports all settings features from `BLI_task_parallel_range()`,
including dynamic and static (if total number of items is knwon)
scheduling, TLS data and its finalize callback, etc.
One question here is whether we should provide usercode with a spinlock
by default, or enforce it to always handle its own sync mechanism.
I kept it, since imho it will be needed very often, and generating one
is pretty cheap even if unused...
----------
Additionaly, this commit converts (currently unused)
`BLI_task_parallel_listbase()` to use that generic code. This was done
mostly as proof of concept, but performance-wise it shows some
interesting data, roughly:
- Very light processing (that should not be threaded anyway) is several
times slower, which is expected due to more overhead in loop management
code.
- Heavier processing can be up to 10% quicker (probably thanks to the
switch from dynamic to static scheduling, which reduces a lot locking
to fill-in the per-tasks chunks of data). Similar speed-up in
non-threaded case comes as a surprise though, not sure what can
explain that.
While this conversion is not really needed, imho we should keep it
(instead of existing code for that function), it's easier to have
complex handling logic in as few places as possible, for maintaining and
for improving it.
Note: That work was initially done to allow for D5372 to be possible... Unfortunately that one proved to be not better than orig code on performances point of view.
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D5371
2019-10-30 12:23:45 +01:00
|
|
|
|
|
|
|
|
SpinLock spin_lock;
|
|
|
|
|
BLI_spin_init(&spin_lock);
|
|
|
|
|
state->iter_shared.spin_lock = &spin_lock;
|
|
|
|
|
|
|
|
|
|
void *userdata_chunk = settings->userdata_chunk;
|
|
|
|
|
const size_t userdata_chunk_size = settings->userdata_chunk_size;
|
|
|
|
|
void *userdata_chunk_local = NULL;
|
|
|
|
|
void *userdata_chunk_array = NULL;
|
|
|
|
|
const bool use_userdata_chunk = (userdata_chunk_size != 0) && (userdata_chunk != NULL);
|
|
|
|
|
|
2021-06-14 23:50:24 +10:00
|
|
|
TaskPool *task_pool = BLI_task_pool_create(state, TASK_PRIORITY_HIGH);
|
BLI_task: Add new generic `BLI_task_parallel_iterator()`.
This new function is part of the 'parallel for loops' functions. It
takes an iterator callback to generate items to be processed, in
addition to the usual 'process' func callback.
This allows to use common code from BLI_task for a wide range of custom
iteratiors, whithout having to re-invent the wheel of the whole tasks &
data chuncks handling.
This supports all settings features from `BLI_task_parallel_range()`,
including dynamic and static (if total number of items is knwon)
scheduling, TLS data and its finalize callback, etc.
One question here is whether we should provide usercode with a spinlock
by default, or enforce it to always handle its own sync mechanism.
I kept it, since imho it will be needed very often, and generating one
is pretty cheap even if unused...
----------
Additionaly, this commit converts (currently unused)
`BLI_task_parallel_listbase()` to use that generic code. This was done
mostly as proof of concept, but performance-wise it shows some
interesting data, roughly:
- Very light processing (that should not be threaded anyway) is several
times slower, which is expected due to more overhead in loop management
code.
- Heavier processing can be up to 10% quicker (probably thanks to the
switch from dynamic to static scheduling, which reduces a lot locking
to fill-in the per-tasks chunks of data). Similar speed-up in
non-threaded case comes as a surprise though, not sure what can
explain that.
While this conversion is not really needed, imho we should keep it
(instead of existing code for that function), it's easier to have
complex handling logic in as few places as possible, for maintaining and
for improving it.
Note: That work was initially done to allow for D5372 to be possible... Unfortunately that one proved to be not better than orig code on performances point of view.
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D5371
2019-10-30 12:23:45 +01:00
|
|
|
|
|
|
|
|
if (use_userdata_chunk) {
|
2022-03-30 17:26:42 +11:00
|
|
|
userdata_chunk_array = MALLOCA(userdata_chunk_size * tasks_num);
|
BLI_task: Add new generic `BLI_task_parallel_iterator()`.
This new function is part of the 'parallel for loops' functions. It
takes an iterator callback to generate items to be processed, in
addition to the usual 'process' func callback.
This allows to use common code from BLI_task for a wide range of custom
iteratiors, whithout having to re-invent the wheel of the whole tasks &
data chuncks handling.
This supports all settings features from `BLI_task_parallel_range()`,
including dynamic and static (if total number of items is knwon)
scheduling, TLS data and its finalize callback, etc.
One question here is whether we should provide usercode with a spinlock
by default, or enforce it to always handle its own sync mechanism.
I kept it, since imho it will be needed very often, and generating one
is pretty cheap even if unused...
----------
Additionaly, this commit converts (currently unused)
`BLI_task_parallel_listbase()` to use that generic code. This was done
mostly as proof of concept, but performance-wise it shows some
interesting data, roughly:
- Very light processing (that should not be threaded anyway) is several
times slower, which is expected due to more overhead in loop management
code.
- Heavier processing can be up to 10% quicker (probably thanks to the
switch from dynamic to static scheduling, which reduces a lot locking
to fill-in the per-tasks chunks of data). Similar speed-up in
non-threaded case comes as a surprise though, not sure what can
explain that.
While this conversion is not really needed, imho we should keep it
(instead of existing code for that function), it's easier to have
complex handling logic in as few places as possible, for maintaining and
for improving it.
Note: That work was initially done to allow for D5372 to be possible... Unfortunately that one proved to be not better than orig code on performances point of view.
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D5371
2019-10-30 12:23:45 +01:00
|
|
|
}
|
|
|
|
|
|
2022-03-30 17:26:42 +11:00
|
|
|
for (size_t i = 0; i < tasks_num; i++) {
|
BLI_task: Add new generic `BLI_task_parallel_iterator()`.
This new function is part of the 'parallel for loops' functions. It
takes an iterator callback to generate items to be processed, in
addition to the usual 'process' func callback.
This allows to use common code from BLI_task for a wide range of custom
iteratiors, whithout having to re-invent the wheel of the whole tasks &
data chuncks handling.
This supports all settings features from `BLI_task_parallel_range()`,
including dynamic and static (if total number of items is knwon)
scheduling, TLS data and its finalize callback, etc.
One question here is whether we should provide usercode with a spinlock
by default, or enforce it to always handle its own sync mechanism.
I kept it, since imho it will be needed very often, and generating one
is pretty cheap even if unused...
----------
Additionaly, this commit converts (currently unused)
`BLI_task_parallel_listbase()` to use that generic code. This was done
mostly as proof of concept, but performance-wise it shows some
interesting data, roughly:
- Very light processing (that should not be threaded anyway) is several
times slower, which is expected due to more overhead in loop management
code.
- Heavier processing can be up to 10% quicker (probably thanks to the
switch from dynamic to static scheduling, which reduces a lot locking
to fill-in the per-tasks chunks of data). Similar speed-up in
non-threaded case comes as a surprise though, not sure what can
explain that.
While this conversion is not really needed, imho we should keep it
(instead of existing code for that function), it's easier to have
complex handling logic in as few places as possible, for maintaining and
for improving it.
Note: That work was initially done to allow for D5372 to be possible... Unfortunately that one proved to be not better than orig code on performances point of view.
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D5371
2019-10-30 12:23:45 +01:00
|
|
|
if (use_userdata_chunk) {
|
|
|
|
|
userdata_chunk_local = (char *)userdata_chunk_array + (userdata_chunk_size * i);
|
|
|
|
|
memcpy(userdata_chunk_local, userdata_chunk, userdata_chunk_size);
|
2021-07-15 14:43:25 +10:00
|
|
|
if (settings->func_init != NULL) {
|
|
|
|
|
settings->func_init(state->userdata, userdata_chunk_local);
|
|
|
|
|
}
|
BLI_task: Add new generic `BLI_task_parallel_iterator()`.
This new function is part of the 'parallel for loops' functions. It
takes an iterator callback to generate items to be processed, in
addition to the usual 'process' func callback.
This allows to use common code from BLI_task for a wide range of custom
iteratiors, whithout having to re-invent the wheel of the whole tasks &
data chuncks handling.
This supports all settings features from `BLI_task_parallel_range()`,
including dynamic and static (if total number of items is knwon)
scheduling, TLS data and its finalize callback, etc.
One question here is whether we should provide usercode with a spinlock
by default, or enforce it to always handle its own sync mechanism.
I kept it, since imho it will be needed very often, and generating one
is pretty cheap even if unused...
----------
Additionaly, this commit converts (currently unused)
`BLI_task_parallel_listbase()` to use that generic code. This was done
mostly as proof of concept, but performance-wise it shows some
interesting data, roughly:
- Very light processing (that should not be threaded anyway) is several
times slower, which is expected due to more overhead in loop management
code.
- Heavier processing can be up to 10% quicker (probably thanks to the
switch from dynamic to static scheduling, which reduces a lot locking
to fill-in the per-tasks chunks of data). Similar speed-up in
non-threaded case comes as a surprise though, not sure what can
explain that.
While this conversion is not really needed, imho we should keep it
(instead of existing code for that function), it's easier to have
complex handling logic in as few places as possible, for maintaining and
for improving it.
Note: That work was initially done to allow for D5372 to be possible... Unfortunately that one proved to be not better than orig code on performances point of view.
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D5371
2019-10-30 12:23:45 +01:00
|
|
|
}
|
|
|
|
|
/* Use this pool's pre-allocated tasks. */
|
2020-04-30 07:59:23 +02:00
|
|
|
BLI_task_pool_push(task_pool, parallel_iterator_func, userdata_chunk_local, false, NULL);
|
BLI_task: Add new generic `BLI_task_parallel_iterator()`.
This new function is part of the 'parallel for loops' functions. It
takes an iterator callback to generate items to be processed, in
addition to the usual 'process' func callback.
This allows to use common code from BLI_task for a wide range of custom
iteratiors, whithout having to re-invent the wheel of the whole tasks &
data chuncks handling.
This supports all settings features from `BLI_task_parallel_range()`,
including dynamic and static (if total number of items is knwon)
scheduling, TLS data and its finalize callback, etc.
One question here is whether we should provide usercode with a spinlock
by default, or enforce it to always handle its own sync mechanism.
I kept it, since imho it will be needed very often, and generating one
is pretty cheap even if unused...
----------
Additionaly, this commit converts (currently unused)
`BLI_task_parallel_listbase()` to use that generic code. This was done
mostly as proof of concept, but performance-wise it shows some
interesting data, roughly:
- Very light processing (that should not be threaded anyway) is several
times slower, which is expected due to more overhead in loop management
code.
- Heavier processing can be up to 10% quicker (probably thanks to the
switch from dynamic to static scheduling, which reduces a lot locking
to fill-in the per-tasks chunks of data). Similar speed-up in
non-threaded case comes as a surprise though, not sure what can
explain that.
While this conversion is not really needed, imho we should keep it
(instead of existing code for that function), it's easier to have
complex handling logic in as few places as possible, for maintaining and
for improving it.
Note: That work was initially done to allow for D5372 to be possible... Unfortunately that one proved to be not better than orig code on performances point of view.
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D5371
2019-10-30 12:23:45 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
BLI_task_pool_work_and_wait(task_pool);
|
|
|
|
|
BLI_task_pool_free(task_pool);
|
|
|
|
|
|
2021-06-09 20:17:03 +10:00
|
|
|
if (use_userdata_chunk) {
|
|
|
|
|
if (settings->func_reduce != NULL || settings->func_free != NULL) {
|
2022-03-30 17:26:42 +11:00
|
|
|
for (size_t i = 0; i < tasks_num; i++) {
|
2021-06-09 20:17:03 +10:00
|
|
|
userdata_chunk_local = (char *)userdata_chunk_array + (userdata_chunk_size * i);
|
|
|
|
|
if (settings->func_reduce != NULL) {
|
|
|
|
|
settings->func_reduce(state->userdata, userdata_chunk, userdata_chunk_local);
|
|
|
|
|
}
|
|
|
|
|
if (settings->func_free != NULL) {
|
|
|
|
|
settings->func_free(state->userdata, userdata_chunk_local);
|
|
|
|
|
}
|
BLI_task: Add new generic `BLI_task_parallel_iterator()`.
This new function is part of the 'parallel for loops' functions. It
takes an iterator callback to generate items to be processed, in
addition to the usual 'process' func callback.
This allows to use common code from BLI_task for a wide range of custom
iteratiors, whithout having to re-invent the wheel of the whole tasks &
data chuncks handling.
This supports all settings features from `BLI_task_parallel_range()`,
including dynamic and static (if total number of items is knwon)
scheduling, TLS data and its finalize callback, etc.
One question here is whether we should provide usercode with a spinlock
by default, or enforce it to always handle its own sync mechanism.
I kept it, since imho it will be needed very often, and generating one
is pretty cheap even if unused...
----------
Additionaly, this commit converts (currently unused)
`BLI_task_parallel_listbase()` to use that generic code. This was done
mostly as proof of concept, but performance-wise it shows some
interesting data, roughly:
- Very light processing (that should not be threaded anyway) is several
times slower, which is expected due to more overhead in loop management
code.
- Heavier processing can be up to 10% quicker (probably thanks to the
switch from dynamic to static scheduling, which reduces a lot locking
to fill-in the per-tasks chunks of data). Similar speed-up in
non-threaded case comes as a surprise though, not sure what can
explain that.
While this conversion is not really needed, imho we should keep it
(instead of existing code for that function), it's easier to have
complex handling logic in as few places as possible, for maintaining and
for improving it.
Note: That work was initially done to allow for D5372 to be possible... Unfortunately that one proved to be not better than orig code on performances point of view.
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D5371
2019-10-30 12:23:45 +01:00
|
|
|
}
|
|
|
|
|
}
|
2022-03-30 17:26:42 +11:00
|
|
|
MALLOCA_FREE(userdata_chunk_array, userdata_chunk_size * tasks_num);
|
BLI_task: Add new generic `BLI_task_parallel_iterator()`.
This new function is part of the 'parallel for loops' functions. It
takes an iterator callback to generate items to be processed, in
addition to the usual 'process' func callback.
This allows to use common code from BLI_task for a wide range of custom
iteratiors, whithout having to re-invent the wheel of the whole tasks &
data chuncks handling.
This supports all settings features from `BLI_task_parallel_range()`,
including dynamic and static (if total number of items is knwon)
scheduling, TLS data and its finalize callback, etc.
One question here is whether we should provide usercode with a spinlock
by default, or enforce it to always handle its own sync mechanism.
I kept it, since imho it will be needed very often, and generating one
is pretty cheap even if unused...
----------
Additionaly, this commit converts (currently unused)
`BLI_task_parallel_listbase()` to use that generic code. This was done
mostly as proof of concept, but performance-wise it shows some
interesting data, roughly:
- Very light processing (that should not be threaded anyway) is several
times slower, which is expected due to more overhead in loop management
code.
- Heavier processing can be up to 10% quicker (probably thanks to the
switch from dynamic to static scheduling, which reduces a lot locking
to fill-in the per-tasks chunks of data). Similar speed-up in
non-threaded case comes as a surprise though, not sure what can
explain that.
While this conversion is not really needed, imho we should keep it
(instead of existing code for that function), it's easier to have
complex handling logic in as few places as possible, for maintaining and
for improving it.
Note: That work was initially done to allow for D5372 to be possible... Unfortunately that one proved to be not better than orig code on performances point of view.
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D5371
2019-10-30 12:23:45 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
BLI_spin_end(&spin_lock);
|
|
|
|
|
state->iter_shared.spin_lock = NULL;
|
2018-11-20 12:17:03 +01:00
|
|
|
}
|
|
|
|
|
|
BLI_task: Add new generic `BLI_task_parallel_iterator()`.
This new function is part of the 'parallel for loops' functions. It
takes an iterator callback to generate items to be processed, in
addition to the usual 'process' func callback.
This allows to use common code from BLI_task for a wide range of custom
iteratiors, whithout having to re-invent the wheel of the whole tasks &
data chuncks handling.
This supports all settings features from `BLI_task_parallel_range()`,
including dynamic and static (if total number of items is knwon)
scheduling, TLS data and its finalize callback, etc.
One question here is whether we should provide usercode with a spinlock
by default, or enforce it to always handle its own sync mechanism.
I kept it, since imho it will be needed very often, and generating one
is pretty cheap even if unused...
----------
Additionaly, this commit converts (currently unused)
`BLI_task_parallel_listbase()` to use that generic code. This was done
mostly as proof of concept, but performance-wise it shows some
interesting data, roughly:
- Very light processing (that should not be threaded anyway) is several
times slower, which is expected due to more overhead in loop management
code.
- Heavier processing can be up to 10% quicker (probably thanks to the
switch from dynamic to static scheduling, which reduces a lot locking
to fill-in the per-tasks chunks of data). Similar speed-up in
non-threaded case comes as a surprise though, not sure what can
explain that.
While this conversion is not really needed, imho we should keep it
(instead of existing code for that function), it's easier to have
complex handling logic in as few places as possible, for maintaining and
for improving it.
Note: That work was initially done to allow for D5372 to be possible... Unfortunately that one proved to be not better than orig code on performances point of view.
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D5371
2019-10-30 12:23:45 +01:00
|
|
|
void BLI_task_parallel_iterator(void *userdata,
|
|
|
|
|
TaskParallelIteratorIterFunc iter_func,
|
|
|
|
|
void *init_item,
|
|
|
|
|
const int init_index,
|
2022-03-30 17:26:42 +11:00
|
|
|
const int items_num,
|
BLI_task: Add new generic `BLI_task_parallel_iterator()`.
This new function is part of the 'parallel for loops' functions. It
takes an iterator callback to generate items to be processed, in
addition to the usual 'process' func callback.
This allows to use common code from BLI_task for a wide range of custom
iteratiors, whithout having to re-invent the wheel of the whole tasks &
data chuncks handling.
This supports all settings features from `BLI_task_parallel_range()`,
including dynamic and static (if total number of items is knwon)
scheduling, TLS data and its finalize callback, etc.
One question here is whether we should provide usercode with a spinlock
by default, or enforce it to always handle its own sync mechanism.
I kept it, since imho it will be needed very often, and generating one
is pretty cheap even if unused...
----------
Additionaly, this commit converts (currently unused)
`BLI_task_parallel_listbase()` to use that generic code. This was done
mostly as proof of concept, but performance-wise it shows some
interesting data, roughly:
- Very light processing (that should not be threaded anyway) is several
times slower, which is expected due to more overhead in loop management
code.
- Heavier processing can be up to 10% quicker (probably thanks to the
switch from dynamic to static scheduling, which reduces a lot locking
to fill-in the per-tasks chunks of data). Similar speed-up in
non-threaded case comes as a surprise though, not sure what can
explain that.
While this conversion is not really needed, imho we should keep it
(instead of existing code for that function), it's easier to have
complex handling logic in as few places as possible, for maintaining and
for improving it.
Note: That work was initially done to allow for D5372 to be possible... Unfortunately that one proved to be not better than orig code on performances point of view.
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D5371
2019-10-30 12:23:45 +01:00
|
|
|
TaskParallelIteratorFunc func,
|
|
|
|
|
const TaskParallelSettings *settings)
|
2018-11-20 12:17:03 +01:00
|
|
|
{
|
BLI_task: Add new generic `BLI_task_parallel_iterator()`.
This new function is part of the 'parallel for loops' functions. It
takes an iterator callback to generate items to be processed, in
addition to the usual 'process' func callback.
This allows to use common code from BLI_task for a wide range of custom
iteratiors, whithout having to re-invent the wheel of the whole tasks &
data chuncks handling.
This supports all settings features from `BLI_task_parallel_range()`,
including dynamic and static (if total number of items is knwon)
scheduling, TLS data and its finalize callback, etc.
One question here is whether we should provide usercode with a spinlock
by default, or enforce it to always handle its own sync mechanism.
I kept it, since imho it will be needed very often, and generating one
is pretty cheap even if unused...
----------
Additionaly, this commit converts (currently unused)
`BLI_task_parallel_listbase()` to use that generic code. This was done
mostly as proof of concept, but performance-wise it shows some
interesting data, roughly:
- Very light processing (that should not be threaded anyway) is several
times slower, which is expected due to more overhead in loop management
code.
- Heavier processing can be up to 10% quicker (probably thanks to the
switch from dynamic to static scheduling, which reduces a lot locking
to fill-in the per-tasks chunks of data). Similar speed-up in
non-threaded case comes as a surprise though, not sure what can
explain that.
While this conversion is not really needed, imho we should keep it
(instead of existing code for that function), it's easier to have
complex handling logic in as few places as possible, for maintaining and
for improving it.
Note: That work was initially done to allow for D5372 to be possible... Unfortunately that one proved to be not better than orig code on performances point of view.
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D5371
2019-10-30 12:23:45 +01:00
|
|
|
TaskParallelIteratorState state = {0};
|
|
|
|
|
|
2022-03-30 17:26:42 +11:00
|
|
|
state.items_num = items_num;
|
BLI_task: Add new generic `BLI_task_parallel_iterator()`.
This new function is part of the 'parallel for loops' functions. It
takes an iterator callback to generate items to be processed, in
addition to the usual 'process' func callback.
This allows to use common code from BLI_task for a wide range of custom
iteratiors, whithout having to re-invent the wheel of the whole tasks &
data chuncks handling.
This supports all settings features from `BLI_task_parallel_range()`,
including dynamic and static (if total number of items is knwon)
scheduling, TLS data and its finalize callback, etc.
One question here is whether we should provide usercode with a spinlock
by default, or enforce it to always handle its own sync mechanism.
I kept it, since imho it will be needed very often, and generating one
is pretty cheap even if unused...
----------
Additionaly, this commit converts (currently unused)
`BLI_task_parallel_listbase()` to use that generic code. This was done
mostly as proof of concept, but performance-wise it shows some
interesting data, roughly:
- Very light processing (that should not be threaded anyway) is several
times slower, which is expected due to more overhead in loop management
code.
- Heavier processing can be up to 10% quicker (probably thanks to the
switch from dynamic to static scheduling, which reduces a lot locking
to fill-in the per-tasks chunks of data). Similar speed-up in
non-threaded case comes as a surprise though, not sure what can
explain that.
While this conversion is not really needed, imho we should keep it
(instead of existing code for that function), it's easier to have
complex handling logic in as few places as possible, for maintaining and
for improving it.
Note: That work was initially done to allow for D5372 to be possible... Unfortunately that one proved to be not better than orig code on performances point of view.
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D5371
2019-10-30 12:23:45 +01:00
|
|
|
state.iter_shared.next_index = init_index;
|
|
|
|
|
state.iter_shared.next_item = init_item;
|
|
|
|
|
state.iter_shared.is_finished = false;
|
|
|
|
|
state.userdata = userdata;
|
|
|
|
|
state.iter_func = iter_func;
|
|
|
|
|
state.func = func;
|
|
|
|
|
|
|
|
|
|
task_parallel_iterator_do(settings, &state);
|
|
|
|
|
}
|
|
|
|
|
|
2021-06-10 02:01:32 +10:00
|
|
|
/** \} */
|
|
|
|
|
|
|
|
|
|
/* -------------------------------------------------------------------- */
|
|
|
|
|
/** \name ListBase Iteration
|
|
|
|
|
* \{ */
|
|
|
|
|
|
BLI_task: Add new generic `BLI_task_parallel_iterator()`.
This new function is part of the 'parallel for loops' functions. It
takes an iterator callback to generate items to be processed, in
addition to the usual 'process' func callback.
This allows to use common code from BLI_task for a wide range of custom
iteratiors, whithout having to re-invent the wheel of the whole tasks &
data chuncks handling.
This supports all settings features from `BLI_task_parallel_range()`,
including dynamic and static (if total number of items is knwon)
scheduling, TLS data and its finalize callback, etc.
One question here is whether we should provide usercode with a spinlock
by default, or enforce it to always handle its own sync mechanism.
I kept it, since imho it will be needed very often, and generating one
is pretty cheap even if unused...
----------
Additionaly, this commit converts (currently unused)
`BLI_task_parallel_listbase()` to use that generic code. This was done
mostly as proof of concept, but performance-wise it shows some
interesting data, roughly:
- Very light processing (that should not be threaded anyway) is several
times slower, which is expected due to more overhead in loop management
code.
- Heavier processing can be up to 10% quicker (probably thanks to the
switch from dynamic to static scheduling, which reduces a lot locking
to fill-in the per-tasks chunks of data). Similar speed-up in
non-threaded case comes as a surprise though, not sure what can
explain that.
While this conversion is not really needed, imho we should keep it
(instead of existing code for that function), it's easier to have
complex handling logic in as few places as possible, for maintaining and
for improving it.
Note: That work was initially done to allow for D5372 to be possible... Unfortunately that one proved to be not better than orig code on performances point of view.
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D5371
2019-10-30 12:23:45 +01:00
|
|
|
static void task_parallel_listbase_get(void *__restrict UNUSED(userdata),
|
|
|
|
|
const TaskParallelTLS *__restrict UNUSED(tls),
|
|
|
|
|
void **r_next_item,
|
|
|
|
|
int *r_next_index,
|
|
|
|
|
bool *r_do_abort)
|
|
|
|
|
{
|
|
|
|
|
/* Get current status. */
|
|
|
|
|
Link *link = *r_next_item;
|
|
|
|
|
|
|
|
|
|
if (link->next == NULL) {
|
|
|
|
|
*r_do_abort = true;
|
2018-11-20 12:17:03 +01:00
|
|
|
}
|
BLI_task: Add new generic `BLI_task_parallel_iterator()`.
This new function is part of the 'parallel for loops' functions. It
takes an iterator callback to generate items to be processed, in
addition to the usual 'process' func callback.
This allows to use common code from BLI_task for a wide range of custom
iteratiors, whithout having to re-invent the wheel of the whole tasks &
data chuncks handling.
This supports all settings features from `BLI_task_parallel_range()`,
including dynamic and static (if total number of items is knwon)
scheduling, TLS data and its finalize callback, etc.
One question here is whether we should provide usercode with a spinlock
by default, or enforce it to always handle its own sync mechanism.
I kept it, since imho it will be needed very often, and generating one
is pretty cheap even if unused...
----------
Additionaly, this commit converts (currently unused)
`BLI_task_parallel_listbase()` to use that generic code. This was done
mostly as proof of concept, but performance-wise it shows some
interesting data, roughly:
- Very light processing (that should not be threaded anyway) is several
times slower, which is expected due to more overhead in loop management
code.
- Heavier processing can be up to 10% quicker (probably thanks to the
switch from dynamic to static scheduling, which reduces a lot locking
to fill-in the per-tasks chunks of data). Similar speed-up in
non-threaded case comes as a surprise though, not sure what can
explain that.
While this conversion is not really needed, imho we should keep it
(instead of existing code for that function), it's easier to have
complex handling logic in as few places as possible, for maintaining and
for improving it.
Note: That work was initially done to allow for D5372 to be possible... Unfortunately that one proved to be not better than orig code on performances point of view.
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D5371
2019-10-30 12:23:45 +01:00
|
|
|
*r_next_item = link->next;
|
|
|
|
|
(*r_next_index)++;
|
2018-11-20 12:17:03 +01:00
|
|
|
}
|
|
|
|
|
|
BLI_task: Add new generic `BLI_task_parallel_iterator()`.
This new function is part of the 'parallel for loops' functions. It
takes an iterator callback to generate items to be processed, in
addition to the usual 'process' func callback.
This allows to use common code from BLI_task for a wide range of custom
iteratiors, whithout having to re-invent the wheel of the whole tasks &
data chuncks handling.
This supports all settings features from `BLI_task_parallel_range()`,
including dynamic and static (if total number of items is knwon)
scheduling, TLS data and its finalize callback, etc.
One question here is whether we should provide usercode with a spinlock
by default, or enforce it to always handle its own sync mechanism.
I kept it, since imho it will be needed very often, and generating one
is pretty cheap even if unused...
----------
Additionaly, this commit converts (currently unused)
`BLI_task_parallel_listbase()` to use that generic code. This was done
mostly as proof of concept, but performance-wise it shows some
interesting data, roughly:
- Very light processing (that should not be threaded anyway) is several
times slower, which is expected due to more overhead in loop management
code.
- Heavier processing can be up to 10% quicker (probably thanks to the
switch from dynamic to static scheduling, which reduces a lot locking
to fill-in the per-tasks chunks of data). Similar speed-up in
non-threaded case comes as a surprise though, not sure what can
explain that.
While this conversion is not really needed, imho we should keep it
(instead of existing code for that function), it's easier to have
complex handling logic in as few places as possible, for maintaining and
for improving it.
Note: That work was initially done to allow for D5372 to be possible... Unfortunately that one proved to be not better than orig code on performances point of view.
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D5371
2019-10-30 12:23:45 +01:00
|
|
|
void BLI_task_parallel_listbase(ListBase *listbase,
|
2016-05-13 11:03:04 +02:00
|
|
|
void *userdata,
|
BLI_task: Add new generic `BLI_task_parallel_iterator()`.
This new function is part of the 'parallel for loops' functions. It
takes an iterator callback to generate items to be processed, in
addition to the usual 'process' func callback.
This allows to use common code from BLI_task for a wide range of custom
iteratiors, whithout having to re-invent the wheel of the whole tasks &
data chuncks handling.
This supports all settings features from `BLI_task_parallel_range()`,
including dynamic and static (if total number of items is knwon)
scheduling, TLS data and its finalize callback, etc.
One question here is whether we should provide usercode with a spinlock
by default, or enforce it to always handle its own sync mechanism.
I kept it, since imho it will be needed very often, and generating one
is pretty cheap even if unused...
----------
Additionaly, this commit converts (currently unused)
`BLI_task_parallel_listbase()` to use that generic code. This was done
mostly as proof of concept, but performance-wise it shows some
interesting data, roughly:
- Very light processing (that should not be threaded anyway) is several
times slower, which is expected due to more overhead in loop management
code.
- Heavier processing can be up to 10% quicker (probably thanks to the
switch from dynamic to static scheduling, which reduces a lot locking
to fill-in the per-tasks chunks of data). Similar speed-up in
non-threaded case comes as a surprise though, not sure what can
explain that.
While this conversion is not really needed, imho we should keep it
(instead of existing code for that function), it's easier to have
complex handling logic in as few places as possible, for maintaining and
for improving it.
Note: That work was initially done to allow for D5372 to be possible... Unfortunately that one proved to be not better than orig code on performances point of view.
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D5371
2019-10-30 12:23:45 +01:00
|
|
|
TaskParallelIteratorFunc func,
|
|
|
|
|
const TaskParallelSettings *settings)
|
2016-05-13 11:03:04 +02:00
|
|
|
{
|
|
|
|
|
if (BLI_listbase_is_empty(listbase)) {
|
|
|
|
|
return;
|
|
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
BLI_task: Add new generic `BLI_task_parallel_iterator()`.
This new function is part of the 'parallel for loops' functions. It
takes an iterator callback to generate items to be processed, in
addition to the usual 'process' func callback.
This allows to use common code from BLI_task for a wide range of custom
iteratiors, whithout having to re-invent the wheel of the whole tasks &
data chuncks handling.
This supports all settings features from `BLI_task_parallel_range()`,
including dynamic and static (if total number of items is knwon)
scheduling, TLS data and its finalize callback, etc.
One question here is whether we should provide usercode with a spinlock
by default, or enforce it to always handle its own sync mechanism.
I kept it, since imho it will be needed very often, and generating one
is pretty cheap even if unused...
----------
Additionaly, this commit converts (currently unused)
`BLI_task_parallel_listbase()` to use that generic code. This was done
mostly as proof of concept, but performance-wise it shows some
interesting data, roughly:
- Very light processing (that should not be threaded anyway) is several
times slower, which is expected due to more overhead in loop management
code.
- Heavier processing can be up to 10% quicker (probably thanks to the
switch from dynamic to static scheduling, which reduces a lot locking
to fill-in the per-tasks chunks of data). Similar speed-up in
non-threaded case comes as a surprise though, not sure what can
explain that.
While this conversion is not really needed, imho we should keep it
(instead of existing code for that function), it's easier to have
complex handling logic in as few places as possible, for maintaining and
for improving it.
Note: That work was initially done to allow for D5372 to be possible... Unfortunately that one proved to be not better than orig code on performances point of view.
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D5371
2019-10-30 12:23:45 +01:00
|
|
|
TaskParallelIteratorState state = {0};
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2022-03-30 17:26:42 +11:00
|
|
|
state.items_num = BLI_listbase_count(listbase);
|
BLI_task: Add new generic `BLI_task_parallel_iterator()`.
This new function is part of the 'parallel for loops' functions. It
takes an iterator callback to generate items to be processed, in
addition to the usual 'process' func callback.
This allows to use common code from BLI_task for a wide range of custom
iteratiors, whithout having to re-invent the wheel of the whole tasks &
data chuncks handling.
This supports all settings features from `BLI_task_parallel_range()`,
including dynamic and static (if total number of items is knwon)
scheduling, TLS data and its finalize callback, etc.
One question here is whether we should provide usercode with a spinlock
by default, or enforce it to always handle its own sync mechanism.
I kept it, since imho it will be needed very often, and generating one
is pretty cheap even if unused...
----------
Additionaly, this commit converts (currently unused)
`BLI_task_parallel_listbase()` to use that generic code. This was done
mostly as proof of concept, but performance-wise it shows some
interesting data, roughly:
- Very light processing (that should not be threaded anyway) is several
times slower, which is expected due to more overhead in loop management
code.
- Heavier processing can be up to 10% quicker (probably thanks to the
switch from dynamic to static scheduling, which reduces a lot locking
to fill-in the per-tasks chunks of data). Similar speed-up in
non-threaded case comes as a surprise though, not sure what can
explain that.
While this conversion is not really needed, imho we should keep it
(instead of existing code for that function), it's easier to have
complex handling logic in as few places as possible, for maintaining and
for improving it.
Note: That work was initially done to allow for D5372 to be possible... Unfortunately that one proved to be not better than orig code on performances point of view.
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D5371
2019-10-30 12:23:45 +01:00
|
|
|
state.iter_shared.next_index = 0;
|
|
|
|
|
state.iter_shared.next_item = listbase->first;
|
|
|
|
|
state.iter_shared.is_finished = false;
|
2016-05-13 11:03:04 +02:00
|
|
|
state.userdata = userdata;
|
BLI_task: Add new generic `BLI_task_parallel_iterator()`.
This new function is part of the 'parallel for loops' functions. It
takes an iterator callback to generate items to be processed, in
addition to the usual 'process' func callback.
This allows to use common code from BLI_task for a wide range of custom
iteratiors, whithout having to re-invent the wheel of the whole tasks &
data chuncks handling.
This supports all settings features from `BLI_task_parallel_range()`,
including dynamic and static (if total number of items is knwon)
scheduling, TLS data and its finalize callback, etc.
One question here is whether we should provide usercode with a spinlock
by default, or enforce it to always handle its own sync mechanism.
I kept it, since imho it will be needed very often, and generating one
is pretty cheap even if unused...
----------
Additionaly, this commit converts (currently unused)
`BLI_task_parallel_listbase()` to use that generic code. This was done
mostly as proof of concept, but performance-wise it shows some
interesting data, roughly:
- Very light processing (that should not be threaded anyway) is several
times slower, which is expected due to more overhead in loop management
code.
- Heavier processing can be up to 10% quicker (probably thanks to the
switch from dynamic to static scheduling, which reduces a lot locking
to fill-in the per-tasks chunks of data). Similar speed-up in
non-threaded case comes as a surprise though, not sure what can
explain that.
While this conversion is not really needed, imho we should keep it
(instead of existing code for that function), it's easier to have
complex handling logic in as few places as possible, for maintaining and
for improving it.
Note: That work was initially done to allow for D5372 to be possible... Unfortunately that one proved to be not better than orig code on performances point of view.
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D5371
2019-10-30 12:23:45 +01:00
|
|
|
state.iter_func = task_parallel_listbase_get;
|
2016-05-13 11:03:04 +02:00
|
|
|
state.func = func;
|
2019-04-17 06:17:24 +02:00
|
|
|
|
BLI_task: Add new generic `BLI_task_parallel_iterator()`.
This new function is part of the 'parallel for loops' functions. It
takes an iterator callback to generate items to be processed, in
addition to the usual 'process' func callback.
This allows to use common code from BLI_task for a wide range of custom
iteratiors, whithout having to re-invent the wheel of the whole tasks &
data chuncks handling.
This supports all settings features from `BLI_task_parallel_range()`,
including dynamic and static (if total number of items is knwon)
scheduling, TLS data and its finalize callback, etc.
One question here is whether we should provide usercode with a spinlock
by default, or enforce it to always handle its own sync mechanism.
I kept it, since imho it will be needed very often, and generating one
is pretty cheap even if unused...
----------
Additionaly, this commit converts (currently unused)
`BLI_task_parallel_listbase()` to use that generic code. This was done
mostly as proof of concept, but performance-wise it shows some
interesting data, roughly:
- Very light processing (that should not be threaded anyway) is several
times slower, which is expected due to more overhead in loop management
code.
- Heavier processing can be up to 10% quicker (probably thanks to the
switch from dynamic to static scheduling, which reduces a lot locking
to fill-in the per-tasks chunks of data). Similar speed-up in
non-threaded case comes as a surprise though, not sure what can
explain that.
While this conversion is not really needed, imho we should keep it
(instead of existing code for that function), it's easier to have
complex handling logic in as few places as possible, for maintaining and
for improving it.
Note: That work was initially done to allow for D5372 to be possible... Unfortunately that one proved to be not better than orig code on performances point of view.
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D5371
2019-10-30 12:23:45 +01:00
|
|
|
task_parallel_iterator_do(settings, &state);
|
2016-05-13 11:03:04 +02:00
|
|
|
}
|
2017-11-23 21:14:43 +01:00
|
|
|
|
2021-06-10 02:01:32 +10:00
|
|
|
/** \} */
|
|
|
|
|
|
|
|
|
|
/* -------------------------------------------------------------------- */
|
|
|
|
|
/** \name MemPool Iteration
|
|
|
|
|
* \{ */
|
|
|
|
|
|
2017-11-23 21:14:43 +01:00
|
|
|
/** Shared state handed (as task-pool user data) to every #parallel_mempool_func task. */
typedef struct ParallelMempoolState {
  /** Opaque caller data, forwarded unchanged to #func for each item. */
  void *userdata;
  /** Per-item callback invoked on every element pulled from the mempool. */
  TaskParallelMempoolFunc func;
} ParallelMempoolState;
|
|
|
|
|
|
2020-04-30 07:59:23 +02:00
|
|
|
static void parallel_mempool_func(TaskPool *__restrict pool, void *taskdata)
{
  /* Task body for #BLI_task_parallel_mempool: each task pulls items from its
   * thread-safe mempool iterator until the iterator is exhausted, running the
   * user callback with this task's TLS chunk. */
  ParallelMempoolState *__restrict state = BLI_task_pool_user_data(pool);
  ParallelMempoolTaskData *task_data = taskdata;
  BLI_mempool_threadsafe_iter *ts_iter = &task_data->ts_iter;
  TaskParallelTLS *tls = &task_data->tls;

  for (MempoolIterData *item = mempool_iter_threadsafe_step(ts_iter); item != NULL;
       item = mempool_iter_threadsafe_step(ts_iter))
  {
    state->func(state->userdata, item, tls);
  }
}
|
|
|
|
|
|
|
|
|
|
void BLI_task_parallel_mempool(BLI_mempool *mempool,
|
|
|
|
|
void *userdata,
|
|
|
|
|
TaskParallelMempoolFunc func,
|
2021-06-09 22:49:45 +10:00
|
|
|
const TaskParallelSettings *settings)
|
2017-11-23 21:14:43 +01:00
|
|
|
{
|
2021-07-16 14:38:33 +10:00
|
|
|
if (UNLIKELY(BLI_mempool_len(mempool) == 0)) {
|
2017-11-23 21:14:43 +01:00
|
|
|
return;
|
|
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2021-06-09 22:49:45 +10:00
|
|
|
void *userdata_chunk = settings->userdata_chunk;
|
|
|
|
|
const size_t userdata_chunk_size = settings->userdata_chunk_size;
|
|
|
|
|
void *userdata_chunk_array = NULL;
|
|
|
|
|
const bool use_userdata_chunk = (userdata_chunk_size != 0) && (userdata_chunk != NULL);
|
|
|
|
|
|
|
|
|
|
if (!settings->use_threading) {
|
|
|
|
|
TaskParallelTLS tls = {NULL};
|
|
|
|
|
if (use_userdata_chunk) {
|
2021-07-15 14:43:25 +10:00
|
|
|
if (settings->func_init != NULL) {
|
2021-07-29 10:35:12 -03:00
|
|
|
settings->func_init(userdata, userdata_chunk);
|
2021-07-15 14:43:25 +10:00
|
|
|
}
|
2021-07-29 10:35:12 -03:00
|
|
|
tls.userdata_chunk = userdata_chunk;
|
2021-06-09 22:49:45 +10:00
|
|
|
}
|
|
|
|
|
|
2017-11-23 21:14:43 +01:00
|
|
|
BLI_mempool_iter iter;
|
|
|
|
|
BLI_mempool_iternew(mempool, &iter);
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2021-06-09 22:49:45 +10:00
|
|
|
void *item;
|
|
|
|
|
while ((item = BLI_mempool_iterstep(&iter))) {
|
|
|
|
|
func(userdata, item, &tls);
|
2017-11-23 21:14:43 +01:00
|
|
|
}
|
2021-06-09 22:49:45 +10:00
|
|
|
|
2021-07-29 10:35:12 -03:00
|
|
|
if (use_userdata_chunk) {
|
|
|
|
|
if (settings->func_free != NULL) {
|
|
|
|
|
/* `func_free` should only free data that was created during execution of `func`. */
|
|
|
|
|
settings->func_free(userdata, userdata_chunk);
|
|
|
|
|
}
|
2021-06-09 22:49:45 +10:00
|
|
|
}
|
|
|
|
|
|
2017-11-23 21:14:43 +01:00
|
|
|
return;
|
|
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2021-07-16 14:38:33 +10:00
|
|
|
ParallelMempoolState state;
|
|
|
|
|
TaskPool *task_pool = BLI_task_pool_create(&state, TASK_PRIORITY_HIGH);
|
2022-03-30 17:26:42 +11:00
|
|
|
const int threads_num = BLI_task_scheduler_num_threads();
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2017-11-23 21:14:43 +01:00
|
|
|
/* The idea here is to prevent creating task for each of the loop iterations
|
|
|
|
|
* and instead have tasks which are evenly distributed across CPU cores and
|
|
|
|
|
* pull next item to be crunched using the threaded-aware BLI_mempool_iter.
|
|
|
|
|
*/
|
2022-03-30 17:26:42 +11:00
|
|
|
const int tasks_num = threads_num + 2;
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2017-11-23 21:14:43 +01:00
|
|
|
state.userdata = userdata;
|
|
|
|
|
state.func = func;
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2021-06-09 22:49:45 +10:00
|
|
|
if (use_userdata_chunk) {
|
2022-03-30 17:26:42 +11:00
|
|
|
userdata_chunk_array = MALLOCA(userdata_chunk_size * tasks_num);
|
2021-06-09 22:49:45 +10:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
ParallelMempoolTaskData *mempool_iterator_data = mempool_iter_threadsafe_create(
|
2022-03-30 17:26:42 +11:00
|
|
|
mempool, (size_t)tasks_num);
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2022-03-30 17:26:42 +11:00
|
|
|
for (int i = 0; i < tasks_num; i++) {
|
2021-07-29 10:35:12 -03:00
|
|
|
void *userdata_chunk_local = NULL;
|
2021-06-09 22:49:45 +10:00
|
|
|
if (use_userdata_chunk) {
|
|
|
|
|
userdata_chunk_local = (char *)userdata_chunk_array + (userdata_chunk_size * i);
|
|
|
|
|
memcpy(userdata_chunk_local, userdata_chunk, userdata_chunk_size);
|
2021-07-15 14:43:25 +10:00
|
|
|
if (settings->func_init != NULL) {
|
|
|
|
|
settings->func_init(userdata, userdata_chunk_local);
|
|
|
|
|
}
|
2021-06-09 22:49:45 +10:00
|
|
|
}
|
|
|
|
|
mempool_iterator_data[i].tls.userdata_chunk = userdata_chunk_local;
|
|
|
|
|
|
2017-11-23 21:14:43 +01:00
|
|
|
/* Use this pool's pre-allocated tasks. */
|
2021-06-09 22:49:45 +10:00
|
|
|
BLI_task_pool_push(task_pool, parallel_mempool_func, &mempool_iterator_data[i], false, NULL);
|
2017-11-23 21:14:43 +01:00
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2017-11-23 21:14:43 +01:00
|
|
|
BLI_task_pool_work_and_wait(task_pool);
|
|
|
|
|
BLI_task_pool_free(task_pool);
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2021-06-09 22:49:45 +10:00
|
|
|
if (use_userdata_chunk) {
|
|
|
|
|
if ((settings->func_free != NULL) || (settings->func_reduce != NULL)) {
|
2022-03-30 17:26:42 +11:00
|
|
|
for (int i = 0; i < tasks_num; i++) {
|
2021-06-09 22:49:45 +10:00
|
|
|
if (settings->func_reduce) {
|
|
|
|
|
settings->func_reduce(
|
|
|
|
|
userdata, userdata_chunk, mempool_iterator_data[i].tls.userdata_chunk);
|
|
|
|
|
}
|
|
|
|
|
if (settings->func_free) {
|
|
|
|
|
settings->func_free(userdata, mempool_iterator_data[i].tls.userdata_chunk);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
2022-03-30 17:26:42 +11:00
|
|
|
MALLOCA_FREE(userdata_chunk_array, userdata_chunk_size * tasks_num);
|
2021-06-09 22:49:45 +10:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
mempool_iter_threadsafe_destroy(mempool_iterator_data);
|
2017-11-23 21:14:43 +01:00
|
|
|
}
|
2021-06-09 22:49:45 +10:00
|
|
|
|
|
|
|
|
#undef MALLOCA
|
|
|
|
|
#undef MALLOCA_FREE
|
2021-06-10 02:01:32 +10:00
|
|
|
|
|
|
|
|
/** \} */
|