test/source/blender/blenlib/tests/BLI_task_test.cc
Campbell Barton e955c94ed3 License Headers: Set copyright to "Blender Authors", add AUTHORS
Listing the "Blender Foundation" as copyright holder implied that the
Blender Foundation holds copyright to files which may include work from
many developers.

While keeping copyright notices in headers makes sense for isolated
libraries, Blender's own code may be refactored or moved between files
in a way that makes per-file copyright holders less meaningful.

Copyright references to the "Blender Foundation" have been replaced with
"Blender Authors", with the exception of `./extern/`: since this
directory contains libraries which are more isolated, any changes to
license headers there can be handled on a case-by-case basis.

Some directories in `./intern/` have also been excluded:

- `./intern/cycles/`: its own `AUTHORS` file is planned.
- `./intern/opensubdiv/`.

An "AUTHORS" file has been added, using the chromium projects authors
file as a template.

Design task: #110784

Ref !110783.
2023-08-16 00:20:26 +10:00

/* SPDX-FileCopyrightText: 2023 Blender Authors
 *
 * SPDX-License-Identifier: Apache-2.0 */

#include "testing/testing.h"

#include <atomic>
#include <cstring>

#include "atomic_ops.h"

#include "MEM_guardedalloc.h"

#include "BLI_utildefines.h"

#include "BLI_listbase.h"
#include "BLI_mempool.h"
#include "BLI_task.h"
#include "BLI_task.hh"

#define ITEMS_NUM 10000

/* *** Parallel iterations over range of integer values. *** */

static void task_range_iter_func(void *userdata, int index, const TaskParallelTLS *__restrict tls)
{
  int *data = (int *)userdata;
  data[index] = index;
  *((int *)tls->userdata_chunk) += index;
  // printf("%d, %d, %d\n", index, data[index], *((int *)tls->userdata_chunk));
}

static void task_range_iter_reduce_func(const void *__restrict /*userdata*/,
                                        void *__restrict join_v,
                                        void *__restrict userdata_chunk)
{
  int *join = (int *)join_v;
  int *chunk = (int *)userdata_chunk;
  *join += *chunk;
  // printf("%d, %d\n", data[ITEMS_NUM], *((int *)userdata_chunk));
}

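/* Note on the TLS protocol used below: `userdata_chunk` gives every worker
 * its own copy of `sum` to accumulate into without locking, and `func_reduce`
 * joins each worker's partial sum back into the main chunk once the parallel
 * range completes. */
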
TEST(task, RangeIter)
{
  int data[ITEMS_NUM] = {0};
  int sum = 0;

  BLI_threadapi_init();

  TaskParallelSettings settings;
  BLI_parallel_range_settings_defaults(&settings);
  settings.min_iter_per_thread = 1;

  settings.userdata_chunk = &sum;
  settings.userdata_chunk_size = sizeof(sum);
  settings.func_reduce = task_range_iter_reduce_func;

  BLI_task_parallel_range(0, ITEMS_NUM, data, task_range_iter_func, &settings);

  /* These checks ensure that every item of the range was processed once, and only once. */
  int expected_sum = 0;
  for (int i = 0; i < ITEMS_NUM; i++) {
    EXPECT_EQ(data[i], i);
    expected_sum += i;
  }
  EXPECT_EQ(sum, expected_sum);

  BLI_threadapi_exit();
}

/* *** Parallel iterations over mempool items. *** */

static void task_mempool_iter_func(void *userdata,
                                   MempoolIterData *item,
                                   const TaskParallelTLS *__restrict /*tls*/)
{
  int *data = (int *)item;
  int *count = (int *)userdata;

  EXPECT_TRUE(data != nullptr);

  *data += 1;
  atomic_sub_and_fetch_uint32((uint32_t *)count, 1);
}

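/* The iterator callback above increments every visited item and atomically
 * decrements the shared counter, so after the parallel loop the counter is
 * zero exactly when each live item was visited once. */
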
TEST(task, MempoolIter)
{
  int *data[ITEMS_NUM];
  BLI_threadapi_init();
  BLI_mempool *mempool = BLI_mempool_create(
      sizeof(*data[0]), ITEMS_NUM, 32, BLI_MEMPOOL_ALLOW_ITER);

  int i;

  /* 'Randomly' add and remove some items from the mempool, to create a non-homogeneous one. */
  int items_num = 0;
  for (i = 0; i < ITEMS_NUM; i++) {
    data[i] = (int *)BLI_mempool_alloc(mempool);
    *data[i] = i - 1;
    items_num++;
  }

  for (i = 0; i < ITEMS_NUM; i += 3) {
    BLI_mempool_free(mempool, data[i]);
    data[i] = nullptr;
    items_num--;
  }

  for (i = 0; i < ITEMS_NUM; i += 7) {
    if (data[i] == nullptr) {
      data[i] = (int *)BLI_mempool_alloc(mempool);
      *data[i] = i - 1;
      items_num++;
    }
  }

  for (i = 0; i < ITEMS_NUM - 5; i += 23) {
    for (int j = 0; j < 5; j++) {
      if (data[i + j] != nullptr) {
        BLI_mempool_free(mempool, data[i + j]);
        data[i + j] = nullptr;
        items_num--;
      }
    }
  }

  TaskParallelSettings settings;
  BLI_parallel_mempool_settings_defaults(&settings);

  BLI_task_parallel_mempool(mempool, &items_num, task_mempool_iter_func, &settings);

  /* These checks ensure that every item of the mempool was processed once, and only once. */
  EXPECT_EQ(items_num, 0);
  for (i = 0; i < ITEMS_NUM; i++) {
    if (data[i] != nullptr) {
      EXPECT_EQ(*data[i], i);
    }
  }

  BLI_mempool_destroy(mempool);
  BLI_threadapi_exit();
}

/* *** Parallel iterations over mempool items with TLS. *** */

using TaskMemPool_Chunk = struct TaskMemPool_Chunk {
  ListBase *accumulate_items;
};

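/* Each worker lazily allocates its own `accumulate_items` list in its copy of
 * the TLS chunk; the reduce callback later merges these per-thread lists into
 * the main chunk. */
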
static void task_mempool_iter_tls_func(void * /*userdata*/,
                                       MempoolIterData *item,
                                       const TaskParallelTLS *__restrict tls)
{
  TaskMemPool_Chunk *task_data = (TaskMemPool_Chunk *)tls->userdata_chunk;
  int *data = (int *)item;

  EXPECT_TRUE(data != nullptr);
  if (task_data->accumulate_items == nullptr) {
    task_data->accumulate_items = MEM_cnew<ListBase>(__func__);
  }

  /* Flip the sign to prove this item has been touched. */
  *data = -*data;

  BLI_addtail(task_data->accumulate_items, BLI_genericNodeN(data));
}

static void task_mempool_iter_tls_reduce(const void *__restrict /*userdata*/,
                                         void *__restrict chunk_join,
                                         void *__restrict chunk)
{
  TaskMemPool_Chunk *join_chunk = (TaskMemPool_Chunk *)chunk_join;
  TaskMemPool_Chunk *data_chunk = (TaskMemPool_Chunk *)chunk;

  if (data_chunk->accumulate_items != nullptr) {
    if (join_chunk->accumulate_items == nullptr) {
      join_chunk->accumulate_items = MEM_cnew<ListBase>(__func__);
    }
    BLI_movelisttolist(join_chunk->accumulate_items, data_chunk->accumulate_items);
  }
}

static void task_mempool_iter_tls_free(const void * /*userdata*/, void *__restrict userdata_chunk)
{
  TaskMemPool_Chunk *task_data = (TaskMemPool_Chunk *)userdata_chunk;
  MEM_freeN(task_data->accumulate_items);
}

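/* After the reduce step, worker chunks whose list was merged still own the
 * (now empty) `ListBase` allocation; `func_free` releases those, while the
 * main chunk's merged list is freed by the test itself below. */
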
TEST(task, MempoolIterTLS)
{
  int *data[ITEMS_NUM];
  BLI_threadapi_init();
  BLI_mempool *mempool = BLI_mempool_create(
      sizeof(*data[0]), ITEMS_NUM, 32, BLI_MEMPOOL_ALLOW_ITER);

  int i;

  /* Add the numbers `1..ITEMS_NUM` inclusive, negated. */
  for (i = 0; i < ITEMS_NUM; i++) {
    data[i] = (int *)BLI_mempool_alloc(mempool);
    *data[i] = -(i + 1);
  }

  TaskParallelSettings settings;
  BLI_parallel_mempool_settings_defaults(&settings);

  TaskMemPool_Chunk tls_data;
  tls_data.accumulate_items = nullptr;

  settings.userdata_chunk = &tls_data;
  settings.userdata_chunk_size = sizeof(tls_data);

  settings.func_free = task_mempool_iter_tls_free;
  settings.func_reduce = task_mempool_iter_tls_reduce;

  BLI_task_parallel_mempool(mempool, nullptr, task_mempool_iter_tls_func, &settings);

  EXPECT_EQ(BLI_listbase_count(tls_data.accumulate_items), ITEMS_NUM);

  /* Check that all elements were added to the list exactly once. */
  int number_accum = 0;
  LISTBASE_FOREACH (LinkData *, link, tls_data.accumulate_items) {
    int *data = (int *)link->data;
    number_accum += *data;
  }
  EXPECT_EQ(number_accum, (ITEMS_NUM * (ITEMS_NUM + 1)) / 2);

  BLI_freelistN(tls_data.accumulate_items);
  MEM_freeN(tls_data.accumulate_items);

  BLI_mempool_destroy(mempool);
  BLI_threadapi_exit();
}

/* *** Parallel iterations over double-linked list items. *** */

static void task_listbase_iter_func(void *userdata,
                                    void *item,
                                    int index,
                                    const TaskParallelTLS *__restrict /*tls*/)
{
  LinkData *data = (LinkData *)item;
  int *count = (int *)userdata;

  data->data = POINTER_FROM_INT(POINTER_AS_INT(data->data) + index);
  atomic_sub_and_fetch_uint32((uint32_t *)count, 1);
}

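/* Each node stores its own iteration index (added to the initial zero), and
 * the shared counter is atomically decremented per visit, so both the
 * per-item values and the final counter verify single-visit iteration. */
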
TEST(task, ListBaseIter)
{
  ListBase list = {nullptr, nullptr};
  LinkData *items_buffer = (LinkData *)MEM_calloc_arrayN(
      ITEMS_NUM, sizeof(*items_buffer), __func__);
  BLI_threadapi_init();

  int i;

  int items_num = 0;
  for (i = 0; i < ITEMS_NUM; i++) {
    BLI_addtail(&list, &items_buffer[i]);
    items_num++;
  }

  TaskParallelSettings settings;
  BLI_parallel_range_settings_defaults(&settings);

  BLI_task_parallel_listbase(&list, &items_num, task_listbase_iter_func, &settings);

  /* These checks ensure that every item of the listbase was processed once, and only once. */
  EXPECT_EQ(items_num, 0);
  LinkData *item;
  for (i = 0, item = (LinkData *)list.first; i < ITEMS_NUM && item != nullptr;
       i++, item = item->next)
  {
    EXPECT_EQ(POINTER_AS_INT(item->data), i);
  }
  EXPECT_EQ(ITEMS_NUM, i);

  MEM_freeN(items_buffer);
  BLI_threadapi_exit();
}

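/* `blender::threading::parallel_invoke` runs the given callables, potentially
 * concurrently, and returns once all of them have completed. */
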
TEST(task, ParallelInvoke)
{
  std::atomic<int> counter = 0;
  blender::threading::parallel_invoke([&]() { counter++; },
                                      [&]() { counter++; },
                                      [&]() { counter++; },
                                      [&]() { counter++; },
                                      [&]() { counter++; },
                                      [&]() { counter++; });
  EXPECT_EQ(counter, 6);
}