Replace stupid static balancing with task-based one
Initially I wanted to have a really simple and basic threading scheduler, and wrote one based on traversing the depsgraph in advance. But it ran into issues because the single-pass traverse I did didn't actually gather all the dependencies. That was certainly solvable, but it was becoming time-consuming, and with the huge help of Brecht's patch it was faster to just write proper balancing. It's still a really basic approach, which could easily be changed depending on feedback and design decisions from Joshua. For now it works in the following way: - Currently DagNode is used for threaded evaluation, meaning traversal actually happens over DagNodes. This is easier than converting the DAG to a graph where only objects are stored, but it required adding one int field to DagNode for faster runtime checks. We could change this later when it becomes clear how and where we'll store evaluation data, but for now it works pretty well. - The new field is called "valency", and it is basically the number of the node's parents which need to be evaluated before the node itself can be evaluated. - Nodes' valency is initialized before threading starts, and when a node finishes updating, the valency of each of its children is decreased by one. If a child's valency thereby becomes zero, that child is added to the task pool. - There is a thread lock around the valency update; it will be replaced with a spinlock in the near future. - Another piece of runtime data is the node color. White nodes represent objects, gray ones non-objects. Currently this is needed to distinguish whether we need to call object_handle_update on node->ob or not. In the future it could be replaced with node->type to support finer granularity, meaning we could then update object data separately from the object itself. - Needed to add some public depsgraph functions to make it possible to traverse the depsgraph without including the depsgraph private header in other files.
This change doesn't make the code any more stable, but it solves update-order issues noticed while working on fixing underlying bugs. Threaded update is still ifdef-ed out until curves and armatures are considered thread-safe, which is the next step to be done.
This commit is contained in:
@@ -116,58 +116,28 @@ void DAG_pose_sort(struct Object *ob);
|
||||
void DAG_editors_update_cb(void (*id_func)(struct Main *bmain, struct ID *id),
|
||||
void (*scene_func)(struct Main *bmain, struct Scene *scene, int updated));
|
||||
|
||||
/* Threaded update: get groups of independent bases
|
||||
*
|
||||
* DAG_get_independent_groups goes over dependency graph and collects
|
||||
* groups of bases in a way that there's no dependencies between this
|
||||
* groups at all.
|
||||
*
|
||||
* Result is stored in a list called groups. This is a sliced list,
|
||||
* which means every element of list groups is a LinkData which data
|
||||
* represents list of bases in that group.
|
||||
*
|
||||
* List of bases uses LinkData as well, this is so because bases are
|
||||
* used from actual scene.
|
||||
*
|
||||
* Here's an example of groups storage. There're two groups, one of
|
||||
* them consists of two bases: base1 and base2, and base2 depends on
|
||||
* base1. Second group contains base3 which doesn't depend on base1
|
||||
* and base2.
|
||||
*
|
||||
* groups
|
||||
* |
|
||||
* +- LinkData
|
||||
* | |
|
||||
* | +- BasesList
|
||||
* | |
|
||||
* | +- LinkData
|
||||
* | | |
|
||||
* | | + - base1
|
||||
* | |
|
||||
* | +- LinkData
|
||||
* | |
|
||||
* | + - base2
|
||||
* |
|
||||
* +- LinkData
|
||||
* |
|
||||
* +- BasesList
|
||||
* |
|
||||
* +- LinkData
|
||||
* |
|
||||
* + - base3
|
||||
*
|
||||
* Bases in every group are sorted in a dependency order, meaning
|
||||
* first base in group doesn't depend on any other objects, and
|
||||
* further bases depends on bases above.
|
||||
*/
|
||||
void DAG_get_independent_groups(struct Scene *scene, struct ListBase *groups);
|
||||
/* ** Threaded update ** */
|
||||
|
||||
/* Initialize the DAG for threaded update. */
|
||||
void DAG_threaded_update_begin(struct Scene *scene);
|
||||
|
||||
/* Run a callback for every node which is ready for update. */
|
||||
void DAG_threaded_update_foreach_ready_node(struct Scene *scene,
|
||||
void (*func)(void *node, void *user_data),
|
||||
void *user_data);
|
||||
|
||||
struct Object *DAG_threaded_update_get_node_object(void *node_v);
|
||||
|
||||
const char *DAG_threaded_update_get_node_name(void *node_v);
|
||||
|
||||
void DAG_threaded_update_handle_node_updated(void *node_v,
|
||||
void (*func)(void *node, void *user_data),
|
||||
void *user_data);
|
||||
|
||||
/* Debugging: print dependency graph for scene or armature object to console */
|
||||
|
||||
void DAG_print_dependencies(struct Main *bmain, struct Scene *scene, struct Object *ob);
|
||||
|
||||
void DAG_print_dependency_groups(struct Scene *scene);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
@@ -92,6 +92,13 @@ typedef struct DagNode {
|
||||
struct DagAdjList *child;
|
||||
struct DagAdjList *parent;
|
||||
struct DagNode *next;
|
||||
|
||||
/* Threaded evaluation routines */
|
||||
int valency; /* valency of the node is a number of parents which are not updated yet
|
||||
* this node has got.
|
||||
* Used by threaded update for faster detect whether node could be
|
||||
* updated aready.
|
||||
*/
|
||||
} DagNode;
|
||||
|
||||
typedef struct DagNodeQueueElem {
|
||||
|
||||
@@ -2652,170 +2652,121 @@ void DAG_pose_sort(Object *ob)
|
||||
ugly_hack_sorry = 1;
|
||||
}
|
||||
|
||||
/* ******************* DAG FOR THREADED UPDATR ***************** */
|
||||
/* ************************ DAG FOR THREADED UPDATE ********************* */
|
||||
|
||||
void DAG_get_independent_groups(Scene *scene, ListBase *groups)
|
||||
/* Initialize the DAG for threaded update.
|
||||
*
|
||||
* Sets up all the data needed for faster check whether DAG node is
|
||||
* updatable already (whether all the dependencies are met).
|
||||
*/
|
||||
void DAG_threaded_update_begin(Scene *scene)
|
||||
{
|
||||
#if defined __GNUC__ || defined __sun
|
||||
# define PRINT(format, args ...) { if (dag_print_dependencies) printf(format, ##args); } (void)0
|
||||
#else
|
||||
# define PRINT(format, ...) { if (dag_print_dependencies) printf(__VA_ARGS__); } (void)0
|
||||
#endif
|
||||
DagNode *node;
|
||||
|
||||
DagNode *node, *rootnode;
|
||||
DagNodeQueue *node_queue;
|
||||
DagAdjList *itA;
|
||||
bool skip = false;
|
||||
Base *base;
|
||||
ListBase *current_group = NULL;
|
||||
bool has_group = false;
|
||||
int root_count = 0;
|
||||
|
||||
PRINT("Independend groups of objects:\n");
|
||||
PRINT("\nACHTUNG!!! Order of objects in groups is reversed in outout!\n");
|
||||
PRINT("Don't ask why, just be aware of this.\n\n");
|
||||
|
||||
/* ** STEP 0: Some pre-initialization. ** */
|
||||
|
||||
rootnode = scene->theDag->DagNode.first;
|
||||
|
||||
node_queue = queue_create(DAGQUEUEALLOC);
|
||||
|
||||
/* Mark all objects as unhandled.
|
||||
* This flag is checked later to detect cycle dependencies.
|
||||
*/
|
||||
for (base = scene->base.first; base; base = base->next) {
|
||||
base->object->id.flag |= LIB_DOIT;
|
||||
}
|
||||
|
||||
/* ** STEP 1: Find all nodes which doesn't depend on other nodes ** */
|
||||
|
||||
/* Mark all nodes as not visited. */
|
||||
/* We reset valency to zero first... */
|
||||
for (node = scene->theDag->DagNode.first; node; node = node->next) {
|
||||
node->color = DAG_WHITE;
|
||||
node->valency = 0;
|
||||
}
|
||||
|
||||
/* Detect all the nodes which doesn't have parent. */
|
||||
/* ... and then iterate over all the nodes and
|
||||
* increase valency for node childs.
|
||||
*/
|
||||
for (node = scene->theDag->DagNode.first; node; node = node->next) {
|
||||
DagAdjList *itA;
|
||||
|
||||
if (node == rootnode) {
|
||||
continue;
|
||||
}
|
||||
|
||||
for (itA = node->child; itA; itA = itA->next) {
|
||||
if (itA->node != node) {
|
||||
itA->node->color = DAG_BLACK;
|
||||
itA->node->valency++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Always put root to a traverse queue. */
|
||||
rootnode->color = DAG_GRAY;
|
||||
push_stack(node_queue, rootnode);
|
||||
|
||||
/* Put all nodes which that depend on other nodes to a traverse queue.
|
||||
* Also mark this nodes as gray (since they're visited but not finished)
|
||||
* and mark all other nodes as white (they're not viisted).
|
||||
/* A bit tricky, tasks are operating with nodes, which is much
|
||||
* easier from tracking dependnecies point of view, and also
|
||||
* makes it possible to do partial object objects.
|
||||
*
|
||||
* However, currently the only way we're performing update is
|
||||
* calling object_handle_update for objects which are ready,
|
||||
* which also updates object data.
|
||||
*
|
||||
* And for this we need to know whether node represents object
|
||||
* or not.
|
||||
*
|
||||
* And we mark all the nodes which represents objects as
|
||||
* white color, All other nodes are staying gray.
|
||||
*/
|
||||
for (node = scene->theDag->DagNode.first; node; node = node->next) {
|
||||
if (node == rootnode) {
|
||||
continue;
|
||||
Base *base = scene->base.first;
|
||||
node->color = DAG_GRAY;
|
||||
while (base && base->object != node->ob) {
|
||||
base = base->next;
|
||||
}
|
||||
|
||||
if (node->color == DAG_WHITE) {
|
||||
PRINT("Root node: %s\n", dag_node_name(node));
|
||||
|
||||
node->color = DAG_GRAY;
|
||||
push_stack(node_queue, node);
|
||||
}
|
||||
else {
|
||||
if (base) {
|
||||
node->color = DAG_WHITE;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
root_count = node_queue->count;
|
||||
/* Call functor for every node in the graph which is ready for
|
||||
* update (all it's dependencies are met). Quick check for this
|
||||
* is valency == 0.
|
||||
*/
|
||||
void DAG_threaded_update_foreach_ready_node(Scene *scene,
|
||||
void (*func)(void *node, void *user_data),
|
||||
void *user_data)
|
||||
{
|
||||
DagNode *node;
|
||||
|
||||
/* ** STEP 2: Traverse the graph and collect all the groups. ** */
|
||||
|
||||
while (node_queue->count) {
|
||||
skip = false;
|
||||
node = get_top_node_queue(node_queue);
|
||||
|
||||
itA = node->child;
|
||||
while (itA != NULL) {
|
||||
if (itA->node->color == DAG_WHITE) {
|
||||
itA->node->color = DAG_GRAY;
|
||||
push_stack(node_queue, itA->node);
|
||||
skip = true;
|
||||
break;
|
||||
}
|
||||
itA = itA->next;
|
||||
for (node = scene->theDag->DagNode.first; node; node = node->next) {
|
||||
if (node->valency == 0) {
|
||||
func(node, user_data);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!skip) {
|
||||
if (node) {
|
||||
node = pop_queue(node_queue);
|
||||
if (node->ob == scene) {
|
||||
/* Whatever Thom, we are done! */
|
||||
break;
|
||||
}
|
||||
/* Will return Object ID if node represents Object,
|
||||
* and will return NULL otherwise.
|
||||
*/
|
||||
Object *DAG_threaded_update_get_node_object(void *node_v)
|
||||
{
|
||||
DagNode *node = node_v;
|
||||
|
||||
node->color = DAG_BLACK;
|
||||
if (node->color == DAG_WHITE) {
|
||||
return node->ob;
|
||||
}
|
||||
|
||||
base = scene->base.first;
|
||||
while (base && base->object != node->ob) {
|
||||
base = base->next;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (base) {
|
||||
base->object->id.flag &= ~LIB_DOIT;
|
||||
/* Returns node name, used for debug output only, atm. */
|
||||
const char *DAG_threaded_update_get_node_name(void *node_v)
|
||||
{
|
||||
DagNode *node = node_v;
|
||||
|
||||
if (has_group == false) {
|
||||
PRINT("- Next group\n");
|
||||
return dag_node_name(node);
|
||||
}
|
||||
|
||||
if (groups) {
|
||||
current_group = MEM_callocN(sizeof(ListBase), "DAG independent group");
|
||||
BLI_addhead(groups, BLI_genericNodeN(current_group));
|
||||
}
|
||||
/* This function is called when handling node is done.
|
||||
*
|
||||
* This function updates valency for all childs and
|
||||
* schedules them if they're ready.
|
||||
*/
|
||||
void DAG_threaded_update_handle_node_updated(void *node_v,
|
||||
void (*func)(void *node, void *user_data),
|
||||
void *user_data)
|
||||
{
|
||||
DagNode *node = node_v;
|
||||
DagAdjList *itA;
|
||||
|
||||
has_group = true;
|
||||
}
|
||||
for (itA = node->child; itA; itA = itA->next) {
|
||||
if (itA->node != node) {
|
||||
itA->node->valency--;
|
||||
|
||||
PRINT(" %s\n", dag_node_name(node));
|
||||
|
||||
if (groups) {
|
||||
BLI_addhead(current_group, BLI_genericNodeN(base));
|
||||
}
|
||||
}
|
||||
|
||||
if (node_queue->count < root_count) {
|
||||
root_count--;
|
||||
has_group = false;
|
||||
}
|
||||
if (itA->node->valency == 0) {
|
||||
func(itA->node, user_data);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Here we put all cyclic objects to separate groups */
|
||||
for (base = scene->base.first; base; base = base->next) {
|
||||
if (base->object->id.flag & LIB_DOIT) {
|
||||
base->object->id.flag &= ~LIB_DOIT;
|
||||
|
||||
PRINT("- Next cyclic group\n");
|
||||
PRINT(" %s\n", base->object->id.name + 2);
|
||||
|
||||
if (groups) {
|
||||
current_group = MEM_callocN(sizeof(ListBase), "DAG independent group");
|
||||
BLI_addtail(groups, BLI_genericNodeN(current_group));
|
||||
BLI_addtail(current_group, BLI_genericNodeN(base));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
queue_delete(node_queue);
|
||||
|
||||
#undef PRINT
|
||||
}
|
||||
|
||||
/* ************************ DAG DEBUGGING ********************* */
|
||||
@@ -2836,12 +2787,3 @@ void DAG_print_dependencies(Main *bmain, Scene *scene, Object *ob)
|
||||
|
||||
dag_print_dependencies = 0;
|
||||
}
|
||||
|
||||
void DAG_print_dependency_groups(Scene *scene)
|
||||
{
|
||||
dag_print_dependencies = 1;
|
||||
|
||||
DAG_get_independent_groups(scene, NULL);
|
||||
|
||||
dag_print_dependencies = 0;
|
||||
}
|
||||
|
||||
@@ -57,6 +57,7 @@
|
||||
#include "BLI_callbacks.h"
|
||||
#include "BLI_string.h"
|
||||
#include "BLI_threads.h"
|
||||
#include "BLI_task.h"
|
||||
|
||||
#include "BLF_translation.h"
|
||||
|
||||
@@ -1165,71 +1166,15 @@ static void scene_do_rb_simulation_recursive(Scene *scene, float ctime)
|
||||
|
||||
#undef USE_THREADED_UPDATE
|
||||
|
||||
/* Debugging only!
|
||||
*
|
||||
* Will enable some additional checks about whether threaded
|
||||
* update went all fine or there's some mistake in code somewhere
|
||||
* (like, missing object update or object being updated from two threads).
|
||||
*/
|
||||
#undef UPDATE_SANITY_CHECK
|
||||
|
||||
typedef struct SceneUpdateThreadHandle {
|
||||
#ifdef UPDATE_SANITY_CHECK
|
||||
int thread;
|
||||
#endif
|
||||
|
||||
/* Current scene and it's parent */
|
||||
typedef struct ThreadedObjectUpdateState {
|
||||
Scene *scene;
|
||||
Scene *scene_parent;
|
||||
} ThreadedObjectUpdateState;
|
||||
|
||||
/* Every thread handles several independent groups.
|
||||
* This is a current group.
|
||||
*/
|
||||
LinkData *current_group_link;
|
||||
static void scene_update_object_add_task(void *node, void *user_data);
|
||||
|
||||
/* This is a first group which shouldn't be handled
|
||||
* (aka it's handled by another thread as as soon
|
||||
* as current thread reaches it thread shall stop).
|
||||
*/
|
||||
LinkData *barrier_group_link;
|
||||
|
||||
/* Current link to a base which wasn't updated yet. */
|
||||
LinkData *current_base_link;
|
||||
} SceneUpdateThreadHandle;
|
||||
|
||||
static Base *scene_update_thread_next_base(SceneUpdateThreadHandle *handle)
|
||||
static void scene_update_single_object(Scene *scene, Scene *scene_parent, Object *object)
|
||||
{
|
||||
Base *base = NULL;
|
||||
|
||||
/* If current base link became NULL, it means traversing current group
|
||||
* is finished and we need to go to a next group.
|
||||
*/
|
||||
if (handle->current_base_link == NULL) {
|
||||
ListBase *current_bases;
|
||||
|
||||
handle->current_group_link = handle->current_group_link->next;
|
||||
|
||||
/* If we've reached barrier group, we need to stop current thread. */
|
||||
if (handle->current_group_link == handle->barrier_group_link) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
current_bases = handle->current_group_link->data;
|
||||
handle->current_base_link = current_bases->first;
|
||||
}
|
||||
|
||||
if (handle->current_base_link) {
|
||||
base = handle->current_base_link->data;
|
||||
handle->current_base_link = handle->current_base_link->next;
|
||||
}
|
||||
|
||||
return base;
|
||||
}
|
||||
|
||||
static void scene_update_single_base(Scene *scene_parent, Scene *scene, Base *base)
|
||||
{
|
||||
Object *object = base->object;
|
||||
|
||||
BKE_object_handle_update_ex(scene_parent, object, scene->rigidbody_world);
|
||||
|
||||
if (object->dup_group && (object->transflag & OB_DUPLIGROUP))
|
||||
@@ -1239,52 +1184,78 @@ static void scene_update_single_base(Scene *scene_parent, Scene *scene, Base *ba
|
||||
/* XXX commented out, this has depsgraph issues anyway - and this breaks setting scenes
|
||||
* (on scene-set, the base-lay is copied to ob-lay (ton nov 2012) */
|
||||
// base->lay = ob->lay;
|
||||
|
||||
#ifdef UPDATE_SANITY_CHECK
|
||||
base->object->id.flag &= ~LIB_DOIT;
|
||||
#endif
|
||||
}
|
||||
|
||||
static void scene_update_all_bases(Scene *scene_parent, Scene *scene)
|
||||
static void scene_update_single_base(Scene *scene, Scene *scene_parent, Base *base)
|
||||
{
|
||||
Object *object = base->object;
|
||||
|
||||
scene_update_single_object(scene, scene_parent, object);
|
||||
}
|
||||
|
||||
static void scene_update_all_bases(Scene *scene, Scene *scene_parent)
|
||||
{
|
||||
Base *base;
|
||||
|
||||
for (base = scene->base.first; base; base = base->next) {
|
||||
scene_update_single_base(scene_parent, scene, base);
|
||||
scene_update_single_base(scene, scene_parent, base);
|
||||
}
|
||||
}
|
||||
|
||||
static void *scene_update_tagged_thread(void *handle_v)
|
||||
static void scene_update_object_func(TaskPool *pool, void *taskdata, int threadid)
|
||||
{
|
||||
SceneUpdateThreadHandle *handle = handle_v;
|
||||
Base *base;
|
||||
ThreadedObjectUpdateState *state = (ThreadedObjectUpdateState *) BLI_task_pool_userdata(pool);
|
||||
void *node = taskdata;
|
||||
Object *object;
|
||||
|
||||
while ((base = scene_update_thread_next_base(handle))) {
|
||||
#ifdef UPDATE_SANITY_CHECK
|
||||
BLI_lock_thread(LOCK_CUSTOM1);
|
||||
printf("Thread %d: updating %s\n", handle->thread, base->object->id.name + 2);
|
||||
(void) threadid; /* Ignored when logging is disabled. */
|
||||
|
||||
BLI_assert(base->object->id.flag & LIB_DOIT);
|
||||
#endif
|
||||
object = DAG_threaded_update_get_node_object(node);
|
||||
|
||||
scene_update_single_base(handle->scene_parent, handle->scene, base);
|
||||
|
||||
#ifdef UPDATE_SANITY_CHECK
|
||||
BLI_unlock_thread(LOCK_CUSTOM1);
|
||||
#endif
|
||||
if (object) {
|
||||
// printf("Thread %d: update object %s\n", threadid, object->id.name);
|
||||
scene_update_single_object(state->scene, state->scene_parent, object);
|
||||
}
|
||||
else {
|
||||
// printf("Threda %d: update node %s\n", threadid,
|
||||
// DAG_threaded_update_get_node_name(node));
|
||||
}
|
||||
|
||||
return NULL;
|
||||
BLI_lock_thread(LOCK_CUSTOM1);
|
||||
/* Update will decrease child's valency and schedule child with zero valency. */
|
||||
DAG_threaded_update_handle_node_updated(node,scene_update_object_add_task, pool);
|
||||
BLI_unlock_thread(LOCK_CUSTOM1);
|
||||
}
|
||||
|
||||
static void scene_update_object_add_task(void *node, void *user_data)
|
||||
{
|
||||
TaskPool *task_pool = user_data;
|
||||
|
||||
BLI_task_pool_push(task_pool, scene_update_object_func, node, false, TASK_PRIORITY_LOW);
|
||||
}
|
||||
|
||||
static void scene_update_objects_threaded(Scene *scene, Scene *scene_parent)
|
||||
{
|
||||
ListBase threads;
|
||||
SceneUpdateThreadHandle *handles;
|
||||
ListBase groups = {NULL, NULL};
|
||||
LinkData *current_group_link;
|
||||
int i, tot_thread = BLI_system_thread_count();
|
||||
int tot_group, groups_per_thread, additional_group_threads;
|
||||
TaskScheduler *task_scheduler;
|
||||
TaskPool *task_pool;
|
||||
ThreadedObjectUpdateState state;
|
||||
int tot_thread = BLI_system_thread_count();
|
||||
|
||||
if (tot_thread == 1) {
|
||||
/* If only one thread is possible we don't bother self with
|
||||
* task pool, which would be an overhead in cas e of single
|
||||
* CPU core.
|
||||
*/
|
||||
scene_update_all_bases(scene, scene_parent);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Ensure malloc will go go fine from threads,
|
||||
* this is needed because we could be in main thread here
|
||||
* and malloc could be non-threda safe at this point because
|
||||
* no other jobs are running.
|
||||
*/
|
||||
BLI_begin_threaded_malloc();
|
||||
|
||||
/* XXX: Releasing DrawObject is not thread safe, but adding lock
|
||||
* around it is gonna to harm even more. So for now let's
|
||||
@@ -1305,112 +1276,35 @@ static void scene_update_objects_threaded(Scene *scene, Scene *scene_parent)
|
||||
}
|
||||
}
|
||||
|
||||
/* Get independent groups of bases. */
|
||||
DAG_get_independent_groups(scene, &groups);
|
||||
state.scene = scene;
|
||||
state.scene_parent = scene_parent;
|
||||
|
||||
/* TODO(sergey): It could make sense to make DAG_get_independent_groups
|
||||
* to return number of groups. */
|
||||
tot_group = BLI_countlist(&groups);
|
||||
task_scheduler = BLI_task_scheduler_create(tot_thread);
|
||||
task_pool = BLI_task_pool_create(task_scheduler, &state);
|
||||
|
||||
/* We don't use more threads than we've got groups. */
|
||||
tot_thread = min_ii(tot_group, tot_thread);
|
||||
if (tot_thread > 1) {
|
||||
BLI_init_threads(&threads, scene_update_tagged_thread, tot_thread);
|
||||
}
|
||||
|
||||
handles = MEM_callocN(sizeof(SceneUpdateThreadHandle) * tot_thread,
|
||||
"scene update object handles");
|
||||
|
||||
#ifdef UPDATE_SANITY_CHECK
|
||||
{
|
||||
Base *base;
|
||||
for (base = scene->base.first; base; base = base->next) {
|
||||
base->object->id.flag |= LIB_DOIT;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
/* Every thread handles groups_per_thread groups of bases. */
|
||||
current_group_link = groups.first;
|
||||
groups_per_thread = tot_group / tot_thread;
|
||||
|
||||
/* Some threads will handle more groups,
|
||||
* This happens if devision of groups didn't give integer value
|
||||
* and in this case 'additional_group_threads' of threads will
|
||||
* handle one more extra group.
|
||||
/* Initialize run-time data in the graph needed for traversing it
|
||||
* from multiple threads.
|
||||
*
|
||||
* This will mark DAG nodes as object/non-object and will calculate
|
||||
* "valency" of nodes (which is how many non-updated parents node
|
||||
* have, which helps a lot checking whether node could be scheduled
|
||||
* already or not).
|
||||
*/
|
||||
additional_group_threads = tot_group - groups_per_thread * tot_thread;
|
||||
DAG_threaded_update_begin(scene);
|
||||
|
||||
/* Fill in thread handles. */
|
||||
for (i = 0; i < tot_thread; i++) {
|
||||
SceneUpdateThreadHandle *handle = &handles[i];
|
||||
ListBase *current_bases = current_group_link->data;
|
||||
int j, current_groups_per_thread = groups_per_thread;
|
||||
/* Put all nodes which are already ready for schedule to the task pool.
|
||||
* usually its just a Scene node.
|
||||
*/
|
||||
DAG_threaded_update_foreach_ready_node(scene, scene_update_object_add_task, task_pool);
|
||||
|
||||
if (i < additional_group_threads) {
|
||||
current_groups_per_thread++;
|
||||
}
|
||||
/* work and wait until tasks are done */
|
||||
BLI_task_pool_work_and_wait(task_pool);
|
||||
|
||||
#ifdef UPDATE_SANITY_CHECK
|
||||
handle->thread = i;
|
||||
#endif
|
||||
/* free */
|
||||
BLI_task_pool_free(task_pool);
|
||||
BLI_task_scheduler_free(task_scheduler);
|
||||
|
||||
handle->scene = scene;
|
||||
handle->scene_parent = scene_parent;
|
||||
handle->current_group_link = current_group_link;
|
||||
handle->current_base_link = current_bases->first;
|
||||
|
||||
/* Find the barried link, which will also be a start group link
|
||||
* for the next thread.
|
||||
*
|
||||
* If this is the last thread, we could skip iteration cycle here.
|
||||
*/
|
||||
if (i != tot_thread - 1) {
|
||||
for (j = 0; j < current_groups_per_thread && current_group_link; j++) {
|
||||
current_group_link = current_group_link->next;
|
||||
}
|
||||
}
|
||||
else {
|
||||
current_group_link = NULL;
|
||||
}
|
||||
|
||||
handle->barrier_group_link = current_group_link;
|
||||
|
||||
if (tot_thread > 1) {
|
||||
BLI_insert_thread(&threads, handle);
|
||||
}
|
||||
}
|
||||
|
||||
if (tot_thread > 1) {
|
||||
BLI_end_threads(&threads);
|
||||
}
|
||||
else {
|
||||
scene_update_tagged_thread(handles);
|
||||
}
|
||||
|
||||
/* Free memory used by thread handles. */
|
||||
MEM_freeN(handles);
|
||||
|
||||
/* Traverse groups and fee all the memory used by them. */
|
||||
for (current_group_link = groups.first;
|
||||
current_group_link;
|
||||
current_group_link = current_group_link->next)
|
||||
{
|
||||
ListBase *current_bases = current_group_link->data;
|
||||
|
||||
BLI_freelistN(current_bases);
|
||||
MEM_freeN(current_bases);
|
||||
}
|
||||
BLI_freelistN(&groups);
|
||||
|
||||
#ifdef UPDATE_SANITY_CHECK
|
||||
{
|
||||
Base *base;
|
||||
for (base = scene->base.first; base; base = base->next) {
|
||||
BLI_assert((base->object->id.flag & LIB_DOIT) == 0);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
BLI_end_threaded_malloc();
|
||||
}
|
||||
|
||||
static void scene_update_objects(Scene *scene, Scene *scene_parent)
|
||||
@@ -1431,7 +1325,7 @@ static void scene_update_objects(Scene *scene, Scene *scene_parent)
|
||||
|
||||
#ifndef USE_THREADED_UPDATE
|
||||
if (true) {
|
||||
scene_update_all_bases(scene_parent, scene);
|
||||
scene_update_all_bases(scene, scene_parent);
|
||||
}
|
||||
else
|
||||
#endif
|
||||
@@ -1439,7 +1333,7 @@ static void scene_update_objects(Scene *scene, Scene *scene_parent)
|
||||
scene_update_objects_threaded(scene, scene_parent);
|
||||
}
|
||||
else {
|
||||
scene_update_all_bases(scene_parent, scene);
|
||||
scene_update_all_bases(scene, scene_parent);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -4075,24 +4075,6 @@ static void WM_OT_dependency_relations(wmOperatorType *ot)
|
||||
ot->exec = dependency_relations_exec;
|
||||
}
|
||||
|
||||
static int dependency_groups_exec(bContext *C, wmOperator *UNUSED(op))
|
||||
{
|
||||
Scene *scene = CTX_data_scene(C);
|
||||
|
||||
DAG_print_dependency_groups(scene);
|
||||
|
||||
return OPERATOR_FINISHED;
|
||||
}
|
||||
|
||||
static void WM_OT_dependency_groups(wmOperatorType *ot)
|
||||
{
|
||||
ot->name = "Dependency Groups";
|
||||
ot->idname = "WM_OT_dependency_groups";
|
||||
ot->description = "Print dependency graph groups to the console";
|
||||
|
||||
ot->exec = dependency_groups_exec;
|
||||
}
|
||||
|
||||
/* ******************************************************* */
|
||||
|
||||
static int wm_ndof_sensitivity_exec(bContext *UNUSED(C), wmOperator *op)
|
||||
@@ -4185,7 +4167,6 @@ void wm_operatortype_init(void)
|
||||
WM_operatortype_append(WM_OT_redraw_timer);
|
||||
WM_operatortype_append(WM_OT_memory_statistics);
|
||||
WM_operatortype_append(WM_OT_dependency_relations);
|
||||
WM_operatortype_append(WM_OT_dependency_groups);
|
||||
WM_operatortype_append(WM_OT_debug_menu);
|
||||
WM_operatortype_append(WM_OT_operator_defaults);
|
||||
WM_operatortype_append(WM_OT_splash);
|
||||
|
||||
Reference in New Issue
Block a user