2023-06-14 16:52:36 +10:00
|
|
|
/* SPDX-FileCopyrightText: 2011-2022 Blender Foundation
|
|
|
|
|
*
|
|
|
|
|
* SPDX-License-Identifier: Apache-2.0 */
|
2018-11-29 02:06:30 +01:00
|
|
|
|
2025-01-03 09:54:36 -05:00
|
|
|
#include <algorithm>
|
2024-12-26 17:53:59 +01:00
|
|
|
#include <cassert>
|
2025-02-12 17:18:55 +01:00
|
|
|
#include <chrono>
|
2024-12-26 17:53:59 +01:00
|
|
|
#include <thread>
|
|
|
|
|
|
|
|
|
|
#include "util/profiling.h"
|
2018-11-29 02:06:30 +01:00
|
|
|
|
|
|
|
|
CCL_NAMESPACE_BEGIN
|
|
|
|
|
|
2024-12-29 23:13:45 +01:00
|
|
|
Profiler::Profiler()
{
  /* No sampling thread is running until start() is called. */
  do_stop_worker = true;
}
|
2018-11-29 02:06:30 +01:00
|
|
|
|
|
|
|
|
Profiler::~Profiler()
{
  /* The sampling thread must have been stopped via stop() before the
   * profiler is destroyed, otherwise the thread would outlive this object. */
  assert(worker == nullptr);
}
|
|
|
|
|
|
|
|
|
|
/* Entry point of the background sampling thread: roughly once per
 * millisecond, take a snapshot of every registered ProfilingState and
 * accumulate the per-event/per-shader/per-object sample counters. */
void Profiler::run()
{
  uint64_t updates = 0;
  auto start_time = std::chrono::system_clock::now();
  while (!do_stop_worker) {
    /* Hold the mutex while iterating so states can not be added/removed
     * concurrently (see add_state()/remove_state()). */
    thread_scoped_lock lock(mutex);
    for (ProfilingState *state : states) {
      /* Snapshot the fields once so each is read a single time per sample. */
      const uint32_t cur_event = state->event;
      const int32_t cur_shader = state->shader;
      const int32_t cur_object = state->object;

      /* The state reads/writes should be atomic, but just to be sure
       * check the values for validity anyways. */
      if (cur_event < PROFILING_NUM_EVENTS) {
        event_samples[cur_event]++;
      }

      if (cur_shader >= 0 && cur_shader < shader_samples.size()) {
        shader_samples[cur_shader]++;
      }

      if (cur_object >= 0 && cur_object < object_samples.size()) {
        object_samples[cur_object]++;
      }
    }

    lock.unlock();

    /* Relative waits always overshoot a bit, so just waiting 1ms every
     * time would cause the sampling to drift over time.
     * By keeping track of the absolute time, the wait times correct themselves -
     * if one wait overshoots a lot, the next one will be shorter to compensate. */
    updates++;
    std::this_thread::sleep_until(start_time + updates * std::chrono::milliseconds(1));
  }
}
|
|
|
|
|
|
2025-01-01 18:15:54 +01:00
|
|
|
void Profiler::reset(const int num_shaders, const int num_objects)
|
2018-11-29 02:06:30 +01:00
|
|
|
{
|
2024-12-29 17:32:00 +01:00
|
|
|
const bool running = (worker != nullptr);
|
2018-11-29 02:06:30 +01:00
|
|
|
if (running) {
|
|
|
|
|
stop();
|
|
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2018-11-29 02:06:30 +01:00
|
|
|
/* Resize and clear the accumulation vectors. */
|
|
|
|
|
shader_hits.assign(num_shaders, 0);
|
|
|
|
|
object_hits.assign(num_objects, 0);
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2018-11-29 02:06:30 +01:00
|
|
|
event_samples.assign(PROFILING_NUM_EVENTS, 0);
|
|
|
|
|
shader_samples.assign(num_shaders, 0);
|
|
|
|
|
object_samples.assign(num_objects, 0);
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2018-11-29 02:06:30 +01:00
|
|
|
if (running) {
|
|
|
|
|
start();
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Launch the background sampling thread. Must not be called while a
 * sampling thread is already running. */
void Profiler::start()
{
  assert(worker == nullptr);
  /* Clear the stop flag before the thread starts, so run() enters its loop. */
  do_stop_worker = false;
  worker = make_unique<thread>([this] { run(); });
}
|
|
|
|
|
|
|
|
|
|
/* Stop the background sampling thread if it is running. Safe to call when
 * no thread is active (no-op in that case). */
void Profiler::stop()
{
  if (worker != nullptr) {
    /* Signal the loop in run() to terminate, then wait for the thread to
     * finish before releasing it. */
    do_stop_worker = true;
    worker->join();
    worker.reset();
  }
}
|
|
|
|
|
|
|
|
|
|
/* Register a per-thread ProfilingState so the sampling thread picks it up.
 * The state is (re)initialized and its thread-local hit counters are sized
 * to match the global ones. */
void Profiler::add_state(ProfilingState *state)
{
  const thread_scoped_lock lock(mutex);

  /* Bring the state to a known baseline before it becomes visible. */
  state->event = PROFILING_UNKNOWN;
  state->shader = -1;
  state->object = -1;
  state->active = true;

  /* Size the thread-local hit counters to match the merged ones. */
  state->shader_hits.assign(shader_hits.size(), 0);
  state->object_hits.assign(object_hits.size(), 0);

  /* Add the ProfilingState to the list of sampled states.
   * It must not have been registered already. */
  assert(std::find(states.begin(), states.end(), state) == states.end());
  states.push_back(state);
}
|
|
|
|
|
|
|
|
|
|
/* Unregister a ProfilingState from sampling and merge its thread-local
 * hit counters into the global totals. */
void Profiler::remove_state(ProfilingState *state)
{
  const thread_scoped_lock lock(mutex);

  /* Remove the ProfilingState from the list of sampled states. */
  states.erase(std::remove(states.begin(), states.end(), state), states.end());
  state->active = false;

  /* Merge thread-local hit counters into the global totals. Use size_t
   * indices to avoid signed/unsigned comparison with vector::size(). */
  assert(shader_hits.size() == state->shader_hits.size());
  for (size_t i = 0; i < shader_hits.size(); i++) {
    shader_hits[i] += state->shader_hits[i];
  }

  assert(object_hits.size() == state->object_hits.size());
  for (size_t i = 0; i < object_hits.size(); i++) {
    object_hits[i] += state->object_hits[i];
  }
}
|
|
|
|
|
|
|
|
|
|
/* Return the number of samples recorded for the given event. Results may
 * only be queried while the sampling thread is stopped. */
uint64_t Profiler::get_event(ProfilingEvent event)
{
  assert(worker == nullptr);

  const uint64_t num_samples = event_samples[event];
  return num_samples;
}
|
|
|
|
|
|
2025-01-01 18:15:54 +01:00
|
|
|
bool Profiler::get_shader(const int shader, uint64_t &samples, uint64_t &hits)
|
2018-11-29 02:06:30 +01:00
|
|
|
{
|
2024-12-26 17:53:55 +01:00
|
|
|
assert(worker == nullptr);
|
2018-11-29 02:06:30 +01:00
|
|
|
if (shader_samples[shader] == 0) {
|
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
samples = shader_samples[shader];
|
|
|
|
|
hits = shader_hits[shader];
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
|
2025-01-01 18:15:54 +01:00
|
|
|
bool Profiler::get_object(const int object, uint64_t &samples, uint64_t &hits)
|
2018-11-29 02:06:30 +01:00
|
|
|
{
|
2024-12-26 17:53:55 +01:00
|
|
|
assert(worker == nullptr);
|
2018-11-29 02:06:30 +01:00
|
|
|
if (object_samples[object] == 0) {
|
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
samples = object_samples[object];
|
|
|
|
|
hits = object_hits[object];
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
|
2021-11-12 10:01:23 +01:00
|
|
|
bool Profiler::active() const
|
|
|
|
|
{
|
|
|
|
|
return (worker != nullptr);
|
|
|
|
|
}
|
|
|
|
|
|
2018-11-29 02:06:30 +01:00
|
|
|
CCL_NAMESPACE_END
|