/*
 * Copyright 2011-2013 Blender Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stdlib.h>
#include <string.h>

/* So ImathMath is included before our kernel_cpu_compat. */
#ifdef WITH_OSL
/* So no context pollution happens from indirectly included windows.h */
#  include "util/util_windows.h"
#  include <OSL/oslexec.h>
#endif

#include "device/device.h"
#include "device/device_denoising.h"
#include "device/device_intern.h"
#include "device/device_split_kernel.h"

// clang-format off
#include "kernel/kernel.h"
#include "kernel/kernel_compat_cpu.h"
#include "kernel/kernel_types.h"
#include "kernel/split/kernel_split_data.h"
#include "kernel/kernel_globals.h"
#include "kernel/kernel_adaptive_sampling.h"

#include "kernel/filter/filter.h"

#include "kernel/osl/osl_shader.h"
#include "kernel/osl/osl_globals.h"
// clang-format on

#include "render/buffers.h"
#include "render/coverage.h"

#include "util/util_debug.h"
#include "util/util_foreach.h"
#include "util/util_function.h"
#include "util/util_logging.h"
#include "util/util_map.h"
#include "util/util_openimagedenoise.h"
#include "util/util_opengl.h"
#include "util/util_optimization.h"
#include "util/util_progress.h"
#include "util/util_system.h"
#include "util/util_task.h"
#include "util/util_thread.h"

CCL_NAMESPACE_BEGIN

class CPUDevice;

/* Has to be outside of the class to be shared across template instantiations. */
static const char *logged_architecture = "";
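
/* Holds one function pointer per compiled CPU instruction set (SSE2 through AVX2)
 * and selects the most capable variant supported by the host at construction time. */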
template<typename F> class KernelFunctions {
 public:
  KernelFunctions()
  {
    kernel = (F)NULL;
  }

  KernelFunctions(
      F kernel_default, F kernel_sse2, F kernel_sse3, F kernel_sse41, F kernel_avx, F kernel_avx2)
  {
    const char *architecture_name = "default";
    kernel = kernel_default;

    /* Silence potential warnings about unused variables
     * when compiling without some architectures. */
    (void)kernel_sse2;
    (void)kernel_sse3;
    (void)kernel_sse41;
    (void)kernel_avx;
    (void)kernel_avx2;
#ifdef WITH_CYCLES_OPTIMIZED_KERNEL_AVX2
    if (DebugFlags().cpu.has_avx2() && system_cpu_support_avx2()) {
      architecture_name = "AVX2";
      kernel = kernel_avx2;
    }
    else
#endif
#ifdef WITH_CYCLES_OPTIMIZED_KERNEL_AVX
        if (DebugFlags().cpu.has_avx() && system_cpu_support_avx()) {
      architecture_name = "AVX";
      kernel = kernel_avx;
    }
    else
#endif
#ifdef WITH_CYCLES_OPTIMIZED_KERNEL_SSE41
        if (DebugFlags().cpu.has_sse41() && system_cpu_support_sse41()) {
      architecture_name = "SSE4.1";
      kernel = kernel_sse41;
    }
    else
#endif
#ifdef WITH_CYCLES_OPTIMIZED_KERNEL_SSE3
        if (DebugFlags().cpu.has_sse3() && system_cpu_support_sse3()) {
      architecture_name = "SSE3";
      kernel = kernel_sse3;
    }
    else
#endif
#ifdef WITH_CYCLES_OPTIMIZED_KERNEL_SSE2
        if (DebugFlags().cpu.has_sse2() && system_cpu_support_sse2()) {
      architecture_name = "SSE2";
      kernel = kernel_sse2;
    }
#else
    {
      /* Dummy branch so that the architecture if-chain below does not become
       * conditional when WITH_CYCLES_OPTIMIZED_KERNEL_SSE2 is not defined. */
    }
#endif

    if (strcmp(architecture_name, logged_architecture) != 0) {
      VLOG(1) << "Will be using " << architecture_name << " kernels.";
      logged_architecture = architecture_name;
    }
  }

  inline F operator()() const
  {
    assert(kernel);
    return kernel;
  }

 protected:
  F kernel;
};
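
/* Split kernel implementation for the CPU device; scheduling logic lives in
 * the DeviceSplitKernel base class. */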
class CPUSplitKernel : public DeviceSplitKernel {
  CPUDevice *device;

 public:
  explicit CPUSplitKernel(CPUDevice *device);

  virtual bool enqueue_split_kernel_data_init(const KernelDimensions &dim,
                                              RenderTile &rtile,
                                              int num_global_elements,
                                              device_memory &kernel_globals,
                                              device_memory &kernel_data_,
                                              device_memory &split_data,
                                              device_memory &ray_state,
                                              device_memory &queue_index,
                                              device_memory &use_queues_flag,
                                              device_memory &work_pool_wgs);

  virtual SplitKernelFunction *get_split_kernel_function(const string &kernel_name,
                                                         const DeviceRequestedFeatures &);
  virtual int2 split_kernel_local_size();
  virtual int2 split_kernel_global_size(device_memory &kg, device_memory &data, DeviceTask &task);
  virtual uint64_t state_buffer_size(device_memory &kg, device_memory &data, size_t num_threads);
};
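
/* CPU rendering device: kernels run directly in the host process, so "device"
 * pointers alias host memory throughout. */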
class CPUDevice : public Device {
 public:
  TaskPool task_pool;
  KernelGlobals kernel_globals;

  device_vector<TextureInfo> texture_info;
  bool need_texture_info;

#ifdef WITH_OSL
  OSLGlobals osl_globals;
#endif
#ifdef WITH_OPENIMAGEDENOISE
  oidn::DeviceRef oidn_device;
  oidn::FilterRef oidn_filter;
#endif
  thread_spin_lock oidn_task_lock;

  bool use_split_kernel;

  DeviceRequestedFeatures requested_features;

  KernelFunctions<void (*)(KernelGlobals *, float *, int, int, int, int, int)> path_trace_kernel;
  KernelFunctions<void (*)(KernelGlobals *, uchar4 *, float *, float, int, int, int, int)>
      convert_to_half_float_kernel;
  KernelFunctions<void (*)(KernelGlobals *, uchar4 *, float *, float, int, int, int, int)>
      convert_to_byte_kernel;
  KernelFunctions<void (*)(KernelGlobals *, uint4 *, float4 *, int, int, int, int, int)>
      shader_kernel;
  KernelFunctions<void (*)(KernelGlobals *, float *, int, int, int, int, int)> bake_kernel;

  KernelFunctions<void (*)(
      int, TileInfo *, int, int, float *, float *, float *, float *, float *, int *, int, int)>
      filter_divide_shadow_kernel;
  KernelFunctions<void (*)(
      int, TileInfo *, int, int, int, int, float *, float *, float, int *, int, int)>
      filter_get_feature_kernel;
  KernelFunctions<void (*)(int, int, int, int *, float *, float *, int, int *)>
      filter_write_feature_kernel;
  KernelFunctions<void (*)(int, int, float *, float *, float *, float *, int *, int)>
      filter_detect_outliers_kernel;
  KernelFunctions<void (*)(int, int, float *, float *, float *, float *, int *, int)>
      filter_combine_halves_kernel;

  KernelFunctions<void (*)(
      int, int, float *, float *, float *, float *, int *, int, int, int, float, float)>
      filter_nlm_calc_difference_kernel;
  KernelFunctions<void (*)(float *, float *, int *, int, int)> filter_nlm_blur_kernel;
  KernelFunctions<void (*)(float *, float *, int *, int, int)> filter_nlm_calc_weight_kernel;
  KernelFunctions<void (*)(
      int, int, float *, float *, float *, float *, float *, int *, int, int, int)>
      filter_nlm_update_output_kernel;
  KernelFunctions<void (*)(float *, float *, int *, int)> filter_nlm_normalize_kernel;

  KernelFunctions<void (*)(
      float *, TileInfo *, int, int, int, float *, int *, int *, int, int, bool, int, float)>
      filter_construct_transform_kernel;
  KernelFunctions<void (*)(int,
                           int,
                           int,
                           float *,
                           float *,
                           float *,
                           int *,
                           float *,
                           float3 *,
                           int *,
                           int *,
                           int,
                           int,
                           int,
                           int,
                           bool)>
      filter_nlm_construct_gramian_kernel;
  KernelFunctions<void (*)(int, int, int, float *, int *, float *, float3 *, int *, int)>
      filter_finalize_kernel;

  KernelFunctions<void (*)(KernelGlobals *,
                           ccl_constant KernelData *,
                           ccl_global void *,
                           int,
                           ccl_global char *,
                           int,
                           int,
                           int,
                           int,
                           int,
                           int,
                           int,
                           int,
                           ccl_global int *,
                           int,
                           ccl_global char *,
                           ccl_global unsigned int *,
                           unsigned int,
                           ccl_global float *)>
      data_init_kernel;
  unordered_map<string, KernelFunctions<void (*)(KernelGlobals *, KernelData *)>> split_kernels;

#define KERNEL_FUNCTIONS(name) \
  KERNEL_NAME_EVAL(cpu, name), KERNEL_NAME_EVAL(cpu_sse2, name), \
      KERNEL_NAME_EVAL(cpu_sse3, name), KERNEL_NAME_EVAL(cpu_sse41, name), \
      KERNEL_NAME_EVAL(cpu_avx, name), KERNEL_NAME_EVAL(cpu_avx2, name)

  CPUDevice(DeviceInfo &info_, Stats &stats_, Profiler &profiler_, bool background_)
      : Device(info_, stats_, profiler_, background_),
        texture_info(this, "__texture_info", MEM_GLOBAL),
#define REGISTER_KERNEL(name) name##_kernel(KERNEL_FUNCTIONS(name))
        REGISTER_KERNEL(path_trace),
        REGISTER_KERNEL(convert_to_half_float),
        REGISTER_KERNEL(convert_to_byte),
        REGISTER_KERNEL(shader),
        REGISTER_KERNEL(bake),
        REGISTER_KERNEL(filter_divide_shadow),
        REGISTER_KERNEL(filter_get_feature),
        REGISTER_KERNEL(filter_write_feature),
        REGISTER_KERNEL(filter_detect_outliers),
        REGISTER_KERNEL(filter_combine_halves),
        REGISTER_KERNEL(filter_nlm_calc_difference),
        REGISTER_KERNEL(filter_nlm_blur),
        REGISTER_KERNEL(filter_nlm_calc_weight),
        REGISTER_KERNEL(filter_nlm_update_output),
        REGISTER_KERNEL(filter_nlm_normalize),
        REGISTER_KERNEL(filter_construct_transform),
        REGISTER_KERNEL(filter_nlm_construct_gramian),
        REGISTER_KERNEL(filter_finalize),
        REGISTER_KERNEL(data_init)
#undef REGISTER_KERNEL
  {
    if (info.cpu_threads == 0) {
      info.cpu_threads = TaskScheduler::num_threads();
    }

#ifdef WITH_OSL
    kernel_globals.osl = &osl_globals;
#endif
    use_split_kernel = DebugFlags().cpu.split_kernel;
    if (use_split_kernel) {
      VLOG(1) << "Will be using split kernel.";
    }
    need_texture_info = false;

#define REGISTER_SPLIT_KERNEL(name) \
  split_kernels[#name] = KernelFunctions<void (*)(KernelGlobals *, KernelData *)>( \
      KERNEL_FUNCTIONS(name))
    REGISTER_SPLIT_KERNEL(path_init);
    REGISTER_SPLIT_KERNEL(scene_intersect);
    REGISTER_SPLIT_KERNEL(lamp_emission);
    REGISTER_SPLIT_KERNEL(do_volume);
    REGISTER_SPLIT_KERNEL(queue_enqueue);
    REGISTER_SPLIT_KERNEL(indirect_background);
    REGISTER_SPLIT_KERNEL(shader_setup);
    REGISTER_SPLIT_KERNEL(shader_sort);
    REGISTER_SPLIT_KERNEL(shader_eval);
    REGISTER_SPLIT_KERNEL(holdout_emission_blurring_pathtermination_ao);
    REGISTER_SPLIT_KERNEL(subsurface_scatter);
    REGISTER_SPLIT_KERNEL(direct_lighting);
    REGISTER_SPLIT_KERNEL(shadow_blocked_ao);
    REGISTER_SPLIT_KERNEL(shadow_blocked_dl);
    REGISTER_SPLIT_KERNEL(enqueue_inactive);
    REGISTER_SPLIT_KERNEL(next_iteration_setup);
    REGISTER_SPLIT_KERNEL(indirect_subsurface);
    REGISTER_SPLIT_KERNEL(buffer_update);
    REGISTER_SPLIT_KERNEL(adaptive_stopping);
    REGISTER_SPLIT_KERNEL(adaptive_filter_x);
    REGISTER_SPLIT_KERNEL(adaptive_filter_y);
    REGISTER_SPLIT_KERNEL(adaptive_adjust_samples);
#undef REGISTER_SPLIT_KERNEL
#undef KERNEL_FUNCTIONS
  }

  ~CPUDevice()
  {
    task_pool.cancel();
    texture_info.free();
  }

  virtual bool show_samples() const
  {
    return (info.cpu_threads == 1);
  }

  virtual BVHLayoutMask get_bvh_layout_mask() const
  {
    BVHLayoutMask bvh_layout_mask = BVH_LAYOUT_BVH2;
#ifdef WITH_EMBREE
    bvh_layout_mask |= BVH_LAYOUT_EMBREE;
#endif /* WITH_EMBREE */
    return bvh_layout_mask;
  }

  void load_texture_info()
  {
    if (need_texture_info) {
      texture_info.copy_to_device();
      need_texture_info = false;
    }
  }
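
  /* Device memory management. On the CPU device the "device" pointer simply aliases
   * the host pointer, so allocations are bookkeeping and copies are mostly no-ops. */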
  void mem_alloc(device_memory &mem)
  {
    if (mem.type == MEM_TEXTURE) {
      assert(!"mem_alloc not supported for textures.");
    }
    else if (mem.type == MEM_GLOBAL) {
      assert(!"mem_alloc not supported for global memory.");
    }
    else {
      if (mem.name) {
        VLOG(1) << "Buffer allocate: " << mem.name << ", "
                << string_human_readable_number(mem.memory_size()) << " bytes. ("
                << string_human_readable_size(mem.memory_size()) << ")";
      }

      if (mem.type == MEM_DEVICE_ONLY) {
        assert(!mem.host_pointer);
        size_t alignment = MIN_ALIGNMENT_CPU_DATA_TYPES;
        void *data = util_aligned_malloc(mem.memory_size(), alignment);
        mem.device_pointer = (device_ptr)data;
      }
      else {
        mem.device_pointer = (device_ptr)mem.host_pointer;
      }

      mem.device_size = mem.memory_size();
      stats.mem_alloc(mem.device_size);
    }
  }

  void mem_copy_to(device_memory &mem)
  {
    if (mem.type == MEM_GLOBAL) {
      global_free(mem);
      global_alloc(mem);
    }
    else if (mem.type == MEM_TEXTURE) {
      tex_free((device_texture &)mem);
      tex_alloc((device_texture &)mem);
    }
    else if (mem.type == MEM_PIXELS) {
      assert(!"mem_copy_to not supported for pixels.");
    }
    else {
      if (!mem.device_pointer) {
        mem_alloc(mem);
      }

      /* copy is no-op */
    }
  }

  void mem_copy_from(device_memory & /*mem*/, int /*y*/, int /*w*/, int /*h*/, int /*elem*/)
  {
    /* no-op */
  }

  void mem_zero(device_memory &mem)
  {
    if (!mem.device_pointer) {
      mem_alloc(mem);
    }

    if (mem.device_pointer) {
      memset((void *)mem.device_pointer, 0, mem.memory_size());
    }
  }

  void mem_free(device_memory &mem)
  {
    if (mem.type == MEM_GLOBAL) {
      global_free(mem);
    }
    else if (mem.type == MEM_TEXTURE) {
      tex_free((device_texture &)mem);
    }
    else if (mem.device_pointer) {
      if (mem.type == MEM_DEVICE_ONLY) {
        util_aligned_free((void *)mem.device_pointer);
      }
      mem.device_pointer = 0;
      stats.mem_free(mem.device_size);
      mem.device_size = 0;
    }
  }

  virtual device_ptr mem_alloc_sub_ptr(device_memory &mem, int offset, int /*size*/)
  {
    return (device_ptr)(((char *)mem.device_pointer) + mem.memory_elements_size(offset));
  }

  void const_copy_to(const char *name, void *host, size_t size)
  {
    kernel_const_copy(&kernel_globals, name, host, size);
  }

  void global_alloc(device_memory &mem)
  {
    VLOG(1) << "Global memory allocate: " << mem.name << ", "
            << string_human_readable_number(mem.memory_size()) << " bytes. ("
            << string_human_readable_size(mem.memory_size()) << ")";

    kernel_global_memory_copy(&kernel_globals, mem.name, mem.host_pointer, mem.data_size);

    mem.device_pointer = (device_ptr)mem.host_pointer;
    mem.device_size = mem.memory_size();
    stats.mem_alloc(mem.device_size);
  }

  void global_free(device_memory &mem)
  {
    if (mem.device_pointer) {
      mem.device_pointer = 0;
      stats.mem_free(mem.device_size);
      mem.device_size = 0;
    }
  }

  void tex_alloc(device_texture &mem)
  {
    VLOG(1) << "Texture allocate: " << mem.name << ", "
            << string_human_readable_number(mem.memory_size()) << " bytes. ("
            << string_human_readable_size(mem.memory_size()) << ")";

    mem.device_pointer = (device_ptr)mem.host_pointer;
    mem.device_size = mem.memory_size();
    stats.mem_alloc(mem.device_size);

    const uint slot = mem.slot;
    if (slot >= texture_info.size()) {
      /* Allocate some slots in advance, to reduce amount of re-allocations. */
      texture_info.resize(slot + 128);
    }

    texture_info[slot] = mem.info;
    texture_info[slot].data = (uint64_t)mem.host_pointer;
    need_texture_info = true;
  }

  void tex_free(device_texture &mem)
  {
    if (mem.device_pointer) {
      mem.device_pointer = 0;
      stats.mem_free(mem.device_size);
      mem.device_size = 0;
      need_texture_info = true;
    }
  }

  void *osl_memory()
  {
#ifdef WITH_OSL
    return &osl_globals;
#else
    return NULL;
#endif
  }

  void thread_run(DeviceTask &task)
  {
    if (task.type == DeviceTask::RENDER)
      thread_render(task);
    else if (task.type == DeviceTask::SHADER)
      thread_shader(task);
    else if (task.type == DeviceTask::FILM_CONVERT)
      thread_film_convert(task);
    else if (task.type == DeviceTask::DENOISE_BUFFER)
      thread_denoise(task);
  }
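
  /* NLM (non-local means) denoising steps, each dispatched through the
   * per-architecture filter kernels declared above. */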
  bool denoising_non_local_means(device_ptr image_ptr,
                                 device_ptr guide_ptr,
                                 device_ptr variance_ptr,
                                 device_ptr out_ptr,
                                 DenoisingTask *task)
  {
    ProfilingHelper profiling(task->profiler, PROFILING_DENOISING_NON_LOCAL_MEANS);

    int4 rect = task->rect;
    int r = task->nlm_state.r;
    int f = task->nlm_state.f;
    float a = task->nlm_state.a;
    float k_2 = task->nlm_state.k_2;

    int w = align_up(rect.z - rect.x, 4);
    int h = rect.w - rect.y;
    int stride = task->buffer.stride;
    int channel_offset = task->nlm_state.is_color ? task->buffer.pass_stride : 0;

    float *temporary_mem = (float *)task->buffer.temporary_mem.device_pointer;
    float *blurDifference = temporary_mem;
    float *difference = temporary_mem + task->buffer.pass_stride;
    float *weightAccum = temporary_mem + 2 * task->buffer.pass_stride;

    memset(weightAccum, 0, sizeof(float) * w * h);
    memset((float *)out_ptr, 0, sizeof(float) * w * h);

    for (int i = 0; i < (2 * r + 1) * (2 * r + 1); i++) {
      int dy = i / (2 * r + 1) - r;
      int dx = i % (2 * r + 1) - r;

      int local_rect[4] = {
          max(0, -dx), max(0, -dy), rect.z - rect.x - max(0, dx), rect.w - rect.y - max(0, dy)};
      filter_nlm_calc_difference_kernel()(dx,
                                          dy,
                                          (float *)guide_ptr,
                                          (float *)variance_ptr,
                                          NULL,
                                          difference,
                                          local_rect,
                                          w,
                                          channel_offset,
                                          0,
                                          a,
                                          k_2);

      filter_nlm_blur_kernel()(difference, blurDifference, local_rect, w, f);
      filter_nlm_calc_weight_kernel()(blurDifference, difference, local_rect, w, f);
      filter_nlm_blur_kernel()(difference, blurDifference, local_rect, w, f);

      filter_nlm_update_output_kernel()(dx,
                                        dy,
                                        blurDifference,
                                        (float *)image_ptr,
                                        difference,
                                        (float *)out_ptr,
                                        weightAccum,
                                        local_rect,
                                        channel_offset,
                                        stride,
                                        f);
    }

    int local_rect[4] = {0, 0, rect.z - rect.x, rect.w - rect.y};
    filter_nlm_normalize_kernel()((float *)out_ptr, weightAccum, local_rect, w);

    return true;
  }

  bool denoising_construct_transform(DenoisingTask *task)
  {
    ProfilingHelper profiling(task->profiler, PROFILING_DENOISING_CONSTRUCT_TRANSFORM);

    for (int y = 0; y < task->filter_area.w; y++) {
      for (int x = 0; x < task->filter_area.z; x++) {
        filter_construct_transform_kernel()((float *)task->buffer.mem.device_pointer,
                                            task->tile_info,
                                            x + task->filter_area.x,
                                            y + task->filter_area.y,
                                            y * task->filter_area.z + x,
                                            (float *)task->storage.transform.device_pointer,
                                            (int *)task->storage.rank.device_pointer,
                                            &task->rect.x,
                                            task->buffer.pass_stride,
                                            task->buffer.frame_stride,
                                            task->buffer.use_time,
                                            task->radius,
                                            task->pca_threshold);
      }
    }
    return true;
  }

  bool denoising_accumulate(device_ptr color_ptr,
                            device_ptr color_variance_ptr,
                            device_ptr scale_ptr,
                            int frame,
                            DenoisingTask *task)
  {
    ProfilingHelper profiling(task->profiler, PROFILING_DENOISING_RECONSTRUCT);

    float *temporary_mem = (float *)task->buffer.temporary_mem.device_pointer;
    float *difference = temporary_mem;
    float *blurDifference = temporary_mem + task->buffer.pass_stride;

    int r = task->radius;
    int frame_offset = frame * task->buffer.frame_stride;
    for (int i = 0; i < (2 * r + 1) * (2 * r + 1); i++) {
      int dy = i / (2 * r + 1) - r;
      int dx = i % (2 * r + 1) - r;

      int local_rect[4] = {max(0, -dx),
                           max(0, -dy),
                           task->reconstruction_state.source_w - max(0, dx),
                           task->reconstruction_state.source_h - max(0, dy)};
      filter_nlm_calc_difference_kernel()(dx,
                                          dy,
                                          (float *)color_ptr,
                                          (float *)color_variance_ptr,
                                          (float *)scale_ptr,
                                          difference,
                                          local_rect,
                                          task->buffer.stride,
                                          task->buffer.pass_stride,
                                          frame_offset,
                                          1.0f,
                                          task->nlm_k_2);
      filter_nlm_blur_kernel()(difference, blurDifference, local_rect, task->buffer.stride, 4);
      filter_nlm_calc_weight_kernel()(
          blurDifference, difference, local_rect, task->buffer.stride, 4);
      filter_nlm_blur_kernel()(difference, blurDifference, local_rect, task->buffer.stride, 4);
      filter_nlm_construct_gramian_kernel()(dx,
                                            dy,
                                            task->tile_info->frames[frame],
                                            blurDifference,
                                            (float *)task->buffer.mem.device_pointer,
                                            (float *)task->storage.transform.device_pointer,
                                            (int *)task->storage.rank.device_pointer,
                                            (float *)task->storage.XtWX.device_pointer,
                                            (float3 *)task->storage.XtWY.device_pointer,
                                            local_rect,
                                            &task->reconstruction_state.filter_window.x,
                                            task->buffer.stride,
                                            4,
                                            task->buffer.pass_stride,
                                            frame_offset,
                                            task->buffer.use_time);
    }

    return true;
  }

  bool denoising_solve(device_ptr output_ptr, DenoisingTask *task)
  {
    for (int y = 0; y < task->filter_area.w; y++) {
      for (int x = 0; x < task->filter_area.z; x++) {
        filter_finalize_kernel()(x,
                                 y,
                                 y * task->filter_area.z + x,
                                 (float *)output_ptr,
                                 (int *)task->storage.rank.device_pointer,
                                 (float *)task->storage.XtWX.device_pointer,
                                 (float3 *)task->storage.XtWY.device_pointer,
                                 &task->reconstruction_state.buffer_params.x,
                                 task->render_buffer.samples);
      }
    }
    return true;
  }

  bool denoising_combine_halves(device_ptr a_ptr,
                                device_ptr b_ptr,
                                device_ptr mean_ptr,
                                device_ptr variance_ptr,
                                int r,
                                int4 rect,
                                DenoisingTask *task)
  {
    ProfilingHelper profiling(task->profiler, PROFILING_DENOISING_COMBINE_HALVES);

    for (int y = rect.y; y < rect.w; y++) {
      for (int x = rect.x; x < rect.z; x++) {
        filter_combine_halves_kernel()(x,
                                       y,
                                       (float *)mean_ptr,
                                       (float *)variance_ptr,
                                       (float *)a_ptr,
                                       (float *)b_ptr,
                                       &rect.x,
                                       r);
      }
    }
    return true;
  }

  bool denoising_divide_shadow(device_ptr a_ptr,
                               device_ptr b_ptr,
                               device_ptr sample_variance_ptr,
                               device_ptr sv_variance_ptr,
                               device_ptr buffer_variance_ptr,
                               DenoisingTask *task)
  {
    ProfilingHelper profiling(task->profiler, PROFILING_DENOISING_DIVIDE_SHADOW);

    for (int y = task->rect.y; y < task->rect.w; y++) {
      for (int x = task->rect.x; x < task->rect.z; x++) {
        filter_divide_shadow_kernel()(task->render_buffer.samples,
                                      task->tile_info,
                                      x,
                                      y,
                                      (float *)a_ptr,
                                      (float *)b_ptr,
                                      (float *)sample_variance_ptr,
                                      (float *)sv_variance_ptr,
                                      (float *)buffer_variance_ptr,
                                      &task->rect.x,
                                      task->render_buffer.pass_stride,
                                      task->render_buffer.offset);
      }
    }
    return true;
  }

  bool denoising_get_feature(int mean_offset,
                             int variance_offset,
                             device_ptr mean_ptr,
                             device_ptr variance_ptr,
                             float scale,
                             DenoisingTask *task)
  {
    ProfilingHelper profiling(task->profiler, PROFILING_DENOISING_GET_FEATURE);

    for (int y = task->rect.y; y < task->rect.w; y++) {
      for (int x = task->rect.x; x < task->rect.z; x++) {
        filter_get_feature_kernel()(task->render_buffer.samples,
                                    task->tile_info,
                                    mean_offset,
                                    variance_offset,
                                    x,
                                    y,
                                    (float *)mean_ptr,
                                    (float *)variance_ptr,
                                    scale,
                                    &task->rect.x,
                                    task->render_buffer.pass_stride,
                                    task->render_buffer.offset);
      }
    }
    return true;
  }

  bool denoising_write_feature(int out_offset,
                               device_ptr from_ptr,
                               device_ptr buffer_ptr,
                               DenoisingTask *task)
  {
    for (int y = 0; y < task->filter_area.w; y++) {
      for (int x = 0; x < task->filter_area.z; x++) {
        filter_write_feature_kernel()(task->render_buffer.samples,
                                      x + task->filter_area.x,
                                      y + task->filter_area.y,
                                      &task->reconstruction_state.buffer_params.x,
                                      (float *)from_ptr,
                                      (float *)buffer_ptr,
                                      out_offset,
                                      &task->rect.x);
      }
    }
    return true;
  }

  bool denoising_detect_outliers(device_ptr image_ptr,
                                 device_ptr variance_ptr,
                                 device_ptr depth_ptr,
                                 device_ptr output_ptr,
                                 DenoisingTask *task)
  {
    ProfilingHelper profiling(task->profiler, PROFILING_DENOISING_DETECT_OUTLIERS);

    for (int y = task->rect.y; y < task->rect.w; y++) {
      for (int x = task->rect.x; x < task->rect.z; x++) {
        filter_detect_outliers_kernel()(x,
                                        y,
                                        (float *)image_ptr,
                                        (float *)variance_ptr,
                                        (float *)depth_ptr,
                                        (float *)output_ptr,
                                        &task->rect.x,
                                        task->buffer.pass_stride);
      }
    }
    return true;
  }
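
  /* Adaptive sampling support: mark converged pixels, filter the convergence flags
   * along x and y, and rescale sample counts after rendering finishes. */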
  bool adaptive_sampling_filter(KernelGlobals *kg, RenderTile &tile, int sample)
  {
    WorkTile wtile;
    wtile.x = tile.x;
    wtile.y = tile.y;
    wtile.w = tile.w;
    wtile.h = tile.h;
    wtile.offset = tile.offset;
    wtile.stride = tile.stride;
    wtile.buffer = (float *)tile.buffer;

    /* For CPU we do adaptive stopping per sample so we can stop earlier, but
     * for combined CPU + GPU rendering we match the GPU and do it per tile
     * after a given number of sample steps. */
    if (!kernel_data.integrator.adaptive_stop_per_sample) {
      for (int y = wtile.y; y < wtile.y + wtile.h; ++y) {
        for (int x = wtile.x; x < wtile.x + wtile.w; ++x) {
          const int index = wtile.offset + x + y * wtile.stride;
          float *buffer = wtile.buffer + index * kernel_data.film.pass_stride;
          kernel_do_adaptive_stopping(kg, buffer, sample);
        }
      }
    }

    bool any = false;
    for (int y = wtile.y; y < wtile.y + wtile.h; ++y) {
      any |= kernel_do_adaptive_filter_x(kg, y, &wtile);
    }
    for (int x = wtile.x; x < wtile.x + wtile.w; ++x) {
      any |= kernel_do_adaptive_filter_y(kg, x, &wtile);
    }
    return (!any);
  }

  void adaptive_sampling_post(const RenderTile &tile, KernelGlobals *kg)
  {
    float *render_buffer = (float *)tile.buffer;
    for (int y = tile.y; y < tile.y + tile.h; y++) {
      for (int x = tile.x; x < tile.x + tile.w; x++) {
        int index = tile.offset + x + y * tile.stride;
        ccl_global float *buffer = render_buffer + index * kernel_data.film.pass_stride;
        if (buffer[kernel_data.film.pass_sample_count] < 0.0f) {
          buffer[kernel_data.film.pass_sample_count] = -buffer[kernel_data.film.pass_sample_count];
          float sample_multiplier = tile.sample / max((float)tile.start_sample + 1.0f,
                                                      buffer[kernel_data.film.pass_sample_count]);
          if (sample_multiplier != 1.0f) {
            kernel_adaptive_post_adjust(kg, buffer, sample_multiplier);
          }
        }
        else {
          kernel_adaptive_post_adjust(kg, buffer, tile.sample / (tile.sample - 1.0f));
        }
      }
    }
  }
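
  /* Per-tile render loop: one sample pass over the tile per iteration, with optional
   * Cryptomatte coverage accumulation and adaptive sampling. */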
  void render(DeviceTask &task, RenderTile &tile, KernelGlobals *kg)
  {
    const bool use_coverage = kernel_data.film.cryptomatte_passes & CRYPT_ACCURATE;

    scoped_timer timer(&tile.buffers->render_time);

    Coverage coverage(kg, tile);
    if (use_coverage) {
      coverage.init_path_trace();
    }

    float *render_buffer = (float *)tile.buffer;
    int start_sample = tile.start_sample;
    int end_sample = tile.start_sample + tile.num_samples;

    /* Needed for Embree. */
    SIMD_SET_FLUSH_TO_ZERO;

    for (int sample = start_sample; sample < end_sample; sample++) {
      if (task.get_cancel() || task_pool.canceled()) {
        if (task.need_finish_queue == false)
          break;
      }

      if (tile.task == RenderTile::PATH_TRACE) {
        for (int y = tile.y; y < tile.y + tile.h; y++) {
          for (int x = tile.x; x < tile.x + tile.w; x++) {
            if (use_coverage) {
              coverage.init_pixel(x, y);
            }
            path_trace_kernel()(kg, render_buffer, sample, x, y, tile.offset, tile.stride);
          }
        }
      }
      else {
        for (int y = tile.y; y < tile.y + tile.h; y++) {
          for (int x = tile.x; x < tile.x + tile.w; x++) {
            bake_kernel()(kg, render_buffer, sample, x, y, tile.offset, tile.stride);
          }
        }
      }
      tile.sample = sample + 1;

      if (task.adaptive_sampling.use && task.adaptive_sampling.need_filter(sample)) {
        const bool stop = adaptive_sampling_filter(kg, tile, sample);
        if (stop) {
          const int num_progress_samples = end_sample - sample;
          tile.sample = end_sample;
          task.update_progress(&tile, tile.w * tile.h * num_progress_samples);
          break;
        }
      }

      task.update_progress(&tile, tile.w * tile.h);
    }
    if (use_coverage) {
      coverage.finalize();
    }

    if (task.adaptive_sampling.use) {
      adaptive_sampling_post(tile, kg);
    }
  }
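
  /* OpenImageDenoise integration: denoises either a full render buffer or a single tile,
   * serialized through a mutex since OIDN is internally multithreaded. */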
  void denoise_openimagedenoise_buffer(DeviceTask &task,
                                       float *buffer,
                                       const size_t offset,
                                       const size_t stride,
                                       const size_t x,
                                       const size_t y,
                                       const size_t w,
                                       const size_t h,
                                       const float scale)
  {
#ifdef WITH_OPENIMAGEDENOISE
    assert(openimagedenoise_supported());

    /* Only one at a time, since OpenImageDenoise itself is multithreaded for full
     * buffers, and for tiled rendering because creating multiple devices and filters
     * is slow and memory hungry as well.
     *
     * TODO: optimize tiled rendering case, by batching together denoising of many
     * tiles somehow? */
    static thread_mutex mutex;
    thread_scoped_lock lock(mutex);

    /* Create device and filter, cached for reuse. */
    if (!oidn_device) {
      oidn_device = oidn::newDevice();
      oidn_device.commit();
    }
    if (!oidn_filter) {
      oidn_filter = oidn_device.newFilter("RT");
      oidn_filter.set("hdr", true);
      oidn_filter.set("srgb", false);
    }

    /* Set images with appropriate stride for our interleaved pass storage. */
    struct {
      const char *name;
      const int offset;
      const bool scale;
      const bool use;
      array<float> scaled_buffer;
    } passes[] = {{"color", task.pass_denoising_data + DENOISING_PASS_COLOR, false, true},
                  {"albedo",
                   task.pass_denoising_data + DENOISING_PASS_ALBEDO,
                   true,
                   task.denoising.input_passes >= DENOISER_INPUT_RGB_ALBEDO},
                  {"normal",
                   task.pass_denoising_data + DENOISING_PASS_NORMAL,
                   true,
                   task.denoising.input_passes >= DENOISER_INPUT_RGB_ALBEDO_NORMAL},
                  {"output", 0, false, true},
                  { NULL,
                    0 }};

    for (int i = 0; passes[i].name; i++) {
      if (!passes[i].use) {
        continue;
      }

      const int64_t pixel_offset = offset + x + y * stride;
      const int64_t buffer_offset = (pixel_offset * task.pass_stride + passes[i].offset);
      const int64_t pixel_stride = task.pass_stride;
      const int64_t row_stride = stride * pixel_stride;

      if (passes[i].scale && scale != 1.0f) {
        /* Normalize albedo and normal passes as they are scaled by the number of samples.
         * For the color passes OIDN will perform auto-exposure making it unnecessary. */
        array<float> &scaled_buffer = passes[i].scaled_buffer;
        scaled_buffer.resize(w * h * 3);

        for (int y = 0; y < h; y++) {
          const float *pass_row = buffer + buffer_offset + y * row_stride;
          float *scaled_row = scaled_buffer.data() + y * w * 3;

          for (int x = 0; x < w; x++) {
            scaled_row[x * 3 + 0] = pass_row[x * pixel_stride + 0] * scale;
            scaled_row[x * 3 + 1] = pass_row[x * pixel_stride + 1] * scale;
            scaled_row[x * 3 + 2] = pass_row[x * pixel_stride + 2] * scale;
          }
        }

        oidn_filter.setImage(
            passes[i].name, scaled_buffer.data(), oidn::Format::Float3, w, h, 0, 0, 0);
      }
      else {
        oidn_filter.setImage(passes[i].name,
                             buffer + buffer_offset,
                             oidn::Format::Float3,
                             w,
                             h,
                             0,
                             pixel_stride * sizeof(float),
                             row_stride * sizeof(float));
      }
    }

    /* Execute filter. */
    oidn_filter.commit();
    oidn_filter.execute();
#else
    (void)task;
    (void)buffer;
    (void)offset;
    (void)stride;
    (void)x;
    (void)y;
    (void)w;
    (void)h;
    (void)scale;
#endif
  }
|
|
|
|
|
|
2020-07-09 12:20:07 +02:00
|
|
|
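
  /* Denoise either the full render buffer at once, or a single tile together
   * with its mapped neighbors for overlap, depending on the task type. */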
  void denoise_openimagedenoise(DeviceTask &task, RenderTile &rtile)
  {
    if (task.type == DeviceTask::DENOISE_BUFFER) {
      /* Copy pixels from compute device to CPU (no-op for CPU device). */
      rtile.buffers->buffer.copy_from_device();

      denoise_openimagedenoise_buffer(task,
                                      (float *)rtile.buffer,
                                      rtile.offset,
                                      rtile.stride,
                                      rtile.x,
                                      rtile.y,
                                      rtile.w,
                                      rtile.h,
                                      1.0f / rtile.sample);

      /* todo: it may be possible to avoid this copy, but we have to ensure that
       * when other code copies data from the device it doesn't overwrite the
       * denoiser buffers. */
      rtile.buffers->buffer.copy_to_device();
    }
    else {
      /* Per-tile denoising. */
      rtile.sample = rtile.start_sample + rtile.num_samples;
      const float scale = 1.0f / rtile.sample;
      const float invscale = rtile.sample;
      const size_t pass_stride = task.pass_stride;

      /* Map neighboring tiles into one buffer for denoising. */
      RenderTileNeighbors neighbors(rtile);
      task.map_neighbor_tiles(neighbors, this);
      RenderTile &center_tile = neighbors.tiles[RenderTileNeighbors::CENTER];
      rtile = center_tile;

      /* Calculate size of the tile to denoise (including overlap). The overlap
       * size was chosen empirically. OpenImageDenoise specifies an overlap size
       * of 128 but this is significantly bigger than typical tile size. */
      const int4 rect = rect_clip(rect_expand(center_tile.bounds(), 64), neighbors.bounds());
      const int2 rect_size = make_int2(rect.z - rect.x, rect.w - rect.y);

      /* Adjacent tiles are in separate memory regions, copy into single buffer. */
      array<float> merged(rect_size.x * rect_size.y * task.pass_stride);
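
      /* While merging, pixel values are normalized to the average per sample
       * (multiplied by `scale`); after denoising with a scale of 1.0 the result
       * is scaled back by the sample count when copied to the target tile. */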
      for (int i = 0; i < RenderTileNeighbors::SIZE; i++) {
        RenderTile &ntile = neighbors.tiles[i];
        if (!ntile.buffer) {
          continue;
        }

        const int xmin = max(ntile.x, rect.x);
        const int ymin = max(ntile.y, rect.y);
        const int xmax = min(ntile.x + ntile.w, rect.z);
        const int ymax = min(ntile.y + ntile.h, rect.w);

        const size_t tile_offset = ntile.offset + xmin + ymin * ntile.stride;
        const float *tile_buffer = (float *)ntile.buffer + tile_offset * pass_stride;

        const size_t merged_stride = rect_size.x;
        const size_t merged_offset = (xmin - rect.x) + (ymin - rect.y) * merged_stride;
        float *merged_buffer = merged.data() + merged_offset * pass_stride;

        for (int y = ymin; y < ymax; y++) {
          for (int x = 0; x < pass_stride * (xmax - xmin); x++) {
            merged_buffer[x] = tile_buffer[x] * scale;
          }
          tile_buffer += ntile.stride * pass_stride;
          merged_buffer += merged_stride * pass_stride;
        }
      }

      /* Denoise */
      denoise_openimagedenoise_buffer(
          task, merged.data(), 0, rect_size.x, 0, 0, rect_size.x, rect_size.y, 1.0f);

      /* Copy back result from merged buffer. */
      RenderTile &ntile = neighbors.target;
      if (ntile.buffer) {
        const int xmin = max(ntile.x, rect.x);
        const int ymin = max(ntile.y, rect.y);
        const int xmax = min(ntile.x + ntile.w, rect.z);
        const int ymax = min(ntile.y + ntile.h, rect.w);

        const size_t tile_offset = ntile.offset + xmin + ymin * ntile.stride;
        float *tile_buffer = (float *)ntile.buffer + tile_offset * pass_stride;

        const size_t merged_stride = rect_size.x;
        const size_t merged_offset = (xmin - rect.x) + (ymin - rect.y) * merged_stride;
        const float *merged_buffer = merged.data() + merged_offset * pass_stride;

        for (int y = ymin; y < ymax; y++) {
          for (int x = 0; x < pass_stride * (xmax - xmin); x += pass_stride) {
            tile_buffer[x + 0] = merged_buffer[x + 0] * invscale;
            tile_buffer[x + 1] = merged_buffer[x + 1] * invscale;
            tile_buffer[x + 2] = merged_buffer[x + 2] * invscale;
          }
          tile_buffer += ntile.stride * pass_stride;
          merged_buffer += merged_stride * pass_stride;
        }
      }

      task.unmap_neighbor_tiles(neighbors, this);
    }
  }
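
  /* NLM denoising on the CPU: bind the device's denoising kernels as callbacks
   * and run the denoiser on the given tile. */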
  void denoise_nlm(DenoisingTask &denoising, RenderTile &tile)
  {
    ProfilingHelper profiling(denoising.profiler, PROFILING_DENOISING);

    tile.sample = tile.start_sample + tile.num_samples;

    denoising.functions.construct_transform = function_bind(
        &CPUDevice::denoising_construct_transform, this, &denoising);
    denoising.functions.accumulate = function_bind(
        &CPUDevice::denoising_accumulate, this, _1, _2, _3, _4, &denoising);
    denoising.functions.solve = function_bind(&CPUDevice::denoising_solve, this, _1, &denoising);
    denoising.functions.divide_shadow = function_bind(
        &CPUDevice::denoising_divide_shadow, this, _1, _2, _3, _4, _5, &denoising);
    denoising.functions.non_local_means = function_bind(
        &CPUDevice::denoising_non_local_means, this, _1, _2, _3, _4, &denoising);
    denoising.functions.combine_halves = function_bind(
        &CPUDevice::denoising_combine_halves, this, _1, _2, _3, _4, _5, _6, &denoising);
    denoising.functions.get_feature = function_bind(
        &CPUDevice::denoising_get_feature, this, _1, _2, _3, _4, _5, &denoising);
    denoising.functions.write_feature = function_bind(
        &CPUDevice::denoising_write_feature, this, _1, _2, _3, &denoising);
    denoising.functions.detect_outliers = function_bind(
        &CPUDevice::denoising_detect_outliers, this, _1, _2, _3, _4, &denoising);

    denoising.filter_area = make_int4(tile.x, tile.y, tile.w, tile.h);
    denoising.render_buffer.samples = tile.sample;
    denoising.buffer.gpu_temporary_mem = false;

    denoising.run_denoising(tile);
  }
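
  /* Main per-thread render loop: keeps acquiring path tracing, baking and
   * denoising tiles from the task until the pool is canceled or no tiles
   * remain. */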
  void thread_render(DeviceTask &task)
  {
    if (task_pool.canceled()) {
      if (task.need_finish_queue == false)
        return;
    }

    /* allocate buffer for kernel globals */
    device_only_memory<KernelGlobals> kgbuffer(this, "kernel_globals");
    kgbuffer.alloc_to_device(1);

    KernelGlobals *kg = new ((void *)kgbuffer.device_pointer)
        KernelGlobals(thread_kernel_globals_init());

    profiler.add_state(&kg->profiler);

    CPUSplitKernel *split_kernel = NULL;
    if (use_split_kernel) {
      split_kernel = new CPUSplitKernel(this);
      if (!split_kernel->load_kernels(requested_features)) {
        thread_kernel_globals_free((KernelGlobals *)kgbuffer.device_pointer);
        kgbuffer.free();
        delete split_kernel;
        return;
      }
    }

    /* NLM denoiser. */
    DenoisingTask *denoising = NULL;

    /* OpenImageDenoise: we can only denoise with one thread at a time, so to
     * avoid waiting with mutex locks in the denoiser, we let only a single
     * thread acquire denoising tiles. */
    uint tile_types = task.tile_types;
    bool hold_denoise_lock = false;
    if ((tile_types & RenderTile::DENOISE) && task.denoising.type == DENOISER_OPENIMAGEDENOISE) {
      if (oidn_task_lock.try_lock()) {
        /* This thread acquired the lock and releases it when done rendering. */
        hold_denoise_lock = true;
      }
      else {
        /* Another thread is already denoising, skip denoising tiles here. */
        tile_types &= ~RenderTile::DENOISE;
      }
    }

    RenderTile tile;
    while (task.acquire_tile(this, tile, tile_types)) {
      if (tile.task == RenderTile::PATH_TRACE) {
        if (use_split_kernel) {
          device_only_memory<uchar> void_buffer(this, "void_buffer");
          split_kernel->path_trace(task, tile, kgbuffer, void_buffer);
        }
        else {
          render(task, tile, kg);
        }
      }
      else if (tile.task == RenderTile::BAKE) {
        render(task, tile, kg);
      }
      else if (tile.task == RenderTile::DENOISE) {
        if (task.denoising.type == DENOISER_OPENIMAGEDENOISE) {
          denoise_openimagedenoise(task, tile);
        }
        else if (task.denoising.type == DENOISER_NLM) {
          if (denoising == NULL) {
            denoising = new DenoisingTask(this, task);
            denoising->profiler = &kg->profiler;
          }
          denoise_nlm(*denoising, tile);
        }
        task.update_progress(&tile, tile.w * tile.h);
      }

      task.release_tile(tile);

      if (task_pool.canceled()) {
        if (task.need_finish_queue == false)
          break;
      }
    }

    if (hold_denoise_lock) {
      oidn_task_lock.unlock();
    }

    profiler.remove_state(&kg->profiler);

    thread_kernel_globals_free((KernelGlobals *)kgbuffer.device_pointer);
    kg->~KernelGlobals();
    kgbuffer.free();
    delete split_kernel;
    delete denoising;
  }
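
  /* Denoise the full render buffer, treated as a single tile covering the
   * task's render area. */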
  void thread_denoise(DeviceTask &task)
  {
    RenderTile tile;
    tile.x = task.x;
    tile.y = task.y;
    tile.w = task.w;
    tile.h = task.h;
    tile.buffer = task.buffer;
    tile.sample = task.sample + task.num_samples;
    tile.num_samples = task.num_samples;
    tile.start_sample = task.sample;
    tile.offset = task.offset;
    tile.stride = task.stride;
    tile.buffers = task.buffers;

    if (task.denoising.type == DENOISER_OPENIMAGEDENOISE) {
      denoise_openimagedenoise(task, tile);
    }
    else {
      DenoisingTask denoising(this, task);

      ProfilingState denoising_profiler_state;
      profiler.add_state(&denoising_profiler_state);
      denoising.profiler = &denoising_profiler_state;

      denoise_nlm(denoising, tile);

      profiler.remove_state(&denoising_profiler_state);
    }

    task.update_progress(&tile, tile.w * tile.h);
  }

  void thread_film_convert(DeviceTask &task)
  {
    float sample_scale = 1.0f / (task.sample + 1);

    if (task.rgba_half) {
      for (int y = task.y; y < task.y + task.h; y++)
        for (int x = task.x; x < task.x + task.w; x++)
          convert_to_half_float_kernel()(&kernel_globals,
                                         (uchar4 *)task.rgba_half,
                                         (float *)task.buffer,
                                         sample_scale,
                                         x,
                                         y,
                                         task.offset,
                                         task.stride);
    }
    else {
      for (int y = task.y; y < task.y + task.h; y++)
        for (int x = task.x; x < task.x + task.w; x++)
          convert_to_byte_kernel()(&kernel_globals,
                                   (uchar4 *)task.rgba_byte,
                                   (float *)task.buffer,
                                   sample_scale,
                                   x,
                                   y,
                                   task.offset,
                                   task.stride);
    }
  }

  void thread_shader(DeviceTask &task)
  {
    KernelGlobals *kg = new KernelGlobals(thread_kernel_globals_init());

    for (int sample = 0; sample < task.num_samples; sample++) {
      for (int x = task.shader_x; x < task.shader_x + task.shader_w; x++)
        shader_kernel()(kg,
                        (uint4 *)task.shader_input,
                        (float4 *)task.shader_output,
                        task.shader_eval_type,
                        task.shader_filter,
                        x,
                        task.offset,
                        sample);

      if (task.get_cancel() || task_pool.canceled())
        break;

      task.update_progress(NULL);
    }

    thread_kernel_globals_free(kg);
    delete kg;
  }

  int get_split_task_count(DeviceTask &task)
  {
    if (task.type == DeviceTask::SHADER)
      return task.get_subtask_count(info.cpu_threads, 256);
    else
      return task.get_subtask_count(info.cpu_threads);
  }

  void task_add(DeviceTask &task)
  {
    /* Load texture info. */
    load_texture_info();

    /* Split task into smaller ones. */
    list<DeviceTask> tasks;

    if (task.type == DeviceTask::DENOISE_BUFFER &&
        task.denoising.type == DENOISER_OPENIMAGEDENOISE) {
      /* Denoise entire buffer at once with OIDN, it has its own threading. */
      tasks.push_back(task);
    }
    else if (task.type == DeviceTask::SHADER) {
      task.split(tasks, info.cpu_threads, 256);
    }
    else {
      task.split(tasks, info.cpu_threads);
    }

    foreach (DeviceTask &task, tasks) {
      task_pool.push([=] {
        DeviceTask task_copy = task;
        thread_run(task_copy);
      });
    }
  }

  void task_wait()
  {
    task_pool.wait_work();
  }

  void task_cancel()
  {
    task_pool.cancel();
  }

 protected:
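  /* Per-thread copies of KernelGlobals, so each render thread has its own
   * OSL and decoupled volume sampling state. */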
  inline KernelGlobals thread_kernel_globals_init()
  {
    KernelGlobals kg = kernel_globals;
    kg.transparent_shadow_intersections = NULL;
    const int decoupled_count = sizeof(kg.decoupled_volume_steps) /
                                sizeof(*kg.decoupled_volume_steps);
    for (int i = 0; i < decoupled_count; ++i) {
      kg.decoupled_volume_steps[i] = NULL;
    }
    kg.decoupled_volume_steps_index = 0;
    kg.coverage_asset = kg.coverage_object = kg.coverage_material = NULL;
#ifdef WITH_OSL
    OSLShader::thread_init(&kg, &kernel_globals, &osl_globals);
#endif
    return kg;
  }

  inline void thread_kernel_globals_free(KernelGlobals *kg)
  {
    if (kg == NULL) {
      return;
    }

    if (kg->transparent_shadow_intersections != NULL) {
      free(kg->transparent_shadow_intersections);
    }
    const int decoupled_count = sizeof(kg->decoupled_volume_steps) /
                                sizeof(*kg->decoupled_volume_steps);
    for (int i = 0; i < decoupled_count; ++i) {
      if (kg->decoupled_volume_steps[i] != NULL) {
        free(kg->decoupled_volume_steps[i]);
      }
    }
#ifdef WITH_OSL
    OSLShader::thread_free(kg);
#endif
  }

  virtual bool load_kernels(const DeviceRequestedFeatures &requested_features_)
  {
    requested_features = requested_features_;

    return true;
  }
};

/* split kernel */

class CPUSplitKernelFunction : public SplitKernelFunction {
 public:
  CPUDevice *device;
  void (*func)(KernelGlobals *kg, KernelData *data);

  CPUSplitKernelFunction(CPUDevice *device) : device(device), func(NULL)
  {
  }
  ~CPUSplitKernelFunction()
  {
  }
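
  /* Emulate a GPU-style kernel launch on the CPU by iterating over the global
   * work size and invoking the kernel function once per work item. */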
  virtual bool enqueue(const KernelDimensions &dim,
                       device_memory &kernel_globals,
                       device_memory &data)
  {
    if (!func) {
      return false;
    }

    KernelGlobals *kg = (KernelGlobals *)kernel_globals.device_pointer;
    kg->global_size = make_int2(dim.global_size[0], dim.global_size[1]);

    for (int y = 0; y < dim.global_size[1]; y++) {
      for (int x = 0; x < dim.global_size[0]; x++) {
        kg->global_id = make_int2(x, y);

        func(kg, (KernelData *)data.device_pointer);
      }
    }

    return true;
  }
};
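
/* CPU implementation of the split kernel: work is executed as a serial loop
 * over the global size, with a 1x1 local work group size. */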
CPUSplitKernel::CPUSplitKernel(CPUDevice *device) : DeviceSplitKernel(device), device(device)
{
}

bool CPUSplitKernel::enqueue_split_kernel_data_init(const KernelDimensions &dim,
                                                    RenderTile &rtile,
                                                    int num_global_elements,
                                                    device_memory &kernel_globals,
                                                    device_memory &data,
                                                    device_memory &split_data,
                                                    device_memory &ray_state,
                                                    device_memory &queue_index,
                                                    device_memory &use_queues_flags,
                                                    device_memory &work_pool_wgs)
{
  KernelGlobals *kg = (KernelGlobals *)kernel_globals.device_pointer;
  kg->global_size = make_int2(dim.global_size[0], dim.global_size[1]);

  for (int y = 0; y < dim.global_size[1]; y++) {
    for (int x = 0; x < dim.global_size[0]; x++) {
      kg->global_id = make_int2(x, y);

      device->data_init_kernel()((KernelGlobals *)kernel_globals.device_pointer,
                                 (KernelData *)data.device_pointer,
                                 (void *)split_data.device_pointer,
                                 num_global_elements,
                                 (char *)ray_state.device_pointer,
                                 rtile.start_sample,
                                 rtile.start_sample + rtile.num_samples,
                                 rtile.x,
                                 rtile.y,
                                 rtile.w,
                                 rtile.h,
                                 rtile.offset,
                                 rtile.stride,
                                 (int *)queue_index.device_pointer,
                                 dim.global_size[0] * dim.global_size[1],
                                 (char *)use_queues_flags.device_pointer,
                                 (uint *)work_pool_wgs.device_pointer,
                                 rtile.num_samples,
                                 (float *)rtile.buffer);
    }
  }

  return true;
}

SplitKernelFunction *CPUSplitKernel::get_split_kernel_function(const string &kernel_name,
                                                               const DeviceRequestedFeatures &)
{
  CPUSplitKernelFunction *kernel = new CPUSplitKernelFunction(device);

  kernel->func = device->split_kernels[kernel_name]();
  if (!kernel->func) {
    delete kernel;
    return NULL;
  }

  return kernel;
}

int2 CPUSplitKernel::split_kernel_local_size()
{
  return make_int2(1, 1);
}

int2 CPUSplitKernel::split_kernel_global_size(device_memory & /*kg*/,
                                              device_memory & /*data*/,
                                              DeviceTask & /*task*/)
{
  return make_int2(1, 1);
}

uint64_t CPUSplitKernel::state_buffer_size(device_memory &kernel_globals,
                                           device_memory & /*data*/,
                                           size_t num_threads)
{
  KernelGlobals *kg = (KernelGlobals *)kernel_globals.device_pointer;

  return split_data_buffer_size(kg, num_threads);
}

Device *device_cpu_create(DeviceInfo &info, Stats &stats, Profiler &profiler, bool background)
{
  return new CPUDevice(info, stats, profiler, background);
}

void device_cpu_info(vector<DeviceInfo> &devices)
{
  DeviceInfo info;

  info.type = DEVICE_CPU;
  info.description = system_cpu_brand_string();
  info.id = "CPU";
  info.num = 0;
  info.has_volume_decoupled = true;
  info.has_adaptive_stop_per_sample = true;
  info.has_osl = true;
  info.has_half_images = true;
  info.has_profiling = true;
  info.denoisers = DENOISER_NLM;
  if (openimagedenoise_supported()) {
    info.denoisers |= DENOISER_OPENIMAGEDENOISE;
  }

  devices.insert(devices.begin(), info);
}

string device_cpu_capabilities()
{
  string capabilities = "";
  capabilities += system_cpu_support_sse2() ? "SSE2 " : "";
  capabilities += system_cpu_support_sse3() ? "SSE3 " : "";
  capabilities += system_cpu_support_sse41() ? "SSE41 " : "";
  capabilities += system_cpu_support_avx() ? "AVX " : "";
  capabilities += system_cpu_support_avx2() ? "AVX2" : "";
  if (capabilities[capabilities.size() - 1] == ' ')
    capabilities.resize(capabilities.size() - 1);
  return capabilities;
}

CCL_NAMESPACE_END