Fix: Cycles log levels conflict with macros on some platforms

In particular DEBUG, but prefix all of them to be sure.

Pull Request: https://projects.blender.org/blender/blender/pulls/141749
Author: Brecht Van Lommel
Date: 2025-07-10 19:44:14 +02:00
Committed by: Brecht Van Lommel
Parent: 2b86b32fc3
Commit: 73fe848e07
93 changed files with 660 additions and 651 deletions
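For context on why the bare level names break: platform headers and build flags commonly define macros with the same spelling, e.g. ERROR from <wingdi.h> on Windows and DEBUG from debug build configurations, so an unprefixed enumerator or a LOG(ERROR) call can be rewritten by the preprocessor before it ever reaches the logger. The sketch below illustrates the collision and the prefixed replacement; the names are simplified stand-ins, not the actual Cycles util/log.h implementation.

```cpp
// Minimal sketch of the conflict and the fix; simplified stand-ins,
// not the actual Cycles util/log.h.
#include <iostream>

// Stand-ins for macros a platform header or build flag may already define,
// e.g. ERROR from <wingdi.h> on Windows or DEBUG from a debug build flag.
#define ERROR 0
#define DEBUG 1

// With those macros active, unprefixed names are silently rewritten:
//   enum LogLevel { DEBUG, ERROR };   // expands to { 1, 0 } and fails to compile
//   LOG(ERROR) << "message";          // expands to LOG(0) << "message"

// Prefixed enumerators and per-level macros are immune to the collision.
enum LogLevel { LOG_LEVEL_DEBUG, LOG_LEVEL_INFO, LOG_LEVEL_WARNING, LOG_LEVEL_ERROR };

class LogMessage {
 public:
  explicit LogMessage(LogLevel level) : level_(level) {}
  ~LogMessage() { std::cerr << '\n'; }  // flush one message per statement
  template<class T> LogMessage &operator<<(const T &value)
  {
    std::cerr << value;
    return *this;
  }

 private:
  LogLevel level_;
};

// One macro per level replaces the old LOG(LEVEL) form used throughout the diff below.
#define LOG_ERROR LogMessage(LOG_LEVEL_ERROR)
#define LOG_DEBUG LogMessage(LOG_LEVEL_DEBUG)

int main()
{
  LOG_ERROR << "Unknown shader node \"" << "foo" << "\"";  // streams like the call sites below
  return 0;
}
```

In the actual change both the enum values (ccl::LOG_LEVEL_*) and the streaming macros (LOG_ERROR, LOG_WARNING, LOG_INFO, LOG_INFO_IMPORTANT, LOG_WORK, LOG_STATS, LOG_DEBUG, LOG_FATAL, LOG_DFATAL) carry the prefix, which is what the hunks below rename at every call site.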


@@ -263,12 +263,12 @@ static void xml_read_shader_graph(XMLReadState &state, Shader *shader, const xml
}
if (!output) {
LOG(ERROR) << "Unknown output socket name \"" << from_node_name << "\" on \""
<< from_socket_name << "\".";
LOG_ERROR << "Unknown output socket name \"" << from_node_name << "\" on \""
<< from_socket_name << "\".";
}
}
else {
LOG(ERROR) << "Unknown shader node name \"" << from_node_name << "\"";
LOG_ERROR << "Unknown shader node name \"" << from_node_name << "\"";
}
if (graph_reader.node_map.find(to_node_name) != graph_reader.node_map.end()) {
@@ -281,12 +281,12 @@ static void xml_read_shader_graph(XMLReadState &state, Shader *shader, const xml
}
if (!input) {
LOG(ERROR) << "Unknown input socket name \"" << to_socket_name << "\" on \""
<< to_node_name << "\"";
LOG_ERROR << "Unknown input socket name \"" << to_socket_name << "\" on \""
<< to_node_name << "\"";
}
}
else {
LOG(ERROR) << "Unknown shader node name \"" << to_node_name << "\"";
LOG_ERROR << "Unknown shader node name \"" << to_node_name << "\"";
}
/* connect */
@@ -295,7 +295,7 @@ static void xml_read_shader_graph(XMLReadState &state, Shader *shader, const xml
}
}
else {
LOG(ERROR) << "Invalid from or to value for connect node.";
LOG_ERROR << "Invalid from or to value for connect node.";
}
continue;
@@ -318,17 +318,17 @@ static void xml_read_shader_graph(XMLReadState &state, Shader *shader, const xml
snode = OSLShaderManager::osl_node(graph.get(), state.scene, filepath, "");
if (!snode) {
LOG(ERROR) << "Failed to create OSL node from \"" << filepath << "\"";
LOG_ERROR << "Failed to create OSL node from \"" << filepath << "\"";
continue;
}
}
else {
LOG(ERROR) << "OSL node missing \"src\" attribute.";
LOG_ERROR << "OSL node missing \"src\" attribute.";
continue;
}
}
else {
LOG(ERROR) << "OSL node without using --shadingsys osl.";
LOG_ERROR << "OSL node without using --shadingsys osl.";
continue;
}
}
@@ -343,16 +343,16 @@ static void xml_read_shader_graph(XMLReadState &state, Shader *shader, const xml
const NodeType *node_type = NodeType::find(node_name);
if (!node_type) {
LOG(ERROR) << "Unknown shader node \"" << node.name() << "\"";
LOG_ERROR << "Unknown shader node \"" << node.name() << "\"";
continue;
}
if (node_type->type != NodeType::SHADER) {
LOG(ERROR) << "Node type \"" << node_type->name << "\" is not a shader node";
LOG_ERROR << "Node type \"" << node_type->name << "\" is not a shader node";
continue;
}
if (node_type->create == nullptr) {
LOG(ERROR) << "Can't create abstract node type \""
<< "\"";
LOG_ERROR << "Can't create abstract node type \""
<< "\"";
continue;
}
@@ -697,7 +697,7 @@ static void xml_read_state(XMLReadState &state, const xml_node node)
}
if (!found) {
LOG(ERROR) << "Unknown shader \"" << shadername << "\"";
LOG_ERROR << "Unknown shader \"" << shadername << "\"";
}
}
@@ -716,7 +716,7 @@ static void xml_read_state(XMLReadState &state, const xml_node node)
}
if (!found) {
LOG(ERROR) << "Unknown object \"" << objectname << "\"";
LOG_ERROR << "Unknown object \"" << objectname << "\"";
}
}
@@ -807,7 +807,7 @@ static void xml_read_scene(XMLReadState &state, const xml_node scene_node)
}
#endif
else {
LOG(ERROR) << "Unknown node \"" << node.name() << "\"";
LOG_ERROR << "Unknown node \"" << node.name() << "\"";
}
}
}
@@ -831,7 +831,7 @@ static void xml_read_include(XMLReadState &state, const string &src)
xml_read_scene(substate, cycles);
}
else {
LOG(ERROR) << "\"" << src << "\" read error: " << parse_result.description();
LOG_ERROR << "\"" << src << "\" read error: " << parse_result.description();
exit(EXIT_FAILURE);
}
}


@@ -126,7 +126,7 @@ half4 *OpenGLDisplayDriver::map_texture_buffer()
half4 *mapped_rgba_pixels = reinterpret_cast<half4 *>(
glMapBuffer(GL_PIXEL_UNPACK_BUFFER, GL_WRITE_ONLY));
if (!mapped_rgba_pixels) {
LOG(ERROR) << "Error mapping OpenGLDisplayDriver pixel buffer object.";
LOG_ERROR << "Error mapping OpenGLDisplayDriver pixel buffer object.";
}
if (texture_.need_zero) {
@@ -282,7 +282,7 @@ bool OpenGLDisplayDriver::gl_draw_resources_ensure()
if (!vertex_buffer_) {
glGenBuffers(1, &vertex_buffer_);
if (!vertex_buffer_) {
LOG(ERROR) << "Error creating vertex buffer.";
LOG_ERROR << "Error creating vertex buffer.";
return false;
}
}
@@ -326,7 +326,7 @@ bool OpenGLDisplayDriver::gl_texture_resources_ensure()
/* Create texture. */
glGenTextures(1, &texture_.gl_id);
if (!texture_.gl_id) {
LOG(ERROR) << "Error creating texture.";
LOG_ERROR << "Error creating texture.";
return false;
}
@@ -340,7 +340,7 @@ bool OpenGLDisplayDriver::gl_texture_resources_ensure()
/* Create PBO for the texture. */
glGenBuffers(1, &texture_.gl_pbo_id);
if (!texture_.gl_pbo_id) {
LOG(ERROR) << "Error creating texture pixel buffer object.";
LOG_ERROR << "Error creating texture pixel buffer object.";
return false;
}


@@ -50,8 +50,8 @@ static const char *FRAGMENT_SHADER =
static void shader_print_errors(const char *task, const char *log, const char *code)
{
LOG(ERROR) << "Shader: " << task << " error:";
LOG(ERROR) << "===== shader string ====";
LOG_ERROR << "Shader: " << task << " error:";
LOG_ERROR << "===== shader string ====";
std::stringstream stream(code);
string partial;
@@ -59,14 +59,14 @@ static void shader_print_errors(const char *task, const char *log, const char *c
int line = 1;
while (getline(stream, partial, '\n')) {
if (line < 10) {
LOG(ERROR) << " " << line << " " << partial;
LOG_ERROR << " " << line << " " << partial;
}
else {
LOG(ERROR) << line << " " << partial;
LOG_ERROR << line << " " << partial;
}
line++;
}
LOG(ERROR) << log;
LOG_ERROR << log;
}
static int compile_shader_program()
@@ -176,14 +176,14 @@ void OpenGLShader::create_shader_if_needed()
image_texture_location_ = glGetUniformLocation(shader_program_, "image_texture");
if (image_texture_location_ < 0) {
LOG(ERROR) << "Shader doesn't contain the 'image_texture' uniform.";
LOG_ERROR << "Shader doesn't contain the 'image_texture' uniform.";
destroy_shader();
return;
}
fullscreen_location_ = glGetUniformLocation(shader_program_, "fullscreen");
if (fullscreen_location_ < 0) {
LOG(ERROR) << "Shader doesn't contain the 'fullscreen' uniform.";
LOG_ERROR << "Shader doesn't contain the 'fullscreen' uniform.";
destroy_shader();
return;
}


@@ -67,7 +67,7 @@ static void window_display_text(int /*x*/, int /*y*/, const char *text)
static string last_text;
if (text != last_text) {
LOG(INFO_IMPORTANT) << text;
LOG_INFO_IMPORTANT << text;
last_text = text;
}
#endif
@@ -292,7 +292,7 @@ void window_main_loop(const char *title,
height,
SDL_WINDOW_RESIZABLE | SDL_WINDOW_OPENGL | SDL_WINDOW_SHOWN);
if (V.window == nullptr) {
LOG(ERROR) << "Failed to create window: " << SDL_GetError();
LOG_ERROR << "Failed to create window: " << SDL_GetError();
return;
}


@@ -764,7 +764,7 @@ void BlenderSync::sync_camera_motion(BL::RenderSettings &b_render,
/* TODO(sergey): De-duplicate calculation with camera sync. */
const float fov = 2.0f * atanf((0.5f * sensor_size) / bcam.lens / aspectratio);
if (fov != cam->get_fov()) {
LOG(WORK) << "Camera " << b_ob.name() << " FOV change detected.";
LOG_WORK << "Camera " << b_ob.name() << " FOV change detected.";
if (motion_time == 0.0f) {
cam->set_fov(fov);
}


@@ -386,7 +386,7 @@ static void ExportCurveSegments(Scene *scene, Hair *hair, ParticleCurveData *CDa
/* check allocation */
if ((hair->get_curve_keys().size() != num_keys) || (hair->num_curves() != num_curves)) {
LOG(WARNING) << "Hair memory allocation failed, clearing data.";
LOG_WARNING << "Hair memory allocation failed, clearing data.";
hair->clear(true);
}
}
@@ -450,7 +450,7 @@ static void export_hair_motion_validate_attribute(Hair *hair,
if (num_motion_keys != num_keys || !have_motion) {
/* No motion or hair "topology" changed, remove attributes again. */
if (num_motion_keys != num_keys) {
LOG(WORK) << "Hair topology changed, removing motion attribute.";
LOG_WORK << "Hair topology changed, removing motion attribute.";
}
hair->attributes.remove(ATTR_STD_MOTION_VERTEX_POSITION);
}


@@ -108,20 +108,20 @@ void BlenderFallbackDisplayShader::create_shader_if_needed()
shader_program_ = compile_fallback_shader();
if (!shader_program_) {
LOG(ERROR) << "Failed to compile fallback shader";
LOG_ERROR << "Failed to compile fallback shader";
return;
}
image_texture_location_ = GPU_shader_get_uniform(shader_program_, "image_texture");
if (image_texture_location_ < 0) {
LOG(ERROR) << "Shader doesn't contain the 'image_texture' uniform.";
LOG_ERROR << "Shader doesn't contain the 'image_texture' uniform.";
destroy_shader();
return;
}
fullscreen_location_ = GPU_shader_get_uniform(shader_program_, "fullscreen");
if (fullscreen_location_ < 0) {
LOG(ERROR) << "Shader doesn't contain the 'fullscreen' uniform.";
LOG_ERROR << "Shader doesn't contain the 'fullscreen' uniform.";
destroy_shader();
return;
}
@@ -163,7 +163,7 @@ GPUShader *BlenderDisplaySpaceShader::get_shader_program()
shader_program_ = GPU_shader_get_bound();
}
if (!shader_program_) {
LOG(ERROR) << "Error retrieving shader program for display space shader.";
LOG_ERROR << "Error retrieving shader program for display space shader.";
}
return shader_program_;
@@ -233,7 +233,7 @@ class DisplayGPUTexture {
nullptr);
if (!gpu_texture) {
LOG(ERROR) << "Error creating texture.";
LOG_ERROR << "Error creating texture.";
return false;
}
@@ -340,7 +340,7 @@ class DisplayGPUPixelBuffer {
}
if (gpu_pixel_buffer == nullptr) {
LOG(ERROR) << "Error creating texture pixel buffer object.";
LOG_ERROR << "Error creating texture pixel buffer object.";
return false;
}
@@ -470,7 +470,7 @@ BlenderDisplayDriver::~BlenderDisplayDriver()
void BlenderDisplayDriver::next_tile_begin()
{
if (!tiles_->current_tile.tile.ready_to_draw()) {
LOG(ERROR)
LOG_ERROR
<< "Unexpectedly moving to the next tile without any data provided for current tile.";
return;
}
@@ -553,7 +553,7 @@ static void update_tile_texture_pixels(const DrawTileAndPBO &tile)
const DisplayGPUTexture &texture = tile.tile.texture;
if (!DCHECK_NOTNULL(tile.buffer_object.gpu_pixel_buffer)) {
LOG(ERROR) << "Display driver tile pixel buffer unavailable.";
LOG_ERROR << "Display driver tile pixel buffer unavailable.";
return;
}
GPU_texture_update_sub_from_pixel_buffer(texture.gpu_texture,
@@ -608,12 +608,12 @@ half4 *BlenderDisplayDriver::map_texture_buffer()
{
GPUPixelBuffer *pix_buf = tiles_->current_tile.buffer_object.gpu_pixel_buffer;
if (!DCHECK_NOTNULL(pix_buf)) {
LOG(ERROR) << "Display driver tile pixel buffer unavailable.";
LOG_ERROR << "Display driver tile pixel buffer unavailable.";
return nullptr;
}
half4 *mapped_rgba_pixels = reinterpret_cast<half4 *>(GPU_pixel_buffer_map(pix_buf));
if (!mapped_rgba_pixels) {
LOG(ERROR) << "Error mapping BlenderDisplayDriver pixel buffer object.";
LOG_ERROR << "Error mapping BlenderDisplayDriver pixel buffer object.";
}
return mapped_rgba_pixels;
}
@@ -622,7 +622,7 @@ void BlenderDisplayDriver::unmap_texture_buffer()
{
GPUPixelBuffer *pix_buf = tiles_->current_tile.buffer_object.gpu_pixel_buffer;
if (!DCHECK_NOTNULL(pix_buf)) {
LOG(ERROR) << "Display driver tile pixel buffer unavailable.";
LOG_ERROR << "Display driver tile pixel buffer unavailable.";
return;
}
GPU_pixel_buffer_unmap(pix_buf);
@@ -751,7 +751,7 @@ static void draw_tile(const float2 &zoom,
const DisplayGPUTexture &texture = draw_tile.texture;
if (!DCHECK_NOTNULL(texture.gpu_texture)) {
LOG(ERROR) << "Display driver tile GPU texture resource unavailable.";
LOG_ERROR << "Display driver tile GPU texture resource unavailable.";
return;
}
@@ -866,20 +866,20 @@ void BlenderDisplayDriver::draw(const Params &params)
gpu_context_unlock();
LOG(STATS) << "Display driver number of textures: " << DisplayGPUTexture::num_used;
LOG(STATS) << "Display driver number of PBOs: " << DisplayGPUPixelBuffer::num_used;
LOG_STATS << "Display driver number of textures: " << DisplayGPUTexture::num_used;
LOG_STATS << "Display driver number of PBOs: " << DisplayGPUPixelBuffer::num_used;
}
void BlenderDisplayDriver::gpu_context_create()
{
if (!RE_engine_gpu_context_create(reinterpret_cast<RenderEngine *>(b_engine_.ptr.data))) {
LOG(ERROR) << "Error creating GPU context.";
LOG_ERROR << "Error creating GPU context.";
return;
}
/* Create global GPU resources for display driver. */
if (!gpu_resources_create()) {
LOG(ERROR) << "Error creating GPU resources for Display Driver.";
LOG_ERROR << "Error creating GPU resources for Display Driver.";
return;
}
}
@@ -913,7 +913,7 @@ bool BlenderDisplayDriver::gpu_resources_create()
{
/* Ensure context is active for resource creation. */
if (!gpu_context_enable()) {
LOG(ERROR) << "Error enabling GPU context.";
LOG_ERROR << "Error enabling GPU context.";
return false;
}
@@ -921,7 +921,7 @@ bool BlenderDisplayDriver::gpu_resources_create()
gpu_render_sync_ = GPU_fence_create();
if (!DCHECK_NOTNULL(gpu_upload_sync_) || !DCHECK_NOTNULL(gpu_render_sync_)) {
LOG(ERROR) << "Error creating GPU synchronization primitives.";
LOG_ERROR << "Error creating GPU synchronization primitives.";
assert(0);
return false;
}


@@ -16,24 +16,24 @@ void CCL_log_init()
[](const ccl::LogLevel level, const char *file_line, const char *func, const char *msg) {
const CLG_LogType *log_type = CLOG_ENSURE(&LOG);
switch (level) {
case ccl::FATAL:
case ccl::DFATAL:
case ccl::LOG_LEVEL_FATAL:
case ccl::LOG_LEVEL_DFATAL:
CLG_log_str(log_type, CLG_LEVEL_FATAL, file_line, func, msg);
return;
case ccl::ERROR:
case ccl::DERROR:
case ccl::LOG_LEVEL_ERROR:
case ccl::LOG_LEVEL_DERROR:
CLG_log_str(log_type, CLG_LEVEL_ERROR, file_line, func, msg);
return;
case ccl::WARNING:
case ccl::DWARNING:
case ccl::LOG_LEVEL_WARNING:
case ccl::LOG_LEVEL_DWARNING:
CLG_log_str(log_type, CLG_LEVEL_WARN, file_line, func, msg);
return;
case ccl::INFO:
case ccl::INFO_IMPORTANT:
case ccl::WORK:
case ccl::STATS:
case ccl::DEBUG:
case ccl::UNKNOWN:
case ccl::LOG_LEVEL_INFO:
case ccl::LOG_LEVEL_INFO_IMPORTANT:
case ccl::LOG_LEVEL_WORK:
case ccl::LOG_LEVEL_STATS:
case ccl::LOG_LEVEL_DEBUG:
case ccl::LOG_LEVEL_UNKNOWN:
CLG_log_str(log_type, CLG_LEVEL_INFO, file_line, func, msg);
return;
}
@@ -43,22 +43,22 @@ void CCL_log_init()
const CLG_LogType *log_type = CLOG_ENSURE(&LOG);
switch (log_type->level) {
case CLG_LEVEL_FATAL:
ccl::log_level_set(ccl::FATAL);
ccl::log_level_set(ccl::LOG_LEVEL_FATAL);
break;
case CLG_LEVEL_ERROR:
ccl::log_level_set(ccl::ERROR);
ccl::log_level_set(ccl::LOG_LEVEL_ERROR);
break;
case CLG_LEVEL_WARN:
ccl::log_level_set(ccl::WARNING);
ccl::log_level_set(ccl::LOG_LEVEL_WARNING);
break;
case CLG_LEVEL_INFO:
ccl::log_level_set(ccl::INFO);
ccl::log_level_set(ccl::LOG_LEVEL_INFO);
break;
case CLG_LEVEL_DEBUG:
ccl::log_level_set(ccl::WORK);
ccl::log_level_set(ccl::LOG_LEVEL_WORK);
break;
case CLG_LEVEL_TRACE:
ccl::log_level_set(ccl::DEBUG);
ccl::log_level_set(ccl::LOG_LEVEL_DEBUG);
break;
}
}


@@ -998,10 +998,10 @@ void BlenderSync::sync_mesh_motion(BObjectInfo &b_ob_info, Mesh *mesh, const int
{
/* no motion, remove attributes again */
if (b_verts_num != numverts) {
LOG(WARNING) << "Topology differs, disabling motion blur for object " << ob_name;
LOG_WARNING << "Topology differs, disabling motion blur for object " << ob_name;
}
else {
LOG(DEBUG) << "No actual deformation motion for object " << ob_name;
LOG_DEBUG << "No actual deformation motion for object " << ob_name;
}
attributes.remove(ATTR_STD_MOTION_VERTEX_POSITION);
if (attr_mN) {
@@ -1009,7 +1009,7 @@ void BlenderSync::sync_mesh_motion(BObjectInfo &b_ob_info, Mesh *mesh, const int
}
}
else if (motion_step > 0) {
LOG(DEBUG) << "Filling deformation motion for object " << ob_name;
LOG_DEBUG << "Filling deformation motion for object " << ob_name;
/* motion, fill up previous steps that we might have skipped because
* they had no motion, but we need them anyway now */
const float3 *P = mesh->get_verts().data();
@@ -1024,8 +1024,8 @@ void BlenderSync::sync_mesh_motion(BObjectInfo &b_ob_info, Mesh *mesh, const int
}
else {
if (b_verts_num != numverts) {
LOG(WARNING) << "Topology differs, discarding motion blur for object " << ob_name
<< " at time " << motion_step;
LOG_WARNING << "Topology differs, discarding motion blur for object " << ob_name
<< " at time " << motion_step;
const float3 *P = mesh->get_verts().data();
const float3 *N = (attr_N) ? attr_N->data_float3() : nullptr;
std::copy_n(P, numverts, mP);


@@ -711,7 +711,7 @@ void BlenderSync::sync_motion(BL::RenderSettings &b_render,
continue;
}
LOG(WORK) << "Synchronizing motion for the relative time " << relative_time << ".";
LOG_WORK << "Synchronizing motion for the relative time " << relative_time << ".";
/* fixed shutter time to get previous and next frame for motion pass */
const float shuttertime = scene->motion_shutter_time();


@@ -727,7 +727,7 @@ static PyObject *set_device_override_func(PyObject * /*self*/, PyObject *arg)
BlenderSession::device_override = DEVICE_MASK_ONEAPI;
}
else {
LOG(ERROR) << override << " is not a valid Cycles device.";
LOG_ERROR << override << " is not a valid Cycles device.";
Py_RETURN_FALSE;
}


@@ -473,8 +473,8 @@ void BlenderSession::render(BL::Depsgraph &b_depsgraph_)
double total_time;
double render_time;
session->progress.get_time(total_time, render_time);
LOG(INFO) << "Total render time: " << total_time;
LOG(INFO) << "Render time (without synchronization): " << render_time;
LOG_INFO << "Total render time: " << total_time;
LOG_INFO << "Render time (without synchronization): " << render_time;
}
void BlenderSession::render_frame_finish()


@@ -307,7 +307,7 @@ void BlenderSync::sync_data(BL::RenderSettings &b_render,
* false = don't delete unused shaders, not supported. */
shader_map.post_sync(false);
LOG(INFO) << "Total time spent synchronizing data: " << timer.get_time();
LOG_INFO << "Total time spent synchronizing data: " << timer.get_time();
has_updates_ = false;
}
@@ -458,7 +458,7 @@ void BlenderSync::sync_integrator(BL::ViewLayer &b_view_layer,
}
if (scrambling_distance != 1.0f) {
LOG(INFO) << "Using scrambling distance: " << scrambling_distance;
LOG_INFO << "Using scrambling distance: " << scrambling_distance;
}
integrator->set_scrambling_distance(scrambling_distance);
@@ -791,7 +791,7 @@ void BlenderSync::sync_render_passes(BL::RenderLayer &b_rlay, BL::ViewLayer &b_v
if (!get_known_pass_type(b_pass, pass_type, pass_mode)) {
if (!expected_passes.count(b_pass.name())) {
LOG(ERROR) << "Unknown pass " << b_pass.name();
LOG_ERROR << "Unknown pass " << b_pass.name();
}
continue;
}
@@ -1080,7 +1080,7 @@ DenoiseParams BlenderSync::get_denoise_params(BL::Scene &b_scene,
break;
default:
LOG(ERROR) << "Unhandled input passes enum " << input_passes;
LOG_ERROR << "Unhandled input passes enum " << input_passes;
break;
}


@@ -136,12 +136,12 @@ class BlenderSmokeLoader : public VDBImageLoader {
}
}
else {
LOG(ERROR) << "Unknown volume attribute " << Attribute::standard_name(attribute)
<< "skipping ";
LOG_ERROR << "Unknown volume attribute " << Attribute::standard_name(attribute)
<< "skipping ";
voxels[0] = 0.0f;
return false;
}
LOG(ERROR) << "Unexpected smoke volume resolution, skipping";
LOG_ERROR << "Unexpected smoke volume resolution, skipping";
#else
(void)voxels;
(void)width;


@@ -556,7 +556,7 @@ unique_ptr<BVHNode> BVHBuild::run()
if (rootnode) {
if (progress.get_cancel()) {
rootnode.reset();
LOG(WORK) << "BVH build canceled.";
LOG_WORK << "BVH build canceled.";
}
else {
/*rotate(rootnode, 4, 5);*/
@@ -564,26 +564,26 @@ unique_ptr<BVHNode> BVHBuild::run()
rootnode->update_time();
}
if (rootnode != nullptr) {
LOG(WORK) << "BVH build statistics:"
<< " Build time: " << time_dt() - build_start_time << "\n"
<< " Total number of nodes: "
<< string_human_readable_number(rootnode->getSubtreeSize(BVH_STAT_NODE_COUNT))
<< "\n"
<< " Number of inner nodes: "
<< string_human_readable_number(rootnode->getSubtreeSize(BVH_STAT_INNER_COUNT))
<< "\n"
<< " Number of leaf nodes: "
<< string_human_readable_number(rootnode->getSubtreeSize(BVH_STAT_LEAF_COUNT))
<< "\n"
<< " Number of unaligned nodes: "
<< string_human_readable_number(rootnode->getSubtreeSize(BVH_STAT_UNALIGNED_COUNT))
<< "\n"
<< " Allocation slop factor: "
<< ((prim_type.capacity() != 0) ? (float)prim_type.size() / prim_type.capacity() :
1.0f)
<< "\n"
<< " Maximum depth: "
<< string_human_readable_number(rootnode->getSubtreeSize(BVH_STAT_DEPTH));
LOG_WORK << "BVH build statistics:"
<< " Build time: " << time_dt() - build_start_time << "\n"
<< " Total number of nodes: "
<< string_human_readable_number(rootnode->getSubtreeSize(BVH_STAT_NODE_COUNT))
<< "\n"
<< " Number of inner nodes: "
<< string_human_readable_number(rootnode->getSubtreeSize(BVH_STAT_INNER_COUNT))
<< "\n"
<< " Number of leaf nodes: "
<< string_human_readable_number(rootnode->getSubtreeSize(BVH_STAT_LEAF_COUNT))
<< "\n"
<< " Number of unaligned nodes: "
<< string_human_readable_number(rootnode->getSubtreeSize(BVH_STAT_UNALIGNED_COUNT))
<< "\n"
<< " Allocation slop factor: "
<< ((prim_type.capacity() != 0) ? (float)prim_type.size() / prim_type.capacity() :
1.0f)
<< "\n"
<< " Maximum depth: "
<< string_human_readable_number(rootnode->getSubtreeSize(BVH_STAT_DEPTH));
}
}


@@ -58,7 +58,7 @@ const char *bvh_layout_name(BVHLayout layout)
case BVH_LAYOUT_ALL:
return "ALL";
}
LOG(DFATAL) << "Unsupported BVH layout was passed.";
LOG_DFATAL << "Unsupported BVH layout was passed.";
return "";
}
@@ -143,7 +143,7 @@ unique_ptr<BVH> BVH::create(const BVHParams &params,
case BVH_LAYOUT_ALL:
break;
}
LOG(DFATAL) << "Requested unsupported BVH layout.";
LOG_DFATAL << "Requested unsupported BVH layout.";
return nullptr;
}


@@ -65,7 +65,7 @@ static bool rtc_memory_monitor_func(void *userPtr, const ssize_t bytes, const bo
static void rtc_error_func(void * /*unused*/, enum RTCError /*unused*/, const char *str)
{
LOG(WARNING) << str;
LOG_WARNING << str;
}
static double progress_start_time = 0.0;


@@ -44,8 +44,8 @@ CPUDevice::CPUDevice(const DeviceInfo &info_, Stats &stats_, Profiler &profiler_
{
/* Pick any kernel, all of them are supposed to have same level of microarchitecture
* optimization. */
LOG(INFO) << "Using " << get_cpu_kernels().integrator_init_from_camera.get_uarch_name()
<< " CPU kernels.";
LOG_INFO << "Using " << get_cpu_kernels().integrator_init_from_camera.get_uarch_name()
<< " CPU kernels.";
if (info.cpu_threads == 0) {
info.cpu_threads = TaskScheduler::max_concurrency();
@@ -97,9 +97,9 @@ void CPUDevice::mem_alloc(device_memory &mem)
}
else {
if (mem.name) {
LOG(WORK) << "Buffer allocate: " << mem.name << ", "
<< string_human_readable_number(mem.memory_size()) << " bytes. ("
<< string_human_readable_size(mem.memory_size()) << ")";
LOG_WORK << "Buffer allocate: " << mem.name << ", "
<< string_human_readable_number(mem.memory_size()) << " bytes. ("
<< string_human_readable_size(mem.memory_size()) << ")";
}
if (mem.type == MEM_DEVICE_ONLY) {
@@ -199,9 +199,9 @@ void CPUDevice::const_copy_to(const char *name, void *host, const size_t size)
void CPUDevice::global_alloc(device_memory &mem)
{
LOG(WORK) << "Global memory allocate: " << mem.name << ", "
<< string_human_readable_number(mem.memory_size()) << " bytes. ("
<< string_human_readable_size(mem.memory_size()) << ")";
LOG_WORK << "Global memory allocate: " << mem.name << ", "
<< string_human_readable_number(mem.memory_size()) << " bytes. ("
<< string_human_readable_size(mem.memory_size()) << ")";
kernel_global_memory_copy(&kernel_globals, mem.name, mem.host_pointer, mem.data_size);
@@ -221,9 +221,9 @@ void CPUDevice::global_free(device_memory &mem)
void CPUDevice::tex_alloc(device_texture &mem)
{
LOG(WORK) << "Texture allocate: " << mem.name << ", "
<< string_human_readable_number(mem.memory_size()) << " bytes. ("
<< string_human_readable_size(mem.memory_size()) << ")";
LOG_WORK << "Texture allocate: " << mem.name << ", "
<< string_human_readable_number(mem.memory_size()) << " bytes. ("
<< string_human_readable_size(mem.memory_size()) << ")";
mem.device_pointer = (device_ptr)mem.host_pointer;
mem.device_size = mem.memory_size();


@@ -35,25 +35,25 @@ bool device_cuda_init()
initialized = true;
int cuew_result = cuewInit(CUEW_INIT_CUDA);
if (cuew_result == CUEW_SUCCESS) {
LOG(INFO) << "CUEW initialization succeeded";
LOG_INFO << "CUEW initialization succeeded";
if (CUDADevice::have_precompiled_kernels()) {
LOG(INFO) << "Found precompiled kernels";
LOG_INFO << "Found precompiled kernels";
result = true;
}
else if (cuewCompilerPath() != nullptr) {
LOG(INFO) << "Found CUDA compiler " << cuewCompilerPath();
LOG_INFO << "Found CUDA compiler " << cuewCompilerPath();
result = true;
}
else {
LOG(INFO) << "Neither precompiled kernels nor CUDA compiler was found,"
<< " unable to use CUDA";
LOG_INFO << "Neither precompiled kernels nor CUDA compiler was found,"
<< " unable to use CUDA";
}
}
else {
LOG(WARNING) << "CUEW initialization failed: "
<< ((cuew_result == CUEW_ERROR_ATEXIT_FAILED) ?
"Error setting up atexit() handler" :
"Error opening the library");
LOG_WARNING << "CUEW initialization failed: "
<< ((cuew_result == CUEW_ERROR_ATEXIT_FAILED) ?
"Error setting up atexit() handler" :
"Error opening the library");
}
return result;
@@ -75,7 +75,7 @@ unique_ptr<Device> device_cuda_create(const DeviceInfo &info,
(void)profiler;
(void)headless;
LOG(FATAL) << "Request to create CUDA device without compiled-in support. Should never happen.";
LOG_FATAL << "Request to create CUDA device without compiled-in support. Should never happen.";
return nullptr;
#endif
@@ -109,7 +109,7 @@ void device_cuda_info(vector<DeviceInfo> &devices)
CUresult result = device_cuda_safe_init();
if (result != CUDA_SUCCESS) {
if (result != CUDA_ERROR_NO_DEVICE) {
LOG(ERROR) << "CUDA cuInit: " << cuewErrorString(result);
LOG_ERROR << "CUDA cuInit: " << cuewErrorString(result);
}
return;
}
@@ -117,7 +117,7 @@ void device_cuda_info(vector<DeviceInfo> &devices)
int count = 0;
result = cuDeviceGetCount(&count);
if (result != CUDA_SUCCESS) {
LOG(ERROR) << "CUDA cuDeviceGetCount: " << cuewErrorString(result);
LOG_ERROR << "CUDA cuDeviceGetCount: " << cuewErrorString(result);
return;
}
@@ -128,13 +128,12 @@ void device_cuda_info(vector<DeviceInfo> &devices)
result = cuDeviceGetName(name, 256, num);
if (result != CUDA_SUCCESS) {
LOG(ERROR) << "CUDA cuDeviceGetName: " << cuewErrorString(result);
LOG_ERROR << "CUDA cuDeviceGetName: " << cuewErrorString(result);
continue;
}
if (!cudaSupportsDevice(num)) {
LOG(INFO) << "Ignoring device \"" << name
<< "\", this graphics card is no longer supported.";
LOG_INFO << "Ignoring device \"" << name << "\", this graphics card is no longer supported.";
continue;
}
@@ -194,26 +193,26 @@ void device_cuda_info(vector<DeviceInfo> &devices)
* Windows 10 even when it is, due to an issue in application profiles.
* Detect case where we expect it to be available and override. */
if (preempt_attr == 0 && (major >= 6) && system_windows_version_at_least(10, 17134)) {
LOG(INFO) << "Assuming device has compute preemption on Windows 10.";
LOG_INFO << "Assuming device has compute preemption on Windows 10.";
preempt_attr = 1;
}
# endif
if (timeout_attr && !preempt_attr) {
LOG(INFO) << "Device is recognized as display.";
LOG_INFO << "Device is recognized as display.";
info.description += " (Display)";
info.display_device = true;
display_devices.push_back(info);
}
else {
LOG(INFO) << "Device has compute preemption or is not used for display.";
LOG_INFO << "Device has compute preemption or is not used for display.";
devices.push_back(info);
}
LOG(INFO) << "Added device \"" << info.description << "\" with id \"" << info.id << "\".";
LOG_INFO << "Added device \"" << info.description << "\" with id \"" << info.id << "\".";
if (info.denoisers & DENOISER_OPENIMAGEDENOISE) {
LOG(INFO) << "Device with id \"" << info.id << "\" supports "
<< denoiserTypeToHumanReadable(DENOISER_OPENIMAGEDENOISE) << ".";
LOG_INFO << "Device with id \"" << info.id << "\" supports "
<< denoiserTypeToHumanReadable(DENOISER_OPENIMAGEDENOISE) << ".";
}
}


@@ -260,9 +260,9 @@ string CUDADevice::compile_kernel(const string &common_cflags,
if (!use_adaptive_compilation()) {
if (!force_ptx) {
const string cubin = path_get(string_printf("lib/%s_sm_%d%d.cubin.zst", name, major, minor));
LOG(INFO) << "Testing for pre-compiled kernel " << cubin << ".";
LOG_INFO << "Testing for pre-compiled kernel " << cubin << ".";
if (path_exists(cubin)) {
LOG(INFO) << "Using precompiled kernel.";
LOG_INFO << "Using precompiled kernel.";
return cubin;
}
}
@@ -272,9 +272,9 @@ string CUDADevice::compile_kernel(const string &common_cflags,
while (ptx_major >= 3) {
const string ptx = path_get(
string_printf("lib/%s_compute_%d%d.ptx.zst", name, ptx_major, ptx_minor));
LOG(INFO) << "Testing for pre-compiled kernel " << ptx << ".";
LOG_INFO << "Testing for pre-compiled kernel " << ptx << ".";
if (path_exists(ptx)) {
LOG(INFO) << "Using precompiled kernel.";
LOG_INFO << "Using precompiled kernel.";
return ptx;
}
@@ -302,9 +302,9 @@ string CUDADevice::compile_kernel(const string &common_cflags,
const string cubin_file = string_printf(
"cycles_%s_%s_%d%d_%s.%s", name, kernel_arch, major, minor, kernel_md5.c_str(), kernel_ext);
const string cubin = path_cache_get(path_join("kernels", cubin_file));
LOG(INFO) << "Testing for locally compiled kernel " << cubin << ".";
LOG_INFO << "Testing for locally compiled kernel " << cubin << ".";
if (path_exists(cubin)) {
LOG(INFO) << "Using locally compiled kernel.";
LOG_INFO << "Using locally compiled kernel.";
return cubin;
}
@@ -338,15 +338,15 @@ string CUDADevice::compile_kernel(const string &common_cflags,
}
const int nvcc_cuda_version = cuewCompilerVersion();
LOG(INFO) << "Found nvcc " << nvcc << ", CUDA version " << nvcc_cuda_version << ".";
LOG_INFO << "Found nvcc " << nvcc << ", CUDA version " << nvcc_cuda_version << ".";
if (nvcc_cuda_version < 101) {
LOG(WARNING) << "Unsupported CUDA version " << nvcc_cuda_version / 10 << "."
<< nvcc_cuda_version % 10 << ", you need CUDA 10.1 or newer";
LOG_WARNING << "Unsupported CUDA version " << nvcc_cuda_version / 10 << "."
<< nvcc_cuda_version % 10 << ", you need CUDA 10.1 or newer";
return string();
}
if (!(nvcc_cuda_version >= 102 && nvcc_cuda_version < 130)) {
LOG(WARNING) << "CUDA version " << nvcc_cuda_version / 10 << "." << nvcc_cuda_version % 10
<< "CUDA 10.1 to 12 are officially supported.";
LOG_WARNING << "CUDA version " << nvcc_cuda_version / 10 << "." << nvcc_cuda_version % 10
<< "CUDA 10.1 to 12 are officially supported.";
}
double starttime = time_dt();
@@ -371,9 +371,9 @@ string CUDADevice::compile_kernel(const string &common_cflags,
cubin.c_str(),
common_cflags.c_str());
LOG(INFO_IMPORTANT) << "Compiling " << ((use_adaptive_compilation()) ? "adaptive " : "")
<< "CUDA kernel ...";
LOG(INFO_IMPORTANT) << command;
LOG_INFO_IMPORTANT << "Compiling " << ((use_adaptive_compilation()) ? "adaptive " : "")
<< "CUDA kernel ...";
LOG_INFO_IMPORTANT << command;
# ifdef _WIN32
command = "call " + command;
@@ -393,8 +393,8 @@ string CUDADevice::compile_kernel(const string &common_cflags,
return string();
}
LOG(INFO_IMPORTANT) << "Kernel compilation finished in " << std::fixed << std::setprecision(2)
<< time_dt() - starttime << "s";
LOG_INFO_IMPORTANT << "Kernel compilation finished in " << std::fixed << std::setprecision(2)
<< time_dt() - starttime << "s";
return cubin;
}
@@ -408,8 +408,7 @@ bool CUDADevice::load_kernels(const uint kernel_features)
*/
if (cuModule) {
if (use_adaptive_compilation()) {
LOG(INFO)
<< "Skipping CUDA kernel reload for adaptive compilation, not currently supported.";
LOG_INFO << "Skipping CUDA kernel reload for adaptive compilation, not currently supported.";
}
return true;
}
@@ -498,8 +497,8 @@ void CUDADevice::reserve_local_memory(const uint kernel_features)
cuMemGetInfo(&free_after, &total);
}
LOG(INFO) << "Local memory reserved " << string_human_readable_number(free_before - free_after)
<< " bytes. (" << string_human_readable_size(free_before - free_after) << ")";
LOG_INFO << "Local memory reserved " << string_human_readable_number(free_before - free_after)
<< " bytes. (" << string_human_readable_size(free_before - free_after) << ")";
# if 0
/* For testing mapped host memory, fill up device memory. */
@@ -1015,10 +1014,10 @@ bool CUDADevice::should_use_graphics_interop(const GraphicsInteropDevice &intero
if (log) {
if (found) {
LOG(INFO) << "Graphics interop: found matching OpenGL device for CUDA";
LOG_INFO << "Graphics interop: found matching OpenGL device for CUDA";
}
else {
LOG(INFO) << "Graphics interop: no matching OpenGL device for CUDA";
LOG_INFO << "Graphics interop: no matching OpenGL device for CUDA";
}
}
@@ -1033,16 +1032,16 @@ bool CUDADevice::should_use_graphics_interop(const GraphicsInteropDevice &intero
if (log) {
if (found) {
LOG(INFO) << "Graphics interop: found matching Vulkan device for CUDA";
LOG_INFO << "Graphics interop: found matching Vulkan device for CUDA";
}
else {
LOG(INFO) << "Graphics interop: no matching Vulkan device for CUDA";
LOG_INFO << "Graphics interop: no matching Vulkan device for CUDA";
}
LOG(INFO) << "Graphics Interop: CUDA UUID "
<< string_hex(reinterpret_cast<uint8_t *>(uuid.bytes), sizeof(uuid.bytes))
<< ", Vulkan UUID "
<< string_hex(interop_device.uuid.data(), interop_device.uuid.size());
LOG_INFO << "Graphics Interop: CUDA UUID "
<< string_hex(reinterpret_cast<uint8_t *>(uuid.bytes), sizeof(uuid.bytes))
<< ", Vulkan UUID "
<< string_hex(interop_device.uuid.data(), interop_device.uuid.size());
}
return found;


@@ -52,7 +52,7 @@ void CUDADeviceGraphicsInterop::set_buffer(GraphicsInteropBuffer &interop_buffer
interop_buffer.take_handle(),
CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE);
if (result != CUDA_SUCCESS) {
LOG(ERROR) << "Error registering OpenGL buffer: " << cuewErrorString(result);
LOG_ERROR << "Error registering OpenGL buffer: " << cuewErrorString(result);
break;
}
@@ -82,7 +82,7 @@ void CUDADeviceGraphicsInterop::set_buffer(GraphicsInteropBuffer &interop_buffer
# else
close(external_memory_handle_desc.handle.fd);
# endif
LOG(ERROR) << "Error importing Vulkan memory: " << cuewErrorString(result);
LOG_ERROR << "Error importing Vulkan memory: " << cuewErrorString(result);
break;
}
@@ -101,7 +101,7 @@ void CUDADeviceGraphicsInterop::set_buffer(GraphicsInteropBuffer &interop_buffer
external_memory_device_ptr = 0;
}
LOG(ERROR) << "Error mapping Vulkan memory: " << cuewErrorString(result);
LOG_ERROR << "Error mapping Vulkan memory: " << cuewErrorString(result);
break;
}


@@ -35,7 +35,7 @@ void CUDADeviceKernels::load(CUDADevice *device)
&kernel.min_blocks, &kernel.num_threads_per_block, kernel.function, nullptr, 0, 0));
}
else {
LOG(ERROR) << "Unable to load kernel " << function_name;
LOG_ERROR << "Unable to load kernel " << function_name;
}
}


@@ -40,12 +40,12 @@ int CUDADeviceQueue::num_concurrent_states(const size_t state_size) const
num_states = max((int)(num_states * factor), 1024);
}
else {
LOG(STATS) << "CYCLES_CONCURRENT_STATES_FACTOR evaluated to 0";
LOG_STATS << "CYCLES_CONCURRENT_STATES_FACTOR evaluated to 0";
}
}
LOG(STATS) << "GPU queue concurrent states: " << num_states << ", using up to "
<< string_human_readable_size(num_states * state_size);
LOG_STATS << "GPU queue concurrent states: " << num_states << ", using up to "
<< string_human_readable_size(num_states * state_size);
return num_states;
}


@@ -54,7 +54,7 @@ void Device::set_error(const string &error)
if (!have_error()) {
error_msg = error;
}
LOG(ERROR) << error;
LOG_ERROR << error;
fflush(stderr);
}
@@ -419,8 +419,8 @@ DeviceInfo Device::get_multi_device(const vector<DeviceInfo> &subdevices,
const int orig_cpu_threads = (threads) ? threads : TaskScheduler::max_concurrency();
const int cpu_threads = max(orig_cpu_threads - (subdevices.size() - 1), size_t(0));
LOG(INFO) << "CPU render threads reduced from " << orig_cpu_threads << " to "
<< cpu_threads << ", to dedicate to GPU.";
LOG_INFO << "CPU render threads reduced from " << orig_cpu_threads << " to " << cpu_threads
<< ", to dedicate to GPU.";
if (cpu_threads >= 1) {
DeviceInfo cpu_device = device;
@@ -432,7 +432,7 @@ DeviceInfo Device::get_multi_device(const vector<DeviceInfo> &subdevices,
}
}
else {
LOG(INFO) << "CPU render threads disabled for interactive render.";
LOG_INFO << "CPU render threads disabled for interactive render.";
continue;
}
}
@@ -483,7 +483,7 @@ void Device::free_memory()
unique_ptr<DeviceQueue> Device::gpu_queue_create()
{
LOG(FATAL) << "Device does not support queues.";
LOG_FATAL << "Device does not support queues.";
return nullptr;
}
@@ -497,7 +497,7 @@ const CPUKernels &Device::get_cpu_kernels()
void Device::get_cpu_kernel_thread_globals(
vector<ThreadKernelGlobalsCPU> & /*kernel_thread_globals*/)
{
LOG(FATAL) << "Device does not support CPU kernels.";
LOG_FATAL << "Device does not support CPU kernels.";
}
OSLGlobals *Device::get_cpu_osl_memory()
@@ -507,7 +507,7 @@ OSLGlobals *Device::get_cpu_osl_memory()
void *Device::get_guiding_device() const
{
LOG(ERROR) << "Request guiding field from a device which does not support it.";
LOG_ERROR << "Request guiding field from a device which does not support it.";
return nullptr;
}
@@ -553,7 +553,7 @@ void GPUDevice::init_host_memory(const size_t preferred_texture_headroom,
}
}
else {
LOG(WARNING) << "Mapped host memory disabled, failed to get system RAM";
LOG_WARNING << "Mapped host memory disabled, failed to get system RAM";
map_host_limit = 0;
}
@@ -566,8 +566,8 @@ void GPUDevice::init_host_memory(const size_t preferred_texture_headroom,
device_texture_headroom = preferred_texture_headroom > 0 ? preferred_texture_headroom :
128 * 1024 * 1024LL; // 128MB
LOG(INFO) << "Mapped host memory limit set to " << string_human_readable_number(map_host_limit)
<< " bytes. (" << string_human_readable_size(map_host_limit) << ")";
LOG_INFO << "Mapped host memory limit set to " << string_human_readable_number(map_host_limit)
<< " bytes. (" << string_human_readable_size(map_host_limit) << ")";
}
void GPUDevice::move_textures_to_host(size_t size, const size_t headroom, const bool for_texture)
@@ -628,7 +628,7 @@ void GPUDevice::move_textures_to_host(size_t size, const size_t headroom, const
* multiple backend devices could be moving the memory. The
* first one will do it, and the rest will adopt the pointer. */
if (max_mem) {
LOG(WORK) << "Move memory from device to host: " << max_mem->name;
LOG_WORK << "Move memory from device to host: " << max_mem->name;
/* Potentially need to call back into multi device, so pointer mapping
* and peer devices are updated. This is also necessary since the device
@@ -727,9 +727,9 @@ GPUDevice::Mem *GPUDevice::generic_alloc(device_memory &mem, const size_t pitch_
}
if (mem.name) {
LOG(WORK) << "Buffer allocate: " << mem.name << ", "
<< string_human_readable_number(mem.memory_size()) << " bytes. ("
<< string_human_readable_size(mem.memory_size()) << ")" << status;
LOG_WORK << "Buffer allocate: " << mem.name << ", "
<< string_human_readable_number(mem.memory_size()) << " bytes. ("
<< string_human_readable_size(mem.memory_size()) << ")" << status;
}
mem.device_pointer = (device_ptr)device_pointer;


@@ -40,34 +40,34 @@ bool device_hip_init()
int hipew_result = hipewInit(HIPEW_INIT_HIP);
if (hipew_result == HIPEW_SUCCESS) {
LOG(INFO) << "HIPEW initialization succeeded";
LOG_INFO << "HIPEW initialization succeeded";
if (!hipSupportsDriver()) {
LOG(WARNING) << "Driver version is too old";
LOG_WARNING << "Driver version is too old";
}
else if (HIPDevice::have_precompiled_kernels()) {
LOG(INFO) << "Found precompiled kernels";
LOG_INFO << "Found precompiled kernels";
result = true;
}
else if (hipewCompilerPath() != nullptr) {
LOG(INFO) << "Found HIPCC " << hipewCompilerPath();
LOG_INFO << "Found HIPCC " << hipewCompilerPath();
result = true;
}
else {
LOG(INFO) << "Neither precompiled kernels nor HIPCC was found,"
<< " unable to use HIP";
LOG_INFO << "Neither precompiled kernels nor HIPCC was found,"
<< " unable to use HIP";
}
}
else {
if (hipew_result == HIPEW_ERROR_ATEXIT_FAILED) {
LOG(WARNING) << "HIPEW initialization failed: Error setting up atexit() handler";
LOG_WARNING << "HIPEW initialization failed: Error setting up atexit() handler";
}
else if (hipew_result == HIPEW_ERROR_OLD_DRIVER) {
LOG(WARNING)
LOG_WARNING
<< "HIPEW initialization failed: Driver version too old, requires AMD Radeon Pro "
"24.Q2 driver or newer";
}
else {
LOG(WARNING) << "HIPEW initialization failed: Error opening HIP dynamic library";
LOG_WARNING << "HIPEW initialization failed: Error opening HIP dynamic library";
}
}
@@ -95,7 +95,7 @@ unique_ptr<Device> device_hip_create(const DeviceInfo &info,
(void)profiler;
(void)headless;
LOG(FATAL) << "Request to create HIP device without compiled-in support. Should never happen.";
LOG_FATAL << "Request to create HIP device without compiled-in support. Should never happen.";
return nullptr;
#endif
@@ -129,7 +129,7 @@ void device_hip_info(vector<DeviceInfo> &devices)
hipError_t result = device_hip_safe_init();
if (result != hipSuccess) {
if (result != hipErrorNoDevice) {
LOG(ERROR) << "HIP hipInit: " << hipewErrorString(result);
LOG_ERROR << "HIP hipInit: " << hipewErrorString(result);
}
return;
}
@@ -137,7 +137,7 @@ void device_hip_info(vector<DeviceInfo> &devices)
int count = 0;
result = hipGetDeviceCount(&count);
if (result != hipSuccess) {
LOG(ERROR) << "HIP hipGetDeviceCount: " << hipewErrorString(result);
LOG_ERROR << "HIP hipGetDeviceCount: " << hipewErrorString(result);
return;
}
@@ -154,7 +154,7 @@ void device_hip_info(vector<DeviceInfo> &devices)
result = hipDeviceGetName(name, 256, num);
if (result != hipSuccess) {
LOG(ERROR) << "HIP hipDeviceGetName: " << hipewErrorString(result);
LOG_ERROR << "HIP hipDeviceGetName: " << hipewErrorString(result);
continue;
}
@@ -219,21 +219,21 @@ void device_hip_info(vector<DeviceInfo> &devices)
hipDeviceGetAttribute(&timeout_attr, hipDeviceAttributeKernelExecTimeout, num);
if (timeout_attr) {
LOG(INFO) << "Device is recognized as display.";
LOG_INFO << "Device is recognized as display.";
info.description += " (Display)";
info.display_device = true;
display_devices.push_back(info);
}
else {
LOG(INFO) << "Device has compute preemption or is not used for display.";
LOG_INFO << "Device has compute preemption or is not used for display.";
devices.push_back(info);
}
LOG(INFO) << "Added device \"" << info.description << "\" with id \"" << info.id << "\".";
LOG_INFO << "Added device \"" << info.description << "\" with id \"" << info.id << "\".";
if (info.denoisers & DENOISER_OPENIMAGEDENOISE) {
LOG(INFO) << "Device with id \"" << info.id << "\" supports "
<< denoiserTypeToHumanReadable(DENOISER_OPENIMAGEDENOISE) << ".";
LOG_INFO << "Device with id \"" << info.id << "\" supports "
<< denoiserTypeToHumanReadable(DENOISER_OPENIMAGEDENOISE) << ".";
}
}


@@ -245,9 +245,9 @@ string HIPDevice::compile_kernel(const uint kernel_features, const char *name, c
/* Attempt to use kernel provided with Blender. */
if (!use_adaptive_compilation()) {
const string fatbin = path_get(string_printf("lib/%s_%s.fatbin.zst", name, arch.c_str()));
LOG(INFO) << "Testing for pre-compiled kernel " << fatbin << ".";
LOG_INFO << "Testing for pre-compiled kernel " << fatbin << ".";
if (path_exists(fatbin)) {
LOG(INFO) << "Using precompiled kernel.";
LOG_INFO << "Using precompiled kernel.";
return fatbin;
}
}
@@ -283,9 +283,9 @@ string HIPDevice::compile_kernel(const uint kernel_features, const char *name, c
const string fatbin_file = string_printf(
"cycles_%s_%s_%s", name, arch.c_str(), kernel_md5.c_str());
const string fatbin = path_cache_get(path_join("kernels", fatbin_file));
LOG(INFO) << "Testing for locally compiled kernel " << fatbin << ".";
LOG_INFO << "Testing for locally compiled kernel " << fatbin << ".";
if (path_exists(fatbin)) {
LOG(INFO) << "Using locally compiled kernel.";
LOG_INFO << "Using locally compiled kernel.";
return fatbin;
}
@@ -328,7 +328,7 @@ string HIPDevice::compile_kernel(const uint kernel_features, const char *name, c
}
# endif
const int hipcc_hip_version = hipewCompilerVersion();
LOG(INFO) << "Found hipcc " << hipcc << ", HIP version " << hipcc_hip_version << ".";
LOG_INFO << "Found hipcc " << hipcc << ", HIP version " << hipcc_hip_version << ".";
double starttime = time_dt();
@@ -346,8 +346,8 @@ string HIPDevice::compile_kernel(const uint kernel_features, const char *name, c
fatbin.c_str(),
common_cflags.c_str());
LOG(INFO_IMPORTANT) << "Compiling " << ((use_adaptive_compilation()) ? "adaptive " : "")
<< "HIP kernel ...";
LOG_INFO_IMPORTANT << "Compiling " << ((use_adaptive_compilation()) ? "adaptive " : "")
<< "HIP kernel ...";
# ifdef _WIN32
command = "call " + command;
@@ -367,8 +367,8 @@ string HIPDevice::compile_kernel(const uint kernel_features, const char *name, c
return string();
}
LOG(INFO_IMPORTANT) << "Kernel compilation finished in " << std::fixed << std::setprecision(2)
<< time_dt() - starttime << "s";
LOG_INFO_IMPORTANT << "Kernel compilation finished in " << std::fixed << std::setprecision(2)
<< time_dt() - starttime << "s";
return fatbin;
}
@@ -381,7 +381,7 @@ bool HIPDevice::load_kernels(const uint kernel_features)
*/
if (hipModule) {
if (use_adaptive_compilation()) {
LOG(INFO) << "Skipping HIP kernel reload for adaptive compilation, not currently supported.";
LOG_INFO << "Skipping HIP kernel reload for adaptive compilation, not currently supported.";
}
return true;
}
@@ -469,8 +469,8 @@ void HIPDevice::reserve_local_memory(const uint kernel_features)
hipMemGetInfo(&free_after, &total);
}
LOG(INFO) << "Local memory reserved " << string_human_readable_number(free_before - free_after)
<< " bytes. (" << string_human_readable_size(free_before - free_after) << ")";
LOG_INFO << "Local memory reserved " << string_human_readable_number(free_before - free_after)
<< " bytes. (" << string_human_readable_size(free_before - free_after) << ")";
# if 0
/* For testing mapped host memory, fill up device memory. */
@@ -990,10 +990,10 @@ bool HIPDevice::should_use_graphics_interop(const GraphicsInteropDevice &interop
if (log) {
if (found) {
LOG(INFO) << "Graphics interop: found matching OpenGL device for HIP";
LOG_INFO << "Graphics interop: found matching OpenGL device for HIP";
}
else {
LOG(INFO) << "Graphics interop: no matching OpenGL device for HIP";
LOG_INFO << "Graphics interop: no matching OpenGL device for HIP";
}
}


@@ -45,7 +45,7 @@ void HIPDeviceGraphicsInterop::set_buffer(GraphicsInteropBuffer &interop_buffer)
&hip_graphics_resource_, interop_buffer.take_handle(), hipGraphicsRegisterFlagsNone);
if (result != hipSuccess) {
LOG(ERROR) << "Error registering OpenGL buffer: " << hipewErrorString(result);
LOG_ERROR << "Error registering OpenGL buffer: " << hipewErrorString(result);
break;
}


@@ -35,7 +35,7 @@ void HIPDeviceKernels::load(HIPDevice *device)
&kernel.min_blocks, &kernel.num_threads_per_block, kernel.function, 0, 0));
}
else {
LOG(ERROR) << "Unable to load kernel " << function_name;
LOG_ERROR << "Unable to load kernel " << function_name;
}
}


@@ -40,12 +40,12 @@ int HIPDeviceQueue::num_concurrent_states(const size_t state_size) const
num_states = max((int)(num_states * factor), 1024);
}
else {
LOG(STATS) << "CYCLES_CONCURRENT_STATES_FACTOR evaluated to 0";
LOG_STATS << "CYCLES_CONCURRENT_STATES_FACTOR evaluated to 0";
}
}
LOG(STATS) << "GPU queue concurrent states: " << num_states << ", using up to "
<< string_human_readable_size(num_states * state_size);
LOG_STATS << "GPU queue concurrent states: " << num_states << ", using up to "
<< string_human_readable_size(num_states * state_size);
return num_states;
}


@@ -52,11 +52,11 @@ bool hipSupportsDriver()
int hip_driver_version = 0;
hipError_t result = hipDriverGetVersion(&hip_driver_version);
if (result != hipSuccess) {
LOG(WARNING) << "Error getting driver version: " << hipewErrorString(result);
LOG_WARNING << "Error getting driver version: " << hipewErrorString(result);
return false;
}
LOG(DEBUG) << "Detected HIP driver version: " << hip_driver_version;
LOG_DEBUG << "Detected HIP driver version: " << hip_driver_version;
if (hip_driver_version < 60140252) {
/* Cycles crashes during rendering due to issues in older GPU drivers.


@@ -103,7 +103,7 @@ HIPRTDevice::HIPRTDevice(const DeviceInfo &info,
return;
}
if (LOG_IS_ON(DEBUG)) {
if (LOG_IS_ON(LOG_LEVEL_DEBUG)) {
hiprtSetLogLevel(hiprtLogLevelInfo | hiprtLogLevelWarn | hiprtLogLevelError);
}
else {
@@ -154,9 +154,9 @@ string HIPRTDevice::compile_kernel(const uint kernel_features, const char *name,
if (!use_adaptive_compilation()) {
const string fatbin = path_get(string_printf("lib/%s_rt_%s.hipfb.zst", name, arch.c_str()));
LOG(INFO) << "Testing for pre-compiled kernel " << fatbin << ".";
LOG_INFO << "Testing for pre-compiled kernel " << fatbin << ".";
if (path_exists(fatbin)) {
LOG(INFO) << "Using precompiled kernel.";
LOG_INFO << "Using precompiled kernel.";
return fatbin;
}
}
@@ -173,9 +173,9 @@ string HIPRTDevice::compile_kernel(const uint kernel_features, const char *name,
const string fatbin = path_cache_get(path_join("kernels", fatbin_file));
const string hiprt_include_path = path_join(source_path, "kernel/device/hiprt");
LOG(INFO) << "Testing for locally compiled kernel " << fatbin << ".";
LOG_INFO << "Testing for locally compiled kernel " << fatbin << ".";
if (path_exists(fatbin)) {
LOG(INFO) << "Using locally compiled kernel.";
LOG_INFO << "Using locally compiled kernel.";
return fatbin;
}
@@ -208,10 +208,10 @@ string HIPRTDevice::compile_kernel(const uint kernel_features, const char *name,
}
const int hipcc_hip_version = hipewCompilerVersion();
LOG(INFO) << "Found hipcc " << hipcc << ", HIP version " << hipcc_hip_version << ".";
LOG_INFO << "Found hipcc " << hipcc << ", HIP version " << hipcc_hip_version << ".";
if (hipcc_hip_version < 40) {
LOG(WARNING) << "Unsupported HIP version " << hipcc_hip_version / 10 << "."
<< hipcc_hip_version % 10 << ", you need HIP 4.0 or newer.\n";
LOG_WARNING << "Unsupported HIP version " << hipcc_hip_version / 10 << "."
<< hipcc_hip_version % 10 << ", you need HIP 4.0 or newer.\n";
return string();
}
@@ -234,7 +234,7 @@ string HIPRTDevice::compile_kernel(const uint kernel_features, const char *name,
options.append(" -D WITH_NANOVDB");
# endif
LOG(INFO_IMPORTANT) << "Compiling " << source_path << " and caching to " << fatbin;
LOG_INFO_IMPORTANT << "Compiling " << source_path << " and caching to " << fatbin;
double starttime = time_dt();
@@ -257,8 +257,8 @@ string HIPRTDevice::compile_kernel(const uint kernel_features, const char *name,
return string();
}
LOG(INFO_IMPORTANT) << "Kernel compilation finished in " << std::fixed << std::setprecision(2)
<< time_dt() - starttime << "s";
LOG_INFO_IMPORTANT << "Kernel compilation finished in " << std::fixed << std::setprecision(2)
<< time_dt() - starttime << "s";
return fatbin;
}
@@ -267,7 +267,7 @@ bool HIPRTDevice::load_kernels(const uint kernel_features)
{
if (hipModule) {
if (use_adaptive_compilation()) {
LOG(INFO) << "Skipping HIP kernel reload for adaptive compilation, not currently supported.";
LOG_INFO << "Skipping HIP kernel reload for adaptive compilation, not currently supported.";
}
return true;
}


@@ -46,7 +46,7 @@ bool HIPRTDeviceQueue::enqueue(DeviceKernel kernel,
hiprt_device_->global_stack_buffer);
if (rt_result != hiprtSuccess) {
LOG(ERROR) << "Failed to create hiprt Global Stack Buffer";
LOG_ERROR << "Failed to create hiprt Global Stack Buffer";
return false;
}
}


@@ -163,7 +163,7 @@ const char *device_kernel_as_string(DeviceKernel kernel)
break;
};
#ifndef __KERNEL_ONEAPI__
LOG(FATAL) << "Unhandled kernel " << static_cast<int>(kernel) << ", should never happen.";
LOG_FATAL << "Unhandled kernel " << static_cast<int>(kernel) << ", should never happen.";
#endif
return "UNKNOWN";
}


@@ -102,11 +102,11 @@ void device_metal_info(vector<DeviceInfo> &devices)
devices.push_back(info);
device_index++;
LOG(INFO) << "Added device \"" << info.description << "\" with id \"" << info.id << "\".";
LOG_INFO << "Added device \"" << info.description << "\" with id \"" << info.id << "\".";
if (info.denoisers & DENOISER_OPENIMAGEDENOISE) {
LOG(INFO) << "Device with id \"" << info.id << "\" supports "
<< denoiserTypeToHumanReadable(DENOISER_OPENIMAGEDENOISE) << ".";
LOG_INFO << "Device with id \"" << info.id << "\" supports "
<< denoiserTypeToHumanReadable(DENOISER_OPENIMAGEDENOISE) << ".";
}
}
}


@@ -502,8 +502,8 @@ void MetalDevice::compile_and_load(const int device_id, MetalPipelineType pso_ty
if (MetalDevice *instance = get_device_by_ID(device_id, lock)) {
if (mtlLibrary) {
if (error && [error localizedDescription]) {
LOG(WARNING) << "MSL compilation messages: "
<< [[error localizedDescription] UTF8String];
LOG_WARNING << "MSL compilation messages: "
<< [[error localizedDescription] UTF8String];
}
instance->mtlLibrary[pso_type] = mtlLibrary;
@@ -586,9 +586,9 @@ MetalDevice::MetalMem *MetalDevice::generic_alloc(device_memory &mem)
}
if (mem.name) {
LOG(WORK) << "Buffer allocate: " << mem.name << ", "
<< string_human_readable_number(mem.memory_size()) << " bytes. ("
<< string_human_readable_size(mem.memory_size()) << ")";
LOG_WORK << "Buffer allocate: " << mem.name << ", "
<< string_human_readable_number(mem.memory_size()) << " bytes. ("
<< string_human_readable_size(mem.memory_size()) << ")";
}
mem.device_size = metal_buffer.allocatedSize;
@@ -1034,9 +1034,9 @@ void MetalDevice::tex_alloc(device_texture &mem)
desc.storageMode = MTLStorageModeShared;
desc.usage = MTLTextureUsageShaderRead;
LOG(WORK) << "Texture 2D allocate: " << mem.name << ", "
<< string_human_readable_number(mem.memory_size()) << " bytes. ("
<< string_human_readable_size(mem.memory_size()) << ")";
LOG_WORK << "Texture 2D allocate: " << mem.name << ", "
<< string_human_readable_number(mem.memory_size()) << " bytes. ("
<< string_human_readable_size(mem.memory_size()) << ")";
mtlTexture = [mtlDevice newTextureWithDescriptor:desc];
if (!mtlTexture) {


@@ -398,8 +398,8 @@ bool MetalDeviceQueue::enqueue(DeviceKernel kernel,
debug_enqueue_begin(kernel, work_size);
LOG(STATS) << "Metal queue launch " << device_kernel_as_string(kernel) << ", work_size "
<< work_size;
LOG_STATS << "Metal queue launch " << device_kernel_as_string(kernel) << ", work_size "
<< work_size;
id<MTLComputeCommandEncoder> mtlComputeCommandEncoder = get_compute_encoder(kernel);


@@ -15,7 +15,7 @@
# include "util/thread.h"
# define metal_printf LOG(STATS) << string_printf
# define metal_printf LOG_STATS << string_printf
CCL_NAMESPACE_BEGIN


@@ -87,7 +87,7 @@ unique_ptr<Device> device_oneapi_create(const DeviceInfo &info,
(void)profiler;
(void)headless;
LOG(FATAL) << "Requested to create oneAPI device while not enabled for this build.";
LOG_FATAL << "Requested to create oneAPI device while not enabled for this build.";
return nullptr;
#endif
@@ -144,11 +144,11 @@ static void device_iterator_cb(const char *id,
info.has_execution_optimization = has_execution_optimization;
devices->push_back(info);
LOG(INFO) << "Added device \"" << info.description << "\" with id \"" << info.id << "\".";
LOG_INFO << "Added device \"" << info.description << "\" with id \"" << info.id << "\".";
if (info.denoisers & DENOISER_OPENIMAGEDENOISE) {
LOG(INFO) << "Device with id \"" << info.id << "\" supports "
<< denoiserTypeToHumanReadable(DENOISER_OPENIMAGEDENOISE) << ".";
LOG_INFO << "Device with id \"" << info.id << "\" supports "
<< denoiserTypeToHumanReadable(DENOISER_OPENIMAGEDENOISE) << ".";
}
}
#endif


@@ -73,8 +73,8 @@ OneapiDevice::OneapiDevice(const DeviceInfo &info, Stats &stats, Profiler &profi
oneapi_error_string_ + "\"");
}
else {
LOG(DEBUG) << "oneAPI queue has been successfully created for the device \""
<< info.description << "\"";
LOG_DEBUG << "oneAPI queue has been successfully created for the device \"" << info.description
<< "\"";
assert(device_queue_);
}
@@ -85,7 +85,7 @@ OneapiDevice::OneapiDevice(const DeviceInfo &info, Stats &stats, Profiler &profi
# endif
if (use_hardware_raytracing) {
LOG(INFO) << "oneAPI will use hardware ray tracing for intersection acceleration.";
LOG_INFO << "oneAPI will use hardware ray tracing for intersection acceleration.";
}
size_t globals_segment_size;
@@ -95,7 +95,7 @@ OneapiDevice::OneapiDevice(const DeviceInfo &info, Stats &stats, Profiler &profi
oneapi_error_string_ + "\"");
}
else {
LOG(DEBUG) << "Successfully created global/constant memory segment (kernel globals object)";
LOG_DEBUG << "Successfully created global/constant memory segment (kernel globals object)";
}
kg_memory_ = usm_aligned_alloc_host(device_queue_, globals_segment_size, 16);
@@ -115,8 +115,8 @@ OneapiDevice::OneapiDevice(const DeviceInfo &info, Stats &stats, Profiler &profi
device_working_headroom = override_headroom;
device_texture_headroom = override_headroom;
}
LOG(DEBUG) << "oneAPI memory headroom size: "
<< string_human_readable_size(device_working_headroom);
LOG_DEBUG << "oneAPI memory headroom size: "
<< string_human_readable_size(device_working_headroom);
}
OneapiDevice::~OneapiDevice()
@@ -239,11 +239,11 @@ bool OneapiDevice::load_kernels(const uint requested_features)
"\"");
return false;
}
LOG(INFO) << "Test kernel has been executed successfully for \"" << info.description << "\"";
LOG_INFO << "Test kernel has been executed successfully for \"" << info.description << "\"";
assert(device_queue_);
if (use_hardware_raytracing && !can_use_hardware_raytracing_for_features(requested_features)) {
LOG(INFO)
LOG_INFO
<< "Hardware ray tracing disabled, not supported yet by oneAPI for requested features.";
use_hardware_raytracing = false;
}
@@ -254,7 +254,7 @@ bool OneapiDevice::load_kernels(const uint requested_features)
set_error("oneAPI kernels loading: got a runtime exception \"" + oneapi_error_string_ + "\"");
}
else {
LOG(INFO) << "Kernels loading (compilation) has been done for \"" << info.description << "\"";
LOG_INFO << "Kernels loading (compilation) has been done for \"" << info.description << "\"";
}
if (is_finished_ok) {
@@ -294,9 +294,9 @@ void OneapiDevice::reserve_private_memory(const uint kernel_features)
size_t free_after = get_free_mem();
LOG(INFO) << "For kernel execution were reserved "
<< string_human_readable_number(free_before - free_after) << " bytes. ("
<< string_human_readable_size(free_before - free_after) << ")";
LOG_INFO << "For kernel execution were reserved "
<< string_human_readable_number(free_before - free_after) << " bytes. ("
<< string_human_readable_size(free_before - free_after) << ")";
}
void OneapiDevice::get_device_memory_info(size_t &total, size_t &free)
@@ -422,9 +422,9 @@ void OneapiDevice::mem_alloc(device_memory &mem)
}
else {
if (mem.name) {
LOG(DEBUG) << "OneapiDevice::mem_alloc: \"" << mem.name << "\", "
<< string_human_readable_number(mem.memory_size()) << " bytes. ("
<< string_human_readable_size(mem.memory_size()) << ")";
LOG_DEBUG << "OneapiDevice::mem_alloc: \"" << mem.name << "\", "
<< string_human_readable_number(mem.memory_size()) << " bytes. ("
<< string_human_readable_size(mem.memory_size()) << ")";
}
generic_alloc(mem);
}
@@ -433,9 +433,9 @@ void OneapiDevice::mem_alloc(device_memory &mem)
void OneapiDevice::mem_copy_to(device_memory &mem)
{
if (mem.name) {
LOG(DEBUG) << "OneapiDevice::mem_copy_to: \"" << mem.name << "\", "
<< string_human_readable_number(mem.memory_size()) << " bytes. ("
<< string_human_readable_size(mem.memory_size()) << ")";
LOG_DEBUG << "OneapiDevice::mem_copy_to: \"" << mem.name << "\", "
<< string_human_readable_number(mem.memory_size()) << " bytes. ("
<< string_human_readable_size(mem.memory_size()) << ")";
}
/* After getting runtime errors we need to avoid performing oneAPI runtime operations
@@ -461,9 +461,9 @@ void OneapiDevice::mem_copy_to(device_memory &mem)
void OneapiDevice::mem_move_to_host(device_memory &mem)
{
if (mem.name) {
LOG(DEBUG) << "OneapiDevice::mem_move_to_host: \"" << mem.name << "\", "
<< string_human_readable_number(mem.memory_size()) << " bytes. ("
<< string_human_readable_size(mem.memory_size()) << ")";
LOG_DEBUG << "OneapiDevice::mem_move_to_host: \"" << mem.name << "\", "
<< string_human_readable_number(mem.memory_size()) << " bytes. ("
<< string_human_readable_size(mem.memory_size()) << ")";
}
/* After getting runtime errors we need to avoid performing oneAPI runtime operations
@@ -496,10 +496,10 @@ void OneapiDevice::mem_copy_from(
const size_t offset = elem * y * w;
if (mem.name) {
LOG(DEBUG) << "OneapiDevice::mem_copy_from: \"" << mem.name << "\" object of "
<< string_human_readable_number(mem.memory_size()) << " bytes. ("
<< string_human_readable_size(mem.memory_size()) << ") from offset " << offset
<< " data " << size << " bytes";
LOG_DEBUG << "OneapiDevice::mem_copy_from: \"" << mem.name << "\" object of "
<< string_human_readable_number(mem.memory_size()) << " bytes. ("
<< string_human_readable_size(mem.memory_size()) << ") from offset " << offset
<< " data " << size << " bytes";
}
/* After getting runtime errors we need to avoid performing oneAPI runtime operations
@@ -526,9 +526,9 @@ void OneapiDevice::mem_copy_from(
void OneapiDevice::mem_zero(device_memory &mem)
{
if (mem.name) {
LOG(DEBUG) << "OneapiDevice::mem_zero: \"" << mem.name << "\", "
<< string_human_readable_number(mem.memory_size()) << " bytes. ("
<< string_human_readable_size(mem.memory_size()) << ")\n";
LOG_DEBUG << "OneapiDevice::mem_zero: \"" << mem.name << "\", "
<< string_human_readable_number(mem.memory_size()) << " bytes. ("
<< string_human_readable_size(mem.memory_size()) << ")\n";
}
/* After getting runtime errors we need to avoid performing oneAPI runtime operations
@@ -556,9 +556,9 @@ void OneapiDevice::mem_zero(device_memory &mem)
void OneapiDevice::mem_free(device_memory &mem)
{
if (mem.name) {
LOG(DEBUG) << "OneapiDevice::mem_free: \"" << mem.name << "\", "
<< string_human_readable_number(mem.device_size) << " bytes. ("
<< string_human_readable_size(mem.device_size) << ")\n";
LOG_DEBUG << "OneapiDevice::mem_free: \"" << mem.name << "\", "
<< string_human_readable_number(mem.device_size) << " bytes. ("
<< string_human_readable_size(mem.device_size) << ")\n";
}
if (mem.type == MEM_GLOBAL) {
@@ -584,9 +584,9 @@ void OneapiDevice::const_copy_to(const char *name, void *host, const size_t size
{
assert(name);
LOG(DEBUG) << "OneapiDevice::const_copy_to \"" << name << "\" object "
<< string_human_readable_number(size) << " bytes. ("
<< string_human_readable_size(size) << ")";
LOG_DEBUG << "OneapiDevice::const_copy_to \"" << name << "\" object "
<< string_human_readable_number(size) << " bytes. ("
<< string_human_readable_size(size) << ")";
if (strcmp(name, "data") == 0) {
assert(size <= sizeof(KernelData));
@@ -634,9 +634,9 @@ void OneapiDevice::global_alloc(device_memory &mem)
assert(mem.name);
size_t size = mem.memory_size();
LOG(DEBUG) << "OneapiDevice::global_alloc \"" << mem.name << "\" object "
<< string_human_readable_number(size) << " bytes. ("
<< string_human_readable_size(size) << ")";
LOG_DEBUG << "OneapiDevice::global_alloc \"" << mem.name << "\" object "
<< string_human_readable_number(size) << " bytes. ("
<< string_human_readable_size(size) << ")";
generic_alloc(mem);
generic_copy_to(mem);
@@ -776,9 +776,9 @@ void OneapiDevice::tex_alloc(device_texture &mem)
desc = sycl::ext::oneapi::experimental::image_descriptor(
{mem.data_width, mem.data_height, 0}, mem.data_elements, channel_type);
LOG(WORK) << "Array 2D/3D allocate: " << mem.name << ", "
<< string_human_readable_number(mem.memory_size()) << " bytes. ("
<< string_human_readable_size(mem.memory_size()) << ")";
LOG_WORK << "Array 2D/3D allocate: " << mem.name << ", "
<< string_human_readable_number(mem.memory_size()) << " bytes. ("
<< string_human_readable_size(mem.memory_size()) << ")";
sycl::ext::oneapi::experimental::image_mem_handle memHandle =
sycl::ext::oneapi::experimental::alloc_image_mem(desc, *queue);
@@ -1360,10 +1360,10 @@ int parse_driver_build_version(const sycl::device &device)
}
if (driver_build_version == 0) {
LOG(WARNING) << "Unable to parse unknown Intel GPU driver version. \"" << driver_version
<< "\" does not match xx.xx.xxxxx (Linux), x.x.xxxx (L0),"
<< " xx.xx.xxx.xxxx (Windows) for device \""
<< device.get_info<sycl::info::device::name>() << "\".";
LOG_WARNING << "Unable to parse unknown Intel GPU driver version. \"" << driver_version
<< "\" does not match xx.xx.xxxxx (Linux), x.x.xxxx (L0),"
<< " xx.xx.xxx.xxxx (Windows) for device \""
<< device.get_info<sycl::info::device::name>() << "\".";
}
return driver_build_version;
@@ -1445,10 +1445,10 @@ std::vector<sycl::device> available_sycl_devices()
if (driver_build_version < lowest_supported_driver_version) {
filter_out = true;
LOG(WARNING) << "Driver version for device \""
<< device.get_info<sycl::info::device::name>()
<< "\" is too old. Expected \"" << lowest_supported_driver_version
<< "\" or newer, but got \"" << driver_build_version << "\".";
LOG_WARNING << "Driver version for device \""
<< device.get_info<sycl::info::device::name>()
<< "\" is too old. Expected \"" << lowest_supported_driver_version
<< "\" or newer, but got \"" << driver_build_version << "\".";
}
}
}
@@ -1460,7 +1460,7 @@ std::vector<sycl::device> available_sycl_devices()
}
}
catch (sycl::exception &e) {
LOG(WARNING) << "An error has been encountered while enumerating SYCL devices: " << e.what();
LOG_WARNING << "An error has been encountered while enumerating SYCL devices: " << e.what();
}
return available_devices;
}

View File

@@ -28,8 +28,8 @@ int OneapiDeviceQueue::num_concurrent_states(const size_t state_size) const
{
int num_states = 4 * num_concurrent_busy_states(state_size);
LOG(STATS) << "GPU queue concurrent states: " << num_states << ", using up to "
<< string_human_readable_size(num_states * state_size);
LOG_STATS << "GPU queue concurrent states: " << num_states << ", using up to "
<< string_human_readable_size(num_states * state_size);
return num_states;
}

View File

@@ -44,12 +44,12 @@ bool device_optix_init()
const OptixResult result = optixInit();
if (result == OPTIX_ERROR_UNSUPPORTED_ABI_VERSION) {
LOG(WARNING) << "OptiX initialization failed because the installed NVIDIA driver is too old. "
"Please update to the latest driver first!";
LOG_WARNING << "OptiX initialization failed because the installed NVIDIA driver is too old. "
"Please update to the latest driver first!";
return false;
}
if (result != OPTIX_SUCCESS) {
LOG(WARNING) << "OptiX initialization failed with error code " << (unsigned int)result;
LOG_WARNING << "OptiX initialization failed with error code " << (unsigned int)result;
return false;
}
@@ -114,7 +114,7 @@ unique_ptr<Device> device_optix_create(const DeviceInfo &info,
(void)profiler;
(void)headless;
LOG(FATAL) << "Request to create OptiX device without compiled-in support. Should never happen.";
LOG_FATAL << "Request to create OptiX device without compiled-in support. Should never happen.";
return nullptr;
#endif

View File

@@ -68,16 +68,16 @@ OptiXDevice::OptiXDevice(const DeviceInfo &info, Stats &stats, Profiler &profile
options.logCallbackFunction = [](unsigned int level, const char *, const char *message, void *) {
switch (level) {
case 1:
LOG(FATAL) << message;
LOG_FATAL << message;
break;
case 2:
LOG(ERROR) << message;
LOG_ERROR << message;
break;
case 3:
LOG(WARNING) << message;
LOG_WARNING << message;
break;
case 4:
LOG(INFO) << message;
LOG_INFO << message;
break;
default:
break;
@@ -85,7 +85,7 @@ OptiXDevice::OptiXDevice(const DeviceInfo &info, Stats &stats, Profiler &profile
};
# endif
if (DebugFlags().optix.use_debug) {
LOG(INFO) << "Using OptiX debug mode.";
LOG_INFO << "Using OptiX debug mode.";
options.validationMode = OPTIX_DEVICE_CONTEXT_VALIDATION_MODE_ALL;
}
optix_assert(optixDeviceContextCreate(cuContext, &options, &context));
@@ -1073,11 +1073,11 @@ bool OptiXDevice::build_optix_bvh(BVHOptiX *bvh,
use_fast_trace_bvh = true;
}
else if (use_fast_trace_bvh) {
LOG(INFO) << "Using fast to trace OptiX BVH";
LOG_INFO << "Using fast to trace OptiX BVH";
options.buildFlags = OPTIX_BUILD_FLAG_PREFER_FAST_TRACE | OPTIX_BUILD_FLAG_ALLOW_COMPACTION;
}
else {
LOG(INFO) << "Using fast to update OptiX BVH";
LOG_INFO << "Using fast to update OptiX BVH";
options.buildFlags = OPTIX_BUILD_FLAG_PREFER_FAST_BUILD | OPTIX_BUILD_FLAG_ALLOW_UPDATE;
}

View File

@@ -174,8 +174,8 @@ bool OptiXDeviceQueue::enqueue(DeviceKernel kernel,
break;
default:
LOG(ERROR) << "Invalid kernel " << device_kernel_as_string(kernel)
<< " is attempted to be enqueued.";
LOG_ERROR << "Invalid kernel " << device_kernel_as_string(kernel)
<< " is attempted to be enqueued.";
return false;
}

View File

@@ -21,7 +21,7 @@ DeviceQueue::DeviceQueue(Device *device) : device(device)
DeviceQueue::~DeviceQueue()
{
if (LOG_IS_ON(STATS)) {
if (LOG_IS_ON(LOG_LEVEL_STATS)) {
/* Print kernel execution times sorted by time. */
vector<pair<DeviceKernelMask, double>> stats_sorted;
for (const auto &stat : stats_kernel_time_) {
@@ -34,24 +34,23 @@ DeviceQueue::~DeviceQueue()
return a.second > b.second;
});
LOG(STATS) << "GPU queue stats:";
LOG_STATS << "GPU queue stats:";
double total_time = 0.0;
for (const auto &[mask, time] : stats_sorted) {
total_time += time;
LOG(STATS) << " " << std::setfill(' ') << std::setw(10) << std::fixed
<< std::setprecision(5) << std::right << time
<< "s: " << device_kernel_mask_as_string(mask);
LOG_STATS << " " << std::setfill(' ') << std::setw(10) << std::fixed << std::setprecision(5)
<< std::right << time << "s: " << device_kernel_mask_as_string(mask);
}
if (is_per_kernel_performance_) {
LOG(STATS) << "GPU queue total time: " << std::fixed << std::setprecision(5) << total_time;
LOG_STATS << "GPU queue total time: " << std::fixed << std::setprecision(5) << total_time;
}
}
}
void DeviceQueue::debug_init_execution()
{
if (LOG_IS_ON(STATS)) {
if (LOG_IS_ON(LOG_LEVEL_STATS)) {
last_sync_time_ = time_dt();
}
@@ -60,9 +59,9 @@ void DeviceQueue::debug_init_execution()
void DeviceQueue::debug_enqueue_begin(DeviceKernel kernel, const int work_size)
{
if (LOG_IS_ON(STATS)) {
LOG(STATS) << "GPU queue launch " << device_kernel_as_string(kernel) << ", work_size "
<< work_size;
if (LOG_IS_ON(LOG_LEVEL_STATS)) {
LOG_STATS << "GPU queue launch " << device_kernel_as_string(kernel) << ", work_size "
<< work_size;
}
last_kernels_enqueued_.set(kernel, true);
@@ -70,17 +69,17 @@ void DeviceQueue::debug_enqueue_begin(DeviceKernel kernel, const int work_size)
void DeviceQueue::debug_enqueue_end()
{
if (LOG_IS_ON(STATS) && is_per_kernel_performance_) {
if (LOG_IS_ON(LOG_LEVEL_STATS) && is_per_kernel_performance_) {
synchronize();
}
}
void DeviceQueue::debug_synchronize()
{
if (LOG_IS_ON(STATS)) {
if (LOG_IS_ON(LOG_LEVEL_STATS)) {
const double new_time = time_dt();
const double elapsed_time = new_time - last_sync_time_;
LOG(STATS) << "GPU queue synchronize, elapsed " << std::setw(10) << elapsed_time << "s";
LOG_STATS << "GPU queue synchronize, elapsed " << std::setw(10) << elapsed_time << "s";
/* There is no sense to have an entries in the performance data
* container without related kernel information. */
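
The hunks above swap `LOG(STATS)` for `LOG_STATS` and pass the prefixed enumerator to `LOG_IS_ON(LOG_LEVEL_STATS)`, so bare level names such as DEBUG or ERROR never appear as macro arguments where a platform header may already #define them. A minimal, self-contained sketch of this prefixed-macro pattern follows; the LOG_LEVEL_* names mirror the diff, but the guard and stream machinery are illustrative assumptions, not the actual util/log implementation.

#include <iostream>

/* Illustrative sketch only, not the Cycles util/log header. Each level gets a prefixed
 * enumerator so bare names (DEBUG, ERROR, ...) cannot collide with platform macros. */
enum LogLevel {
  LOG_LEVEL_FATAL = 0,
  LOG_LEVEL_DFATAL,
  LOG_LEVEL_ERROR,
  LOG_LEVEL_WARNING,
  LOG_LEVEL_INFO,
  LOG_LEVEL_WORK,
  LOG_LEVEL_STATS,
  LOG_LEVEL_DEBUG,
};

/* Assumed global verbosity threshold; anything at or below it is emitted. */
static LogLevel g_log_level = LOG_LEVEL_STATS;

inline bool log_is_on(LogLevel level)
{
  return level <= g_log_level;
}

/* The level check takes the prefixed enumerator, matching LOG_IS_ON(LOG_LEVEL_STATS) above. */
#define LOG_IS_ON(level) log_is_on(level)

/* Guarded stream: `LOG_STATS << ...;` expands to an if/else so the streaming work is
 * skipped entirely when the level is disabled. */
#define LOG_AT_LEVEL(level) \
  if (!log_is_on(level)) { \
  } \
  else \
    std::cerr

#define LOG_ERROR LOG_AT_LEVEL(LOG_LEVEL_ERROR)
#define LOG_WARNING LOG_AT_LEVEL(LOG_LEVEL_WARNING)
#define LOG_INFO LOG_AT_LEVEL(LOG_LEVEL_INFO)
#define LOG_WORK LOG_AT_LEVEL(LOG_LEVEL_WORK)
#define LOG_STATS LOG_AT_LEVEL(LOG_LEVEL_STATS)
#define LOG_DEBUG LOG_AT_LEVEL(LOG_LEVEL_DEBUG)

int main()
{
  LOG_INFO << "Prefixed stream logging, no bare DEBUG/ERROR tokens.\n";
  if (LOG_IS_ON(LOG_LEVEL_STATS)) {
    LOG_STATS << "Stats level is enabled.\n";
  }
  LOG_DEBUG << "Suppressed: DEBUG is below the current threshold.\n";
  return 0;
}

With this shape, a platform `#define DEBUG 1` only affects the unused bare token; `LOG_DEBUG` and `LOG_LEVEL_DEBUG` are distinct identifiers and keep working, whereas the old `LOG(DEBUG)` spelling would have had DEBUG expanded by the preprocessor before the logging macro ever saw it.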

View File

@@ -159,7 +159,7 @@ class DeviceQueue {
* resource as a buffer writable by kernels of this device. */
virtual unique_ptr<DeviceGraphicsInterop> graphics_interop_create()
{
LOG(FATAL) << "Request of GPU interop of a device which does not support it.";
LOG_FATAL << "Request of GPU interop of a device which does not support it.";
return nullptr;
}

View File

@@ -215,7 +215,7 @@ NodeType *NodeType::add(const char *name_, CreateFunc create_, Type type_, const
const ustring name(name_);
if (types().find(name) != types().end()) {
LOG(ERROR) << "Node type " << name_ << " registered twice";
LOG_ERROR << "Node type " << name_ << " registered twice";
assert(0);
return nullptr;
}

View File

@@ -159,8 +159,8 @@ void xml_read_node(XMLReader &reader, Node *node, const xml_node xml_node)
node->set(socket, value);
}
else {
LOG(ERROR) << "Unknown value \"" << value.c_str() << "\" for attribute \""
<< socket.name.c_str() << "\"";
LOG_ERROR << "Unknown value \"" << value.c_str() << "\" for attribute \""
<< socket.name.c_str() << "\"";
}
break;
}

View File

@@ -44,10 +44,10 @@ class HdCyclesVolumeLoader : public VDBImageLoader {
}
}
catch (const openvdb::IoError &e) {
LOG(WARNING) << "Error loading OpenVDB file: " << e.what();
LOG_WARNING << "Error loading OpenVDB file: " << e.what();
}
catch (...) {
LOG(WARNING) << "Error loading OpenVDB file: Unknown error";
LOG_WARNING << "Error loading OpenVDB file: Unknown error";
}
}
};

View File

@@ -60,7 +60,7 @@ void HdCyclesFileReader::read(Session *session, const char *filepath, const bool
/* Open Stage. */
const UsdStageRefPtr stage = UsdStage::Open(filepath);
if (!stage) {
LOG(ERROR) << "USD failed to read " << filepath;
LOG_ERROR << "USD failed to read " << filepath;
return;
}

View File

@@ -223,7 +223,7 @@ void Denoiser::set_params(const DenoiseParams &params)
params_ = params;
}
else {
LOG(ERROR) << "Attempt to change denoiser type.";
LOG_ERROR << "Attempt to change denoiser type.";
}
}
@@ -258,8 +258,8 @@ bool Denoiser::load_kernels(Progress *progress)
return false;
}
LOG(WORK) << "Will denoise on " << denoiser_device_->info.description << " ("
<< denoiser_device_->info.id << ")";
LOG_WORK << "Will denoise on " << denoiser_device_->info.description << " ("
<< denoiser_device_->info.id << ")";
denoise_kernels_are_loaded_ = true;
return true;

View File

@@ -54,7 +54,7 @@ bool DenoiserGPU::denoise_buffer(const BufferParams &buffer_params,
task.render_buffers = render_buffers;
}
else {
LOG(WORK) << "Creating temporary buffer on denoiser device.";
LOG_WORK << "Creating temporary buffer on denoiser device.";
/* Create buffer which is available by the device used by denoiser. */
@@ -103,7 +103,7 @@ bool DenoiserGPU::denoise_buffer(const DenoiseTask &task)
}
if (!denoise_filter_guiding_preprocess(context)) {
LOG(ERROR) << "Error preprocessing guiding passes.";
LOG_ERROR << "Error preprocessing guiding passes.";
return false;
}
@@ -120,12 +120,12 @@ bool DenoiserGPU::denoise_buffer(const DenoiseTask &task)
bool DenoiserGPU::denoise_ensure(DenoiseContext &context)
{
if (!denoise_create_if_needed(context)) {
LOG(ERROR) << "GPU denoiser creation has failed.";
LOG_ERROR << "GPU denoiser creation has failed.";
return false;
}
if (!denoise_configure_if_needed(context)) {
LOG(ERROR) << "GPU denoiser configuration has failed.";
LOG_ERROR << "GPU denoiser configuration has failed.";
return false;
}
@@ -341,20 +341,20 @@ void DenoiserGPU::denoise_pass(DenoiseContext &context, PassType pass_type)
return;
}
if (pass.denoised_offset == PASS_UNUSED) {
LOG(DFATAL) << "Missing denoised pass " << pass_type_as_string(pass_type);
LOG_DFATAL << "Missing denoised pass " << pass_type_as_string(pass_type);
return;
}
if (pass.use_denoising_albedo) {
if (context.albedo_replaced_with_fake) {
LOG(ERROR) << "Pass which requires albedo is denoised after fake albedo has been set.";
LOG_ERROR << "Pass which requires albedo is denoised after fake albedo has been set.";
return;
}
}
else if (context.use_guiding_passes && !context.albedo_replaced_with_fake) {
context.albedo_replaced_with_fake = true;
if (!denoise_filter_guiding_set_fake_albedo(context)) {
LOG(ERROR) << "Error replacing real albedo with the fake one.";
LOG_ERROR << "Error replacing real albedo with the fake one.";
return;
}
}
@@ -362,12 +362,12 @@ void DenoiserGPU::denoise_pass(DenoiseContext &context, PassType pass_type)
/* Read and preprocess noisy color input pass. */
denoise_color_read(context, pass);
if (!denoise_filter_color_preprocess(context, pass)) {
LOG(ERROR) << "Error converting denoising passes to RGB buffer.";
LOG_ERROR << "Error converting denoising passes to RGB buffer.";
return;
}
if (!denoise_run(context, pass)) {
LOG(ERROR) << "Error running denoiser.";
LOG_ERROR << "Error running denoiser.";
return;
}
@@ -375,7 +375,7 @@ void DenoiserGPU::denoise_pass(DenoiseContext &context, PassType pass_type)
*
* This will scale the denoiser result up to match the number of, possibly per-pixel, samples. */
if (!denoise_filter_color_postprocess(context, pass)) {
LOG(ERROR) << "Error copying denoiser result to the denoised pass.";
LOG_ERROR << "Error copying denoiser result to the denoised pass.";
return;
}

View File

@@ -121,7 +121,7 @@ class OIDNDenoiseContext {
const char *custom_weight_path = getenv("CYCLES_OIDN_CUSTOM_WEIGHTS");
if (custom_weight_path) {
if (!path_read_binary(custom_weight_path, custom_weights)) {
LOG(ERROR) << "Failed to load custom OpenImageDenoise weights";
LOG_ERROR << "Failed to load custom OpenImageDenoise weights";
}
}
}
@@ -151,14 +151,14 @@ class OIDNDenoiseContext {
if (oidn_color_pass.use_denoising_albedo) {
if (albedo_replaced_with_fake_) {
LOG(ERROR) << "Pass which requires albedo is denoised after fake albedo has been set.";
LOG_ERROR << "Pass which requires albedo is denoised after fake albedo has been set.";
return;
}
}
OIDNPass oidn_output_pass(buffer_params_, "output", pass_type, PassMode::DENOISED);
if (oidn_output_pass.offset == PASS_UNUSED) {
LOG(DFATAL) << "Missing denoised pass " << pass_type_as_string(pass_type);
LOG_DFATAL << "Missing denoised pass " << pass_type_as_string(pass_type);
return;
}
@@ -307,8 +307,8 @@ class OIDNDenoiseContext {
/* Read pass pixels using PassAccessor into a temporary buffer which is owned by the pass.. */
void read_pass_pixels_into_buffer(OIDNPass &oidn_pass)
{
LOG(WORK) << "Allocating temporary buffer for pass " << oidn_pass.name << " ("
<< pass_type_as_string(oidn_pass.type) << ")";
LOG_WORK << "Allocating temporary buffer for pass " << oidn_pass.name << " ("
<< pass_type_as_string(oidn_pass.type) << ")";
const int64_t width = buffer_params_.width;
const int64_t height = buffer_params_.height;

View File

@@ -77,8 +77,8 @@ bool OIDNDenoiserGPU::is_device_supported(const DeviceInfo &device)
return false;
}
LOG(DEBUG) << "Checking device " << device.description << " (" << device.id
<< ") for OIDN GPU support";
LOG_DEBUG << "Checking device " << device.description << " (" << device.id
<< ") for OIDN GPU support";
int device_type = OIDN_DEVICE_TYPE_DEFAULT;
switch (device.type) {
@@ -101,20 +101,20 @@ bool OIDNDenoiserGPU::is_device_supported(const DeviceInfo &device)
# ifdef OIDN_DEVICE_METAL
case DEVICE_METAL: {
const int num_devices = oidnGetNumPhysicalDevices();
LOG(DEBUG) << "Found " << num_devices << " OIDN device(s)";
LOG_DEBUG << "Found " << num_devices << " OIDN device(s)";
for (int i = 0; i < num_devices; i++) {
const int type = oidnGetPhysicalDeviceInt(i, "type");
const char *name = oidnGetPhysicalDeviceString(i, "name");
LOG(DEBUG) << "OIDN device " << i << ": name=\"" << name
<< "\", type=" << oidn_device_type_to_string(OIDNDeviceType(type));
LOG_DEBUG << "OIDN device " << i << ": name=\"" << name
<< "\", type=" << oidn_device_type_to_string(OIDNDeviceType(type));
if (type == OIDN_DEVICE_TYPE_METAL) {
if (device.id.find(name) != std::string::npos) {
LOG(DEBUG) << "OIDN device name matches the Cycles device name";
LOG_DEBUG << "OIDN device name matches the Cycles device name";
return true;
}
}
}
LOG(DEBUG) << "No matched OIDN device found";
LOG_DEBUG << "No matched OIDN device found";
return false;
}
# endif
@@ -127,30 +127,30 @@ bool OIDNDenoiserGPU::is_device_supported(const DeviceInfo &device)
/* Match GPUs by their PCI ID. */
const int num_devices = oidnGetNumPhysicalDevices();
LOG(DEBUG) << "Found " << num_devices << " OIDN device(s)";
LOG_DEBUG << "Found " << num_devices << " OIDN device(s)";
for (int i = 0; i < num_devices; i++) {
const int type = oidnGetPhysicalDeviceInt(i, "type");
const char *name = oidnGetPhysicalDeviceString(i, "name");
LOG(DEBUG) << "OIDN device " << i << ": name=\"" << name
<< "\" type=" << oidn_device_type_to_string(OIDNDeviceType(type));
LOG_DEBUG << "OIDN device " << i << ": name=\"" << name
<< "\" type=" << oidn_device_type_to_string(OIDNDeviceType(type));
if (type == device_type) {
if (oidnGetPhysicalDeviceBool(i, "pciAddressSupported")) {
unsigned int pci_domain = oidnGetPhysicalDeviceInt(i, "pciDomain");
unsigned int pci_bus = oidnGetPhysicalDeviceInt(i, "pciBus");
unsigned int pci_device = oidnGetPhysicalDeviceInt(i, "pciDevice");
string pci_id = string_printf("%04x:%02x:%02x", pci_domain, pci_bus, pci_device);
LOG(INFO) << "OIDN device PCI-e identifier: " << pci_id;
LOG_INFO << "OIDN device PCI-e identifier: " << pci_id;
if (device.id.find(pci_id) != string::npos) {
LOG(DEBUG) << "OIDN device PCI-e identifier matches the Cycles device ID";
LOG_DEBUG << "OIDN device PCI-e identifier matches the Cycles device ID";
return true;
}
}
else {
LOG(DEBUG) << "Device does not support pciAddressSupported";
LOG_DEBUG << "Device does not support pciAddressSupported";
}
}
}
LOG(DEBUG) << "No matched OIDN device found";
LOG_DEBUG << "No matched OIDN device found";
return false;
# endif
}
@@ -201,7 +201,7 @@ OIDNFilter OIDNDenoiserGPU::create_filter()
if (filter == nullptr) {
const OIDNError err = oidnGetDeviceError(oidn_device_, &error_message);
if (OIDN_ERROR_NONE != err) {
LOG(ERROR) << "OIDN error: " << error_message;
LOG_ERROR << "OIDN error: " << error_message;
set_error(error_message);
}
}
@@ -252,7 +252,7 @@ bool OIDNDenoiserGPU::commit_and_execute_filter(OIDNFilter filter, ExecMode mode
if (error_message == nullptr) {
error_message = "Unspecified OIDN error";
}
LOG(ERROR) << "OIDN error: " << error_message;
LOG_ERROR << "OIDN error: " << error_message;
set_error(error_message);
return false;
}
@@ -335,7 +335,7 @@ bool OIDNDenoiserGPU::denoise_create_if_needed(DenoiseContext &context)
oidn_filter_, "weights", custom_weights.data(), custom_weights.size());
}
else {
LOG(ERROR) << "Failed to load custom OpenImageDenoise weights";
LOG_ERROR << "Failed to load custom OpenImageDenoise weights";
}
}

View File

@@ -385,8 +385,8 @@ void PathTrace::path_trace(RenderWork &render_work)
return;
}
LOG(WORK) << "Will path trace " << render_work.path_trace.num_samples
<< " samples at the resolution divider " << render_work.resolution_divider;
LOG_WORK << "Will path trace " << render_work.path_trace.num_samples
<< " samples at the resolution divider " << render_work.resolution_divider;
const double start_time = time_dt();
@@ -415,9 +415,9 @@ void PathTrace::path_trace(RenderWork &render_work)
work_balance_infos_[i].time_spent += work_time;
work_balance_infos_[i].occupancy = statistics.occupancy;
LOG(INFO) << "Rendered " << num_samples << " samples in " << work_time << " seconds ("
<< work_time / num_samples
<< " seconds per sample), occupancy: " << statistics.occupancy;
LOG_INFO << "Rendered " << num_samples << " samples in " << work_time << " seconds ("
<< work_time / num_samples
<< " seconds per sample), occupancy: " << statistics.occupancy;
});
float occupancy_accum = 0.0f;
@@ -440,10 +440,10 @@ void PathTrace::adaptive_sample(RenderWork &render_work)
bool did_reschedule_on_idle = false;
while (true) {
LOG(WORK) << "Will filter adaptive stopping buffer, threshold "
<< render_work.adaptive_sampling.threshold;
LOG_WORK << "Will filter adaptive stopping buffer, threshold "
<< render_work.adaptive_sampling.threshold;
if (render_work.adaptive_sampling.reset) {
LOG(WORK) << "Will re-calculate convergency flag for currently converged pixels.";
LOG_WORK << "Will re-calculate convergency flag for currently converged pixels.";
}
const double start_time = time_dt();
@@ -462,11 +462,11 @@ void PathTrace::adaptive_sample(RenderWork &render_work)
render_work, time_dt() - start_time, is_cancel_requested());
if (num_active_pixels == 0) {
LOG(WORK) << "All pixels converged.";
LOG_WORK << "All pixels converged.";
if (!render_scheduler_.render_work_reschedule_on_converge(render_work)) {
break;
}
LOG(WORK) << "Continuing with lower threshold.";
LOG_WORK << "Continuing with lower threshold.";
}
else if (did_reschedule_on_idle) {
break;
@@ -478,10 +478,10 @@ void PathTrace::adaptive_sample(RenderWork &render_work)
* A better heuristic is possible here: for example, use maximum of 128^2 and percentage of
* the final resolution. */
if (!render_scheduler_.render_work_reschedule_on_idle(render_work)) {
LOG(WORK) << "Rescheduling is not possible: final threshold is reached.";
LOG_WORK << "Rescheduling is not possible: final threshold is reached.";
break;
}
LOG(WORK) << "Rescheduling lower threshold.";
LOG_WORK << "Rescheduling lower threshold.";
did_reschedule_on_idle = true;
}
else {
@@ -575,7 +575,7 @@ void PathTrace::cryptomatte_postprocess(const RenderWork &render_work)
if (!render_work.cryptomatte.postprocess) {
return;
}
LOG(WORK) << "Perform cryptomatte work.";
LOG_WORK << "Perform cryptomatte work.";
parallel_for_each(path_trace_works_, [&](unique_ptr<PathTraceWork> &path_trace_work) {
path_trace_work->cryptomatte_postproces();
@@ -593,7 +593,7 @@ void PathTrace::denoise(const RenderWork &render_work)
return;
}
LOG(WORK) << "Perform denoising work.";
LOG_WORK << "Perform denoising work.";
const double start_time = time_dt();
@@ -686,31 +686,31 @@ void PathTrace::update_display(const RenderWork &render_work)
}
if (!display_ && !output_driver_) {
LOG(WORK) << "Ignore display update.";
LOG_WORK << "Ignore display update.";
return;
}
if (full_params_.width == 0 || full_params_.height == 0) {
LOG(WORK) << "Skipping PathTraceDisplay update due to 0 size of the render buffer.";
LOG_WORK << "Skipping PathTraceDisplay update due to 0 size of the render buffer.";
return;
}
const double start_time = time_dt();
if (output_driver_) {
LOG(WORK) << "Invoke buffer update callback.";
LOG_WORK << "Invoke buffer update callback.";
const PathTraceTile tile(*this);
output_driver_->update_render_tile(tile);
}
if (display_) {
LOG(WORK) << "Perform copy to GPUDisplay work.";
LOG_WORK << "Perform copy to GPUDisplay work.";
const int texture_width = render_state_.effective_big_tile_params.window_width;
const int texture_height = render_state_.effective_big_tile_params.window_height;
if (!display_->update_begin(texture_width, texture_height)) {
LOG(ERROR) << "Error beginning GPUDisplay update.";
LOG_ERROR << "Error beginning GPUDisplay update.";
return;
}
@@ -746,33 +746,33 @@ void PathTrace::rebalance(const RenderWork &render_work)
const int num_works = path_trace_works_.size();
if (num_works == 1) {
LOG(WORK) << "Ignoring rebalance work due to single device render.";
LOG_WORK << "Ignoring rebalance work due to single device render.";
return;
}
const double start_time = time_dt();
if (LOG_IS_ON(WORK)) {
LOG(WORK) << "Perform rebalance work.";
LOG(WORK) << "Per-device path tracing time (seconds):";
if (LOG_IS_ON(LOG_LEVEL_WORK)) {
LOG_WORK << "Perform rebalance work.";
LOG_WORK << "Per-device path tracing time (seconds):";
for (int i = 0; i < num_works; ++i) {
LOG(WORK) << path_trace_works_[i]->get_device()->info.description << ": "
<< work_balance_infos_[i].time_spent;
LOG_WORK << path_trace_works_[i]->get_device()->info.description << ": "
<< work_balance_infos_[i].time_spent;
}
}
const bool did_rebalance = work_balance_do_rebalance(work_balance_infos_);
if (LOG_IS_ON(WORK)) {
LOG(WORK) << "Calculated per-device weights for works:";
if (LOG_IS_ON(LOG_LEVEL_WORK)) {
LOG_WORK << "Calculated per-device weights for works:";
for (int i = 0; i < num_works; ++i) {
LOG(WORK) << path_trace_works_[i]->get_device()->info.description << ": "
<< work_balance_infos_[i].weight;
LOG_WORK << path_trace_works_[i]->get_device()->info.description << ": "
<< work_balance_infos_[i].weight;
}
}
if (!did_rebalance) {
LOG(WORK) << "Balance in path trace works did not change.";
LOG_WORK << "Balance in path trace works did not change.";
render_scheduler_.report_rebalance_time(render_work, time_dt() - start_time, false);
return;
}
@@ -796,7 +796,7 @@ void PathTrace::write_tile_buffer(const RenderWork &render_work)
return;
}
LOG(WORK) << "Write tile result.";
LOG_WORK << "Write tile result.";
render_state_.tile_written = true;
@@ -810,13 +810,13 @@ void PathTrace::write_tile_buffer(const RenderWork &render_work)
*
* Important thing is: tile should be written to the software via callback only once. */
if (!has_multiple_tiles) {
LOG(WORK) << "Write tile result via buffer write callback.";
LOG_WORK << "Write tile result via buffer write callback.";
tile_buffer_write();
}
/* Write tile to disk, so that the render work's render buffer can be re-used for the next tile.
*/
else {
LOG(WORK) << "Write tile result to disk.";
LOG_WORK << "Write tile result to disk.";
tile_buffer_write_to_disk();
}
}
@@ -827,10 +827,10 @@ void PathTrace::finalize_full_buffer_on_disk(const RenderWork &render_work)
return;
}
LOG(WORK) << "Handle full-frame render buffer work.";
LOG_WORK << "Handle full-frame render buffer work.";
if (!tile_manager_.has_written_tiles()) {
LOG(WORK) << "No tiles on disk.";
LOG_WORK << "No tiles on disk.";
return;
}
@@ -1030,7 +1030,7 @@ static string get_layer_view_name(const RenderBuffers &buffers)
void PathTrace::process_full_buffer_from_disk(string_view filename)
{
LOG(WORK) << "Processing full frame buffer file " << filename;
LOG_WORK << "Processing full frame buffer file " << filename;
progress_set_status("Reading full buffer from disk");
@@ -1044,7 +1044,7 @@ void PathTrace::process_full_buffer_from_disk(string_view filename)
progress_->set_cancel(error_message);
}
else {
LOG(ERROR) << error_message;
LOG_ERROR << error_message;
}
return;
}
@@ -1461,10 +1461,10 @@ void PathTrace::guiding_prepare_structures()
void PathTrace::guiding_update_structures()
{
#if defined(WITH_PATH_GUIDING)
LOG(WORK) << "Update path guiding structures";
LOG_WORK << "Update path guiding structures";
LOG(DEBUG) << "Number of surface samples: " << guiding_sample_data_storage_->GetSizeSurface();
LOG(DEBUG) << "Number of volume samples: " << guiding_sample_data_storage_->GetSizeVolume();
LOG_DEBUG << "Number of surface samples: " << guiding_sample_data_storage_->GetSizeSurface();
LOG_DEBUG << "Number of volume samples: " << guiding_sample_data_storage_->GetSizeVolume();
const size_t num_valid_samples = guiding_sample_data_storage_->GetSizeSurface() +
guiding_sample_data_storage_->GetSizeVolume();
@@ -1474,7 +1474,7 @@ void PathTrace::guiding_update_structures()
guiding_field_->Update(*guiding_sample_data_storage_);
guiding_update_count++;
LOG(DEBUG) << "Path guiding field valid: " << guiding_field_->Validate();
LOG_DEBUG << "Path guiding field valid: " << guiding_field_->Validate();
guiding_sample_data_storage_->Clear();
}

View File

@@ -45,7 +45,7 @@ bool PathTraceDisplay::update_begin(const int texture_width, const int texture_h
DCHECK(!update_state_.is_active);
if (update_state_.is_active) {
LOG(ERROR) << "Attempt to re-activate update process.";
LOG_ERROR << "Attempt to re-activate update process.";
return false;
}
@@ -60,7 +60,7 @@ bool PathTraceDisplay::update_begin(const int texture_width, const int texture_h
}
if (!driver_->update_begin(params, texture_width, texture_height)) {
LOG(ERROR) << "PathTraceDisplay implementation could not begin update.";
LOG_ERROR << "PathTraceDisplay implementation could not begin update.";
return false;
}
@@ -74,7 +74,7 @@ void PathTraceDisplay::update_end()
DCHECK(update_state_.is_active);
if (!update_state_.is_active) {
LOG(ERROR) << "Attempt to deactivate inactive update process.";
LOG_ERROR << "Attempt to deactivate inactive update process.";
return;
}
@@ -101,7 +101,7 @@ void PathTraceDisplay::copy_pixels_to_texture(const half4 *rgba_pixels,
DCHECK(update_state_.is_active);
if (!update_state_.is_active) {
LOG(ERROR) << "Attempt to copy pixels data outside of PathTraceDisplay update.";
LOG_ERROR << "Attempt to copy pixels data outside of PathTraceDisplay update.";
return;
}
@@ -150,12 +150,12 @@ half4 *PathTraceDisplay::map_texture_buffer()
DCHECK(update_state_.is_active);
if (texture_buffer_state_.is_mapped) {
LOG(ERROR) << "Attempt to re-map an already mapped texture buffer.";
LOG_ERROR << "Attempt to re-map an already mapped texture buffer.";
return nullptr;
}
if (!update_state_.is_active) {
LOG(ERROR) << "Attempt to copy pixels data outside of PathTraceDisplay update.";
LOG_ERROR << "Attempt to copy pixels data outside of PathTraceDisplay update.";
return nullptr;
}
@@ -173,7 +173,7 @@ void PathTraceDisplay::unmap_texture_buffer()
DCHECK(texture_buffer_state_.is_mapped);
if (!texture_buffer_state_.is_mapped) {
LOG(ERROR) << "Attempt to unmap non-mapped texture buffer.";
LOG_ERROR << "Attempt to unmap non-mapped texture buffer.";
return;
}
@@ -199,14 +199,14 @@ GraphicsInteropBuffer &PathTraceDisplay::graphics_interop_get_buffer()
DCHECK(update_state_.is_active);
if (texture_buffer_state_.is_mapped) {
LOG(ERROR)
LOG_ERROR
<< "Attempt to use graphics interoperability mode while the texture buffer is mapped.";
interop_buffer.clear();
return interop_buffer;
}
if (!update_state_.is_active) {
LOG(ERROR) << "Attempt to use graphics interoperability outside of PathTraceDisplay update.";
LOG_ERROR << "Attempt to use graphics interoperability outside of PathTraceDisplay update.";
interop_buffer.clear();
return interop_buffer;
}

View File

@@ -353,11 +353,11 @@ void PathTraceWorkCPU::guiding_push_sample_data_to_global_storage(ThreadKernelGl
render_buffer)
{
# ifdef WITH_CYCLES_DEBUG
if (LOG_IS_ON(WORK)) {
if (LOG_IS_ON(LOG_LEVEL_WORK)) {
/* Check if the generated path segments contain valid values. */
const bool validSegments = kg->opgl_path_segment_storage->ValidateSegments();
if (!validSegments) {
LOG(WORK) << "Guiding: invalid path segments!";
LOG_WORK << "Guiding: invalid path segments!";
}
}
@@ -382,10 +382,10 @@ void PathTraceWorkCPU::guiding_push_sample_data_to_global_storage(ThreadKernelGl
# ifdef WITH_CYCLES_DEBUG
/* Check if the training/radiance samples generated by the path segment storage are valid. */
if (LOG_IS_ON(WORK)) {
if (LOG_IS_ON(LOG_LEVEL_WORK)) {
const bool validSamples = kg->opgl_path_segment_storage->ValidateSamples();
if (!validSamples) {
LOG(WORK)
LOG_WORK
<< "Guiding: path segment storage generated/contains invalid radiance/training samples!";
}
}

View File

@@ -157,8 +157,8 @@ void PathTraceWorkGPU::alloc_integrator_soa()
if ((kernel_features & (feature))) { \
string name_str = string_printf("%sintegrator_state_" #parent_struct "_" #name, \
shadow ? "shadow_" : ""); \
LOG(DEBUG) << "Skipping " << name_str \
<< " -- data is packed inside integrator_state_" #parent_struct "_packed"; \
LOG_DEBUG << "Skipping " << name_str \
<< " -- data is packed inside integrator_state_" #parent_struct "_packed"; \
}
# define KERNEL_STRUCT_BEGIN_PACKED(parent_struct, feature) \
KERNEL_STRUCT_BEGIN(parent_struct) \
@@ -206,13 +206,13 @@ void PathTraceWorkGPU::alloc_integrator_soa()
#undef KERNEL_STRUCT_END_ARRAY
#undef KERNEL_STRUCT_VOLUME_STACK_SIZE
if (LOG_IS_ON(STATS)) {
if (LOG_IS_ON(LOG_LEVEL_STATS)) {
size_t total_soa_size = 0;
for (auto &&soa_memory : integrator_state_soa_) {
total_soa_size += soa_memory->memory_size();
}
LOG(STATS) << "GPU SoA state size: " << string_human_readable_size(total_soa_size);
LOG_STATS << "GPU SoA state size: " << string_human_readable_size(total_soa_size);
}
}
@@ -571,8 +571,8 @@ void PathTraceWorkGPU::enqueue_path_iteration(DeviceKernel kernel, const int num
}
default:
LOG(FATAL) << "Unhandled kernel " << device_kernel_as_string(kernel)
<< " used for path iteration, should never happen.";
LOG_FATAL << "Unhandled kernel " << device_kernel_as_string(kernel)
<< " used for path iteration, should never happen.";
break;
}
}
@@ -943,10 +943,10 @@ bool PathTraceWorkGPU::should_use_graphics_interop(PathTraceDisplay *display)
true);
if (interop_use_) {
LOG(INFO) << "Using graphics interop GPU display update.";
LOG_INFO << "Using graphics interop GPU display update.";
}
else {
LOG(INFO) << "Using naive GPU display update.";
LOG_INFO << "Using naive GPU display update.";
}
interop_use_checked_ = true;
@@ -966,7 +966,7 @@ void PathTraceWorkGPU::copy_to_display(PathTraceDisplay *display,
}
if (!buffers_->buffer.device_pointer) {
LOG(WARNING) << "Request for GPU display update without allocated render buffers.";
LOG_WARNING << "Request for GPU display update without allocated render buffers.";
return;
}

View File

@@ -229,7 +229,7 @@ bool RenderScheduler::render_work_reschedule_on_idle(RenderWork &render_work)
void RenderScheduler::render_work_reschedule_on_cancel(RenderWork &render_work)
{
LOG(WORK) << "Schedule work for cancel.";
LOG_WORK << "Schedule work for cancel.";
/* Un-schedule samples: they will not be rendered and should not be counted. */
state_.num_rendered_samples -= render_work.path_trace.num_samples;
@@ -495,7 +495,7 @@ void RenderScheduler::report_path_trace_time(const RenderWork &render_work,
path_trace_time_.add_average(final_time_approx, render_work.path_trace.num_samples);
LOG(WORK) << "Average path tracing time: " << path_trace_time_.get_average() << " seconds.";
LOG_WORK << "Average path tracing time: " << path_trace_time_.get_average() << " seconds.";
}
void RenderScheduler::report_path_trace_occupancy(const RenderWork &render_work,
@@ -503,7 +503,7 @@ void RenderScheduler::report_path_trace_occupancy(const RenderWork &render_work,
{
state_.occupancy_num_samples = render_work.path_trace.num_samples;
state_.occupancy = occupancy;
LOG(WORK) << "Measured path tracing occupancy: " << occupancy;
LOG_WORK << "Measured path tracing occupancy: " << occupancy;
}
void RenderScheduler::report_adaptive_filter_time(const RenderWork &render_work,
@@ -524,8 +524,8 @@ void RenderScheduler::report_adaptive_filter_time(const RenderWork &render_work,
adaptive_filter_time_.add_average(final_time_approx, render_work.path_trace.num_samples);
LOG(WORK) << "Average adaptive sampling filter time: " << adaptive_filter_time_.get_average()
<< " seconds.";
LOG_WORK << "Average adaptive sampling filter time: " << adaptive_filter_time_.get_average()
<< " seconds.";
}
void RenderScheduler::report_denoise_time(const RenderWork &render_work, const double time)
@@ -544,7 +544,7 @@ void RenderScheduler::report_denoise_time(const RenderWork &render_work, const d
denoise_time_.add_average(final_time_approx);
LOG(WORK) << "Average denoising time: " << denoise_time_.get_average() << " seconds.";
LOG_WORK << "Average denoising time: " << denoise_time_.get_average() << " seconds.";
}
void RenderScheduler::report_display_update_time(const RenderWork &render_work, const double time)
@@ -563,8 +563,7 @@ void RenderScheduler::report_display_update_time(const RenderWork &render_work,
display_update_time_.add_average(final_time_approx);
LOG(WORK) << "Average display update time: " << display_update_time_.get_average()
<< " seconds.";
LOG_WORK << "Average display update time: " << display_update_time_.get_average() << " seconds.";
/* Move the display update moment further in time, so that logic which checks when last update
* did happen have more reliable point in time (without path tracing and denoising parts of the
@@ -590,7 +589,7 @@ void RenderScheduler::report_rebalance_time(const RenderWork &render_work,
state_.last_rebalance_changed = balance_changed;
LOG(WORK) << "Average rebalance time: " << rebalance_time_.get_average() << " seconds.";
LOG_WORK << "Average rebalance time: " << rebalance_time_.get_average() << " seconds.";
}
string RenderScheduler::full_report() const
@@ -1115,7 +1114,7 @@ void RenderScheduler::update_start_resolution_divider()
* that we have a somewhat good initial behavior, giving a chance to collect real numbers. */
start_resolution_divider_ = min(default_start_resolution_divider_,
max_res_divider_for_desired_size);
LOG(WORK) << "Initial resolution divider is " << start_resolution_divider_;
LOG_WORK << "Initial resolution divider is " << start_resolution_divider_;
return;
}
@@ -1144,7 +1143,7 @@ void RenderScheduler::update_start_resolution_divider()
* unreadable viewport render. */
start_resolution_divider_ = min(resolution_divider_for_update, max_res_divider_for_desired_size);
LOG(WORK) << "Calculated resolution divider is " << start_resolution_divider_;
LOG_WORK << "Calculated resolution divider is " << start_resolution_divider_;
}
double RenderScheduler::guess_viewport_navigation_update_interval_in_seconds() const

View File

@@ -33,8 +33,8 @@ bool ShaderEval::eval(const ShaderEvalType type,
device_->foreach_device([&](Device *device) {
if (!first_device) {
LOG(WORK) << "Multi-devices are not yet fully implemented, will evaluate shader on a "
"single device.";
LOG_WORK << "Multi-devices are not yet fully implemented, will evaluate shader on a "
"single device.";
return;
}
first_device = false;

View File

@@ -57,19 +57,19 @@ void WorkTileScheduler::reset_scheduler_state()
tile_size_.num_samples;
if (num_path_states_in_tile == 0) {
LOG(WORK) << "Will not schedule any tiles: no work remained for the device";
LOG_WORK << "Will not schedule any tiles: no work remained for the device";
num_tiles_x_ = 0;
num_tiles_y_ = 0;
num_tiles_per_sample_range_ = 0;
}
else {
const int num_tiles = max_num_path_states_ / num_path_states_in_tile;
LOG(WORK) << "Will schedule " << num_tiles << " tiles of " << tile_size_;
LOG_WORK << "Will schedule " << num_tiles << " tiles of " << tile_size_;
/* The logging is based on multiple tiles scheduled, ignoring overhead of multi-tile
* scheduling and purely focusing on the number of used path states. */
LOG(WORK) << "Number of unused path states: "
<< max_num_path_states_ - num_tiles * num_path_states_in_tile;
LOG_WORK << "Number of unused path states: "
<< max_num_path_states_ - num_tiles * num_path_states_in_tile;
num_tiles_x_ = divide_up(image_size_px_.x, tile_size_.width);
num_tiles_y_ = divide_up(image_size_px_.y, tile_size_.height);

View File

@@ -134,7 +134,7 @@ OSLRenderServices::OSLRenderServices(OSL::TextureSystem *texture_system, const i
OSLRenderServices::~OSLRenderServices()
{
if (m_texturesys) {
LOG(INFO) << "OSL texture system stats:\n" << m_texturesys->getstats();
LOG_INFO << "OSL texture system stats:\n" << m_texturesys->getstats();
}
}

View File

@@ -1558,7 +1558,7 @@ void AlembicProcedural::build_caches(Progress &progress)
}
}
LOG(WORK) << "AlembicProcedural memory usage : " << string_human_readable_size(memory_used);
LOG_WORK << "AlembicProcedural memory usage : " << string_human_readable_size(memory_used);
}
CCL_NAMESPACE_END

View File

@@ -536,7 +536,7 @@ void Camera::device_update_volume(Device * /*device*/, DeviceScene *dscene, Scen
if (object->get_geometry()->has_volume &&
viewplane_boundbox.intersects(object->bounds)) {
/* TODO(sergey): Consider adding more grained check. */
LOG(INFO) << "Detected camera inside volume.";
LOG_INFO << "Detected camera inside volume.";
kernel_camera.is_inside_volume = 1;
parallel_for_cancel();
break;
@@ -545,7 +545,7 @@ void Camera::device_update_volume(Device * /*device*/, DeviceScene *dscene, Scen
});
if (!kernel_camera.is_inside_volume) {
LOG(INFO) << "Camera is outside of the volume.";
LOG_INFO << "Camera is outside of the volume.";
}
}

View File

@@ -47,7 +47,7 @@ ColorSpaceProcessor *ColorSpaceManager::get_processor(ustring colorspace)
config = OCIO::GetCurrentConfig();
}
catch (const OCIO::Exception &exception) {
LOG(WARNING) << "OCIO config error: " << exception.what();
LOG_WARNING << "OCIO config error: " << exception.what();
return nullptr;
}
@@ -64,8 +64,8 @@ ColorSpaceProcessor *ColorSpaceManager::get_processor(ustring colorspace)
}
catch (const OCIO::Exception &exception) {
cached_processors[colorspace] = OCIO::ConstProcessorRcPtr();
LOG(WARNING) << "Colorspace " << colorspace.c_str()
<< " can't be converted to scene_linear: " << exception.what();
LOG_WARNING << "Colorspace " << colorspace.c_str()
<< " can't be converted to scene_linear: " << exception.what();
}
}
@@ -92,7 +92,7 @@ bool ColorSpaceManager::colorspace_is_data(ustring colorspace)
config = OCIO::GetCurrentConfig();
}
catch (const OCIO::Exception &exception) {
LOG(WARNING) << "OCIO config error: " << exception.what();
LOG_WARNING << "OCIO config error: " << exception.what();
return false;
}
@@ -153,12 +153,12 @@ ustring ColorSpaceManager::detect_known_colorspace(ustring colorspace,
const thread_scoped_lock cache_lock(cache_colorspaces_mutex);
if (is_scene_linear) {
LOG(INFO) << "Colorspace " << colorspace.string() << " is no-op";
LOG_INFO << "Colorspace " << colorspace.string() << " is no-op";
cached_colorspaces[colorspace] = u_colorspace_raw;
return u_colorspace_raw;
}
if (is_srgb) {
LOG(INFO) << "Colorspace " << colorspace.string() << " is sRGB";
LOG_INFO << "Colorspace " << colorspace.string() << " is sRGB";
cached_colorspaces[colorspace] = u_colorspace_srgb;
return u_colorspace_srgb;
}
@@ -170,28 +170,28 @@ ustring ColorSpaceManager::detect_known_colorspace(ustring colorspace,
config = OCIO::GetCurrentConfig();
}
catch (const OCIO::Exception &exception) {
LOG(WARNING) << "OCIO config error: " << exception.what();
LOG_WARNING << "OCIO config error: " << exception.what();
return u_colorspace_raw;
}
if (!config || !config->getColorSpace(colorspace.c_str())) {
LOG(WARNING) << "Colorspace " << colorspace.c_str() << " not found, using raw instead";
LOG_WARNING << "Colorspace " << colorspace.c_str() << " not found, using raw instead";
}
else {
LOG(WARNING) << "Colorspace " << colorspace.c_str()
<< " can't be converted to scene_linear, using raw instead";
LOG_WARNING << "Colorspace " << colorspace.c_str()
<< " can't be converted to scene_linear, using raw instead";
}
cached_colorspaces[colorspace] = u_colorspace_raw;
return u_colorspace_raw;
}
/* Convert to/from colorspace with OpenColorIO. */
LOG(INFO) << "Colorspace " << colorspace.string() << " handled through OpenColorIO";
LOG_INFO << "Colorspace " << colorspace.string() << " handled through OpenColorIO";
cached_colorspaces[colorspace] = colorspace;
return colorspace;
#else
LOG(WARNING) << "Colorspace " << colorspace.c_str()
<< " not available, built without OpenColorIO";
LOG_WARNING << "Colorspace " << colorspace.c_str()
<< " not available, built without OpenColorIO";
return u_colorspace_raw;
#endif
}

View File

@@ -30,8 +30,8 @@ bool ConstantFolder::all_inputs_constant() const
void ConstantFolder::make_constant(const float value) const
{
LOG(DEBUG) << "Folding " << node->name << "::" << output->name() << " to constant (" << value
<< ").";
LOG_DEBUG << "Folding " << node->name << "::" << output->name() << " to constant (" << value
<< ").";
for (ShaderInput *sock : output->links) {
sock->set(value);
@@ -43,8 +43,8 @@ void ConstantFolder::make_constant(const float value) const
void ConstantFolder::make_constant(const float3 value) const
{
LOG(DEBUG) << "Folding " << node->name << "::" << output->name() << " to constant " << value
<< ".";
LOG_DEBUG << "Folding " << node->name << "::" << output->name() << " to constant " << value
<< ".";
for (ShaderInput *sock : output->links) {
sock->set(value);
@@ -56,8 +56,8 @@ void ConstantFolder::make_constant(const float3 value) const
void ConstantFolder::make_constant(const int value) const
{
LOG(DEBUG) << "Folding " << node->name << "::" << output->name() << " to constant (" << value
<< ").";
LOG_DEBUG << "Folding " << node->name << "::" << output->name() << " to constant (" << value
<< ").";
for (ShaderInput *sock : output->links) {
sock->set(value);
@@ -113,8 +113,8 @@ void ConstantFolder::bypass(ShaderOutput *new_output) const
{
assert(new_output);
LOG(DEBUG) << "Folding " << node->name << "::" << output->name() << " to socket "
<< new_output->parent->name << "::" << new_output->name() << ".";
LOG_DEBUG << "Folding " << node->name << "::" << output->name() << " to socket "
<< new_output->parent->name << "::" << new_output->name() << ".";
/* Remove all outgoing links from socket and connect them to new_output instead.
* The graph->relink method affects node inputs, so it's not safe to use in constant
@@ -132,7 +132,7 @@ void ConstantFolder::discard() const
{
assert(output->type() == SocketType::CLOSURE);
LOG(DEBUG) << "Discarding closure " << node->name << ".";
LOG_DEBUG << "Discarding closure " << node->name << ".";
graph->disconnect(output);
}

View File

@@ -599,10 +599,10 @@ void Film::update_passes(Scene *scene)
tag_modified();
/* Debug logging. */
if (LOG_IS_ON(INFO)) {
LOG(INFO) << "Effective scene passes:";
if (LOG_IS_ON(LOG_LEVEL_INFO)) {
LOG_INFO << "Effective scene passes:";
for (const Pass *pass : scene->passes) {
LOG(INFO) << "- " << *pass;
LOG_INFO << "- " << *pass;
}
}
}

View File

@@ -688,7 +688,7 @@ void GeometryManager::device_update(Device *device,
return;
}
LOG(INFO) << "Total " << scene->geometry.size() << " meshes.";
LOG_INFO << "Total " << scene->geometry.size() << " meshes.";
bool true_displacement_used = false;
bool curve_shadow_transparency_used = false;
@@ -972,7 +972,7 @@ void GeometryManager::device_update(Device *device,
TaskPool::Summary summary;
pool.wait_work(&summary);
LOG(WORK) << "Objects BVH build pool statistics:\n" << summary.full_report();
LOG_WORK << "Objects BVH build pool statistics:\n" << summary.full_report();
}
for (Shader *shader : scene->shaders) {

View File

@@ -113,7 +113,7 @@ void GeometryManager::device_update_bvh(Device *device,
bparams.bvh_type = scene->params.bvh_type;
bparams.curve_subdivisions = scene->params.curve_subdivisions();
LOG(INFO) << "Using " << bvh_layout_name(bparams.bvh_layout) << " layout.";
LOG_INFO << "Using " << bvh_layout_name(bparams.bvh_layout) << " layout.";
const bool can_refit = scene->bvh != nullptr && scene->params.bvh_type == BVH_TYPE_DYNAMIC &&
(bparams.bvh_layout == BVHLayout::BVH_LAYOUT_OPTIX ||

View File

@@ -662,8 +662,8 @@ bool ImageManager::file_load_image(Image *img, const int texture_limit)
while (max_size * scale_factor > texture_limit) {
scale_factor *= 0.5f;
}
LOG(WORK) << "Scaling image " << img->loader->name() << " by a factor of " << scale_factor
<< ".";
LOG_WORK << "Scaling image " << img->loader->name() << " by a factor of " << scale_factor
<< ".";
vector<StorageType> scaled_pixels;
size_t scaled_width;
size_t scaled_height;

View File

@@ -20,11 +20,11 @@ bool OIIOImageLoader::load_metadata(const ImageDeviceFeatures & /*features*/,
{
/* Perform preliminary checks, with meaningful logging. */
if (!path_exists(filepath.string())) {
LOG(WARNING) << "File '" << filepath.string() << "' does not exist.";
LOG_WARNING << "File '" << filepath.string() << "' does not exist.";
return false;
}
if (path_is_directory(filepath.string())) {
LOG(WARNING) << "File '" << filepath.string() << "' is a directory, can't use as image.";
LOG_WARNING << "File '" << filepath.string() << "' is a directory, can't use as image.";
return false;
}

View File

@@ -400,7 +400,7 @@ AdaptiveSampling Integrator::get_adaptive_sampling() const
if (clamped_aa_samples > 0 && adaptive_threshold == 0.0f) {
adaptive_sampling.threshold = max(0.001f, 1.0f / (float)aa_samples);
LOG(INFO) << "Adaptive sampling: automatic threshold = " << adaptive_sampling.threshold;
LOG_INFO << "Adaptive sampling: automatic threshold = " << adaptive_sampling.threshold;
}
else {
adaptive_sampling.threshold = adaptive_threshold;
@@ -421,7 +421,7 @@ AdaptiveSampling Integrator::get_adaptive_sampling() const
* in various test scenes. */
const int min_samples = (int)ceilf(16.0f / powf(adaptive_sampling.threshold, 0.3f));
adaptive_sampling.min_samples = max(4, min_samples);
LOG(INFO) << "Adaptive sampling: automatic min samples = " << adaptive_sampling.min_samples;
LOG_INFO << "Adaptive sampling: automatic min samples = " << adaptive_sampling.min_samples;
}
else {
adaptive_sampling.min_samples = max(4, adaptive_min_samples);

View File

@@ -274,7 +274,7 @@ void LightManager::test_enabled_lights(Scene *scene)
num_lights++;
}
LOG(INFO) << "Total " << num_lights << " lights.";
LOG_INFO << "Total " << num_lights << " lights.";
bool background_enabled = false;
int background_resolution = 0;
@@ -287,7 +287,7 @@ void LightManager::test_enabled_lights(Scene *scene)
Shader *shader = scene->background->get_shader(scene);
const bool disable_mis = !(has_portal || shader->has_surface_spatial_varying);
if (disable_mis) {
LOG(INFO) << "Background MIS has been disabled.";
LOG_INFO << "Background MIS has been disabled.";
}
for (Light *light : background_lights) {
light->is_enabled = !disable_mis;
@@ -387,7 +387,7 @@ void LightManager::device_update_distribution(Device * /*unused*/,
/* Distribution size. */
kintegrator->num_distribution = num_distribution;
LOG(INFO) << "Use light distribution with " << num_distribution << " emitters.";
LOG_INFO << "Use light distribution with " << num_distribution << " emitters.";
/* Emission area. */
KernelLightDistribution *distribution = dscene->light_distribution.alloc(num_distribution + 1);
@@ -862,8 +862,8 @@ void LightManager::device_update_tree(Device * /*unused*/,
KernelLightLinkSet *klight_link_sets = dscene->data.light_link_sets;
memset(klight_link_sets, 0, sizeof(dscene->data.light_link_sets));
LOG(INFO) << "Use light tree with " << num_emitters << " emitters and " << light_tree.num_nodes
<< " nodes.";
LOG_INFO << "Use light tree with " << num_emitters << " emitters and " << light_tree.num_nodes
<< " nodes.";
if (!use_light_linking) {
/* Regular light tree without linking. */
@@ -908,8 +908,8 @@ void LightManager::device_update_tree(Device * /*unused*/,
KernelLightTreeNode *knodes = dscene->light_tree_nodes.alloc(light_link_nodes.size());
memcpy(knodes, light_link_nodes.data(), light_link_nodes.size() * sizeof(*knodes));
LOG(INFO) << "Specialized light tree for light linking, with "
<< light_link_nodes.size() - light_tree.num_nodes << " additional nodes.";
LOG_INFO << "Specialized light tree for light linking, with "
<< light_link_nodes.size() - light_tree.num_nodes << " additional nodes.";
}
/* Copy arrays to device. */
@@ -1074,13 +1074,13 @@ void LightManager::device_update_background(Device *device,
if (res.x == 0) {
res = environment_res;
if (res.x > 0 && res.y > 0) {
LOG(INFO) << "Automatically set World MIS resolution to " << res.x << " by " << res.y;
LOG_INFO << "Automatically set World MIS resolution to " << res.x << " by " << res.y;
}
}
/* If it's still unknown, just use the default. */
if (res.x == 0 || res.y == 0) {
res = make_int2(1024, 512);
LOG(INFO) << "Setting World MIS resolution to default";
LOG_INFO << "Setting World MIS resolution to default";
}
kbackground->map_res_x = res.x;
kbackground->map_res_y = res.y;
@@ -1139,7 +1139,7 @@ void LightManager::device_update_background(Device *device,
marg_cdf[res.y].y = 1.0f;
LOG(WORK) << "Background MIS build time " << time_dt() - time_start;
LOG_WORK << "Background MIS build time " << time_dt() - time_start;
/* update device */
dscene->light_background_marginal_cdf.copy_to_device();
@@ -1396,7 +1396,7 @@ void LightManager::device_update_lights(DeviceScene *dscene, Scene *scene)
light_index++;
}
LOG(INFO) << "Number of lights sent to the device: " << num_lights;
LOG_INFO << "Number of lights sent to the device: " << num_lights;
dscene->lights.copy_to_device();
}

View File

@@ -780,7 +780,7 @@ void ObjectManager::device_update(Device *device,
dscene->objects.tag_modified();
}
LOG(INFO) << "Total " << scene->objects.size() << " objects.";
LOG_INFO << "Total " << scene->objects.size() << " objects.";
device_free(device, dscene, false);

View File

@@ -385,7 +385,7 @@ void OSLManager::shading_system_init()
ss->attribute("max_optix_groupdata_alloc", 2048);
}
LOG(INFO) << "Using shader search path: " << shader_path;
LOG_INFO << "Using shader search path: " << shader_path;
/* our own ray types */
static const char *raytypes[] = {
@@ -567,7 +567,7 @@ const char *OSLManager::shader_load_filepath(string filepath)
string bytecode;
if (!path_read_text(filepath, bytecode)) {
LOG(ERROR) << "Shader graph: failed to read file " << filepath;
LOG_ERROR << "Shader graph: failed to read file " << filepath;
const OSLShaderInfo info;
loaded_shaders[bytecode_hash] = info; /* to avoid repeat tries */
return nullptr;
@@ -588,7 +588,7 @@ const char *OSLManager::shader_load_bytecode(const string &hash, const string &b
OSLShaderInfo info;
if (!info.query.open_bytecode(bytecode)) {
LOG(ERROR) << "OSL query error: " << info.query.geterror();
LOG_ERROR << "OSL query error: " << info.query.geterror();
}
/* this is a bit weak, but works */
@@ -628,7 +628,7 @@ void OSLShaderManager::device_update_specific(Device *device,
}
});
LOG(INFO) << "Total " << scene->shaders.size() << " shaders.";
LOG_INFO << "Total " << scene->shaders.size() << " shaders.";
/* setup shader engine */
OSLManager::foreach_osl_device(device, [](Device *, OSLGlobals *og) {

View File

@@ -99,7 +99,7 @@ void ParticleSystemManager::device_update(Device *device,
}
});
LOG(INFO) << "Total " << scene->particle_systems.size() << " particle systems.";
LOG_INFO << "Total " << scene->particle_systems.size() << " particle systems.";
device_free(device, dscene);

View File

@@ -15,7 +15,7 @@ const char *pass_type_as_string(const PassType type)
const NodeEnum *type_enum = Pass::get_type_enum();
if (!type_enum->exists(type_int)) {
LOG(DFATAL) << "Unhandled pass type " << static_cast<int>(type) << ", not supposed to happen.";
LOG_DFATAL << "Unhandled pass type " << static_cast<int>(type) << ", not supposed to happen.";
return "UNKNOWN";
}
@@ -31,7 +31,7 @@ const char *pass_mode_as_string(PassMode mode)
return "DENOISED";
}
LOG(DFATAL) << "Unhandled pass mode " << static_cast<int>(mode) << ", should never happen.";
LOG_DFATAL << "Unhandled pass mode " << static_cast<int>(mode) << ", should never happen.";
return "UNKNOWN";
}
@@ -347,7 +347,7 @@ PassInfo Pass::get_info(const PassType type, const bool include_albedo, const bo
case PASS_CATEGORY_DATA_END:
case PASS_CATEGORY_BAKE_END:
case PASS_NUM:
LOG(DFATAL) << "Unexpected pass type is used " << type;
LOG_DFATAL << "Unexpected pass type is used " << type;
pass_info.num_components = 0;
break;
case PASS_GUIDING_COLOR:

View File

@@ -374,11 +374,11 @@ void Scene::device_update(Device *device_, Progress &progress)
const size_t mem_used = util_guarded_get_mem_used();
const size_t mem_peak = util_guarded_get_mem_peak();
LOG(INFO) << "System memory statistics after full device sync:\n"
<< " Usage: " << string_human_readable_number(mem_used) << " ("
<< string_human_readable_size(mem_used) << ")\n"
<< " Peak: " << string_human_readable_number(mem_peak) << " ("
<< string_human_readable_size(mem_peak) << ")";
LOG_INFO << "System memory statistics after full device sync:\n"
<< " Usage: " << string_human_readable_number(mem_used) << " ("
<< string_human_readable_size(mem_used) << ")\n"
<< " Peak: " << string_human_readable_number(mem_peak) << " ("
<< string_human_readable_size(mem_peak) << ")";
}
}
@@ -603,24 +603,24 @@ bool Scene::update_camera_resolution(Progress &progress, int width, int height)
static void log_kernel_features(const uint features)
{
LOG(INFO) << "Requested features:";
LOG(INFO) << "Use BSDF " << string_from_bool(features & KERNEL_FEATURE_NODE_BSDF);
LOG(INFO) << "Use Emission " << string_from_bool(features & KERNEL_FEATURE_NODE_EMISSION);
LOG(INFO) << "Use Volume " << string_from_bool(features & KERNEL_FEATURE_NODE_VOLUME);
LOG(INFO) << "Use Bump " << string_from_bool(features & KERNEL_FEATURE_NODE_BUMP);
LOG(INFO) << "Use Voronoi " << string_from_bool(features & KERNEL_FEATURE_NODE_VORONOI_EXTRA);
LOG(INFO) << "Use Shader Raytrace " << string_from_bool(features & KERNEL_FEATURE_NODE_RAYTRACE);
LOG(INFO) << "Use MNEE " << string_from_bool(features & KERNEL_FEATURE_MNEE);
LOG(INFO) << "Use Transparent " << string_from_bool(features & KERNEL_FEATURE_TRANSPARENT);
LOG(INFO) << "Use Denoising " << string_from_bool(features & KERNEL_FEATURE_DENOISING);
LOG(INFO) << "Use Path Tracing " << string_from_bool(features & KERNEL_FEATURE_PATH_TRACING);
LOG(INFO) << "Use Hair " << string_from_bool(features & KERNEL_FEATURE_HAIR);
LOG(INFO) << "Use Pointclouds " << string_from_bool(features & KERNEL_FEATURE_POINTCLOUD);
LOG(INFO) << "Use Object Motion " << string_from_bool(features & KERNEL_FEATURE_OBJECT_MOTION);
LOG(INFO) << "Use Baking " << string_from_bool(features & KERNEL_FEATURE_BAKING);
LOG(INFO) << "Use Subsurface " << string_from_bool(features & KERNEL_FEATURE_SUBSURFACE);
LOG(INFO) << "Use Volume " << string_from_bool(features & KERNEL_FEATURE_VOLUME);
LOG(INFO) << "Use Shadow Catcher " << string_from_bool(features & KERNEL_FEATURE_SHADOW_CATCHER);
LOG_INFO << "Requested features:";
LOG_INFO << "Use BSDF " << string_from_bool(features & KERNEL_FEATURE_NODE_BSDF);
LOG_INFO << "Use Emission " << string_from_bool(features & KERNEL_FEATURE_NODE_EMISSION);
LOG_INFO << "Use Volume " << string_from_bool(features & KERNEL_FEATURE_NODE_VOLUME);
LOG_INFO << "Use Bump " << string_from_bool(features & KERNEL_FEATURE_NODE_BUMP);
LOG_INFO << "Use Voronoi " << string_from_bool(features & KERNEL_FEATURE_NODE_VORONOI_EXTRA);
LOG_INFO << "Use Shader Raytrace " << string_from_bool(features & KERNEL_FEATURE_NODE_RAYTRACE);
LOG_INFO << "Use MNEE " << string_from_bool(features & KERNEL_FEATURE_MNEE);
LOG_INFO << "Use Transparent " << string_from_bool(features & KERNEL_FEATURE_TRANSPARENT);
LOG_INFO << "Use Denoising " << string_from_bool(features & KERNEL_FEATURE_DENOISING);
LOG_INFO << "Use Path Tracing " << string_from_bool(features & KERNEL_FEATURE_PATH_TRACING);
LOG_INFO << "Use Hair " << string_from_bool(features & KERNEL_FEATURE_HAIR);
LOG_INFO << "Use Pointclouds " << string_from_bool(features & KERNEL_FEATURE_POINTCLOUD);
LOG_INFO << "Use Object Motion " << string_from_bool(features & KERNEL_FEATURE_OBJECT_MOTION);
LOG_INFO << "Use Baking " << string_from_bool(features & KERNEL_FEATURE_BAKING);
LOG_INFO << "Use Subsurface " << string_from_bool(features & KERNEL_FEATURE_SUBSURFACE);
LOG_INFO << "Use Volume " << string_from_bool(features & KERNEL_FEATURE_VOLUME);
LOG_INFO << "Use Shadow Catcher " << string_from_bool(features & KERNEL_FEATURE_SHADOW_CATCHER);
}
bool Scene::load_kernels(Progress &progress)
@@ -675,8 +675,8 @@ int Scene::get_max_closure_count()
* closures discarded due to mixing or low weights. We need to limit
* to MAX_CLOSURE as this is hardcoded in CPU/mega kernels, and it
* avoids excessive memory usage for split kernels. */
LOG(WARNING) << "Maximum number of closures exceeded: " << max_closure_global << " > "
<< MAX_CLOSURE;
LOG_WARNING << "Maximum number of closures exceeded: " << max_closure_global << " > "
<< MAX_CLOSURE;
max_closure_global = MAX_CLOSURE;
}
@@ -726,7 +726,7 @@ int Scene::get_volume_stack_size() const
volume_stack_size = min(volume_stack_size, MAX_VOLUME_STACK_SIZE);
LOG(WORK) << "Detected required volume stack size " << volume_stack_size;
LOG_WORK << "Detected required volume stack size " << volume_stack_size;
return volume_stack_size;
}

View File

@@ -979,7 +979,7 @@ void ShaderManager::init_xyz_transforms()
config = OCIO::GetCurrentConfig();
}
catch (OCIO::Exception &exception) {
LOG(WARNING) << "OCIO config error: " << exception.what();
LOG_WARNING << "OCIO config error: " << exception.what();
return;
}

View File

@@ -241,16 +241,16 @@ void ShaderGraph::connect(ShaderOutput *from, ShaderInput *to)
assert(from && to);
if (to->link) {
LOG(WARNING) << "Graph connect: input already connected.";
LOG_WARNING << "Graph connect: input already connected.";
return;
}
if (from->type() != to->type()) {
/* can't do automatic conversion from closure */
if (from->type() == SocketType::CLOSURE) {
LOG(WARNING) << "Shader graph connect: can only connect closure to closure ("
<< from->parent->name.c_str() << "." << from->name().c_str() << " to "
<< to->parent->name.c_str() << "." << to->name().c_str() << ")";
LOG_WARNING << "Shader graph connect: can only connect closure to closure ("
<< from->parent->name.c_str() << "." << from->name().c_str() << " to "
<< to->parent->name.c_str() << "." << to->name().c_str() << ")";
return;
}
@@ -659,7 +659,7 @@ void ShaderGraph::deduplicate_nodes()
}
if (num_deduplicated > 0) {
LOG(DEBUG) << "Deduplicated " << num_deduplicated << " nodes.";
LOG_DEBUG << "Deduplicated " << num_deduplicated << " nodes.";
}
}
@@ -713,19 +713,19 @@ void ShaderGraph::optimize_volume_output()
}
}
if (LOG_IS_ON(DEBUG)) {
if (LOG_IS_ON(LOG_LEVEL_DEBUG)) {
for (ShaderNode *node : nodes) {
if (node->type == AttributeNode::get_node_type() &&
static_cast<AttributeNode *>(node)->stochastic_sample)
{
LOG(DEBUG) << "Volume attribute node " << node->name << " uses stochastic sampling";
LOG_DEBUG << "Volume attribute node " << node->name << " uses stochastic sampling";
}
}
}
if (!has_valid_volume) {
/* We can remove the entire volume shader. */
LOG(DEBUG) << "Disconnect meaningless volume output.";
LOG_DEBUG << "Disconnect meaningless volume output.";
disconnect(volume_in->link);
}
}
@@ -742,7 +742,7 @@ void ShaderGraph::break_cycles(ShaderNode *node, vector<bool> &visited, vector<b
if (on_stack[depnode->id]) {
/* break cycle */
disconnect(input);
LOG(WARNING) << "Shader graph: detected cycle in graph, connection removed.";
LOG_WARNING << "Shader graph: detected cycle in graph, connection removed.";
}
else if (!visited[depnode->id]) {
/* visit dependencies */
@@ -1221,7 +1221,7 @@ void ShaderGraph::dump_graph(const char *filename)
FILE *fd = fopen(filename, "w");
if (fd == nullptr) {
LOG(ERROR) << "Error opening file for dumping the graph: " << filename;
LOG_ERROR << "Error opening file for dumping the graph: " << filename;
return;
}

View File

@@ -43,9 +43,9 @@ void SVMShaderManager::device_update_shader(Scene *scene,
compiler.background = (shader == scene->background->get_shader(scene));
compiler.compile(shader, *svm_nodes, 0, &summary);
LOG(WORK) << "Compilation summary:\n"
<< "Shader name: " << shader->name << "\n"
<< summary.full_report();
LOG_WORK << "Compilation summary:\n"
<< "Shader name: " << shader->name << "\n"
<< summary.full_report();
}
void SVMShaderManager::device_update_specific(Device *device,
@@ -65,7 +65,7 @@ void SVMShaderManager::device_update_specific(Device *device,
const int num_shaders = scene->shaders.size();
LOG(INFO) << "Total " << num_shaders << " shaders.";
LOG_INFO << "Total " << num_shaders << " shaders.";
const double start_time = time_dt();
@@ -136,8 +136,8 @@ void SVMShaderManager::device_update_specific(Device *device,
update_flags = UPDATE_NONE;
LOG(INFO) << "Shader manager updated " << num_shaders << " shaders in " << time_dt() - start_time
<< " seconds.";
LOG_INFO << "Shader manager updated " << num_shaders << " shaders in " << time_dt() - start_time
<< " seconds.";
}
void SVMShaderManager::device_free(Device *device, DeviceScene *dscene, Scene *scene)
@@ -217,8 +217,8 @@ int SVMCompiler::stack_find_offset(const int size)
if (!compile_failed) {
compile_failed = true;
LOG(ERROR) << "Shader graph: out of SVM stack space, shader \"" << current_shader->name
<< "\" too big.";
LOG_ERROR << "Shader graph: out of SVM stack space, shader \"" << current_shader->name
<< "\" too big.";
}
return 0;

View File

@@ -36,7 +36,7 @@ void LookupTables::device_update(Device * /*unused*/, DeviceScene *dscene, Scene
}
});
LOG(INFO) << "Total " << lookup_tables.size() << " lookup tables.";
LOG_INFO << "Total " << lookup_tables.size() << " lookup tables.";
if (!lookup_tables.empty()) {
dscene->lookup_table.copy_to_device();

View File

@@ -544,7 +544,7 @@ void GeometryManager::create_volume_mesh(const Scene *scene, Volume *volume, Pro
/* If nothing to build, early out. */
if (builder.empty_grid()) {
LOG(WORK) << "Memory usage volume mesh: 0 Mb. (empty grid)";
LOG_WORK << "Memory usage volume mesh: 0 Mb. (empty grid)";
return;
}
@@ -575,10 +575,9 @@ void GeometryManager::create_volume_mesh(const Scene *scene, Volume *volume, Pro
}
/* Print stats. */
LOG(WORK) << "Memory usage volume mesh: "
<< (vertices.size() * sizeof(float3) + indices.size() * sizeof(int)) /
(1024.0 * 1024.0)
<< "Mb.";
LOG_WORK << "Memory usage volume mesh: "
<< (vertices.size() * sizeof(float3) + indices.size() * sizeof(int)) / (1024.0 * 1024.0)
<< "Mb.";
#else
(void)scene;
#endif /* defined(WITH_OPENVDB) && defined(WITH_NANOVDB) */

View File

@@ -165,12 +165,12 @@ void Session::run_main_render_loop()
RenderWork render_work = run_update_for_next_iteration();
if (!render_work) {
if (LOG_IS_ON(INFO)) {
if (LOG_IS_ON(LOG_LEVEL_INFO)) {
double total_time;
double render_time;
progress.get_time(total_time, render_time);
LOG(INFO) << "Rendering in main loop is done in " << render_time << " seconds.";
LOG(INFO) << path_trace_->full_report();
LOG_INFO << "Rendering in main loop is done in " << render_time << " seconds.";
LOG_INFO << path_trace_->full_report();
}
if (params.background) {

View File

@@ -108,7 +108,7 @@ static bool node_socket_to_image_spec_atttributes(ImageSpec *image_spec,
/* Validate that the node is consistent with the node type definition. */
const NodeEnum &enum_values = *socket.enum_values;
if (!enum_values.exists(value)) {
LOG(DFATAL) << "Node enum contains invalid value " << value;
LOG_DFATAL << "Node enum contains invalid value " << value;
return false;
}
@@ -134,7 +134,7 @@ static bool node_socket_to_image_spec_atttributes(ImageSpec *image_spec,
return true;
default:
LOG(DFATAL) << "Unhandled socket type " << socket.type << ", should never happen.";
LOG_DFATAL << "Unhandled socket type " << socket.type << ", should never happen.";
return false;
}
}
@@ -154,7 +154,7 @@ static bool node_socket_from_image_spec_atttributes(Node *node,
/* Validate that the node is consistent with the node type definition. */
const NodeEnum &enum_values = *socket.enum_values;
if (!enum_values.exists(value)) {
LOG(ERROR) << "Invalid enumerator value " << value;
LOG_ERROR << "Invalid enumerator value " << value;
return false;
}
@@ -181,7 +181,7 @@ static bool node_socket_from_image_spec_atttributes(Node *node,
return true;
default:
LOG(DFATAL) << "Unhandled socket type " << socket.type << ", should never happen.";
LOG_DFATAL << "Unhandled socket type " << socket.type << ", should never happen.";
return false;
}
}
@@ -247,7 +247,7 @@ static bool buffer_params_from_image_spec_atttributes(BufferParams *buffer_param
const int num_passes = image_spec.get_int_attribute(ATTR_PASSES_COUNT, 0);
if (num_passes == 0) {
LOG(ERROR) << "Missing passes count attribute.";
LOG_ERROR << "Missing passes count attribute.";
return false;
}
@@ -334,7 +334,7 @@ int TileManager::compute_render_tile_size(const int suggested_tile_size) const
void TileManager::reset_scheduling(const BufferParams &params, const int2 tile_size)
{
LOG(WORK) << "Using tile size of " << tile_size;
LOG_WORK << "Using tile size of " << tile_size;
close_tile_output();
@@ -450,24 +450,24 @@ bool TileManager::open_tile_output()
write_state_.tile_out = ImageOutput::create(write_state_.filename);
if (!write_state_.tile_out) {
LOG(ERROR) << "Error creating image output for " << write_state_.filename;
LOG_ERROR << "Error creating image output for " << write_state_.filename;
return false;
}
if (!write_state_.tile_out->supports("tiles")) {
LOG(ERROR) << "Progress tile file format does not support tiling.";
LOG_ERROR << "Progress tile file format does not support tiling.";
return false;
}
if (!write_state_.tile_out->open(write_state_.filename, write_state_.image_spec)) {
LOG(ERROR) << "Error opening tile file: " << write_state_.tile_out->geterror();
LOG_ERROR << "Error opening tile file: " << write_state_.tile_out->geterror();
write_state_.tile_out = nullptr;
return false;
}
write_state_.num_tiles_written = 0;
LOG(WORK) << "Opened tile file " << write_state_.filename;
LOG_WORK << "Opened tile file " << write_state_.filename;
return true;
}
@@ -482,11 +482,11 @@ bool TileManager::close_tile_output()
write_state_.tile_out = nullptr;
if (!success) {
LOG(ERROR) << "Error closing tile file.";
LOG_ERROR << "Error closing tile file.";
return false;
}
LOG(WORK) << "Tile output is closed.";
LOG_WORK << "Tile output is closed.";
return true;
}
@@ -538,7 +538,7 @@ bool TileManager::write_tile(const RenderBuffers &tile_buffers)
pixels = pixel_storage.data();
}
LOG(WORK) << "Write tile at " << tile_x << ", " << tile_y;
LOG_WORK << "Write tile at " << tile_x << ", " << tile_y;
/* The image tile sizes in the OpenEXR file are different from the size of our big tiles. The
* write_tiles() method expects a contiguous image region that will be split into tiles
@@ -564,13 +564,13 @@ bool TileManager::write_tile(const RenderBuffers &tile_buffers)
ystride,
zstride))
{
LOG(ERROR) << "Error writing tile " << write_state_.tile_out->geterror();
LOG_ERROR << "Error writing tile " << write_state_.tile_out->geterror();
return false;
}
++write_state_.num_tiles_written;
LOG(WORK) << "Tile written in " << time_dt() - time_start << " seconds.";
LOG_WORK << "Tile written in " << time_dt() - time_start << " seconds.";
return true;
}
@@ -595,7 +595,7 @@ void TileManager::finish_write_tiles()
const int tile_x = tile.x + tile.window_x;
const int tile_y = tile.y + tile.window_y;
LOG(WORK) << "Write dummy tile at " << tile_x << ", " << tile_y;
LOG_WORK << "Write dummy tile at " << tile_x << ", " << tile_y;
write_state_.tile_out->write_tiles(tile_x,
tile_x + tile.window_width,
@@ -614,8 +614,8 @@ void TileManager::finish_write_tiles()
full_buffer_written_cb(write_state_.filename);
}
LOG(WORK) << "Tile file size is "
<< string_human_readable_number(path_file_size(write_state_.filename)) << " bytes.";
LOG_WORK << "Tile file size is "
<< string_human_readable_number(path_file_size(write_state_.filename)) << " bytes.";
/* Advance the counter upon explicit finish of the file.
* Makes it possible to re-use tile manager for another scene, and avoids unnecessary increments
@@ -631,7 +631,7 @@ bool TileManager::read_full_buffer_from_disk(const string_view filename,
{
unique_ptr<ImageInput> in(ImageInput::open(filename));
if (!in) {
LOG(ERROR) << "Error opening tile file " << filename;
LOG_ERROR << "Error opening tile file " << filename;
return false;
}
@@ -649,12 +649,12 @@ bool TileManager::read_full_buffer_from_disk(const string_view filename,
const int num_channels = in->spec().nchannels;
if (!in->read_image(0, 0, 0, num_channels, TypeDesc::FLOAT, buffers->buffer.data())) {
LOG(ERROR) << "Error reading pixels from the tile file " << in->geterror();
LOG_ERROR << "Error reading pixels from the tile file " << in->geterror();
return false;
}
if (!in->close()) {
LOG(ERROR) << "Error closing tile file " << in->geterror();
LOG_ERROR << "Error closing tile file " << in->geterror();
return false;
}

View File

@@ -244,7 +244,7 @@ void TopologyRefinerFactory<OsdMesh>::reportInvalidTopology(TopologyError /*err_
OsdMesh const &osd_mesh)
{
const Mesh &mesh = osd_mesh.mesh;
LOG(WARNING) << "Invalid subdivision topology for '" << mesh.name.c_str() << "': " << msg;
LOG_WARNING << "Invalid subdivision topology for '" << mesh.name.c_str() << "': " << msg;
}
} // namespace OpenSubdiv::OPENSUBDIV_VERSION::Far

View File

@@ -177,7 +177,7 @@ class ScopedMockLog {
return;
}
}
LOG(FATAL) << "Message \"" << pattern << "\" not found";
LOG_FATAL << "Message \"" << pattern << "\" not found";
}
/* Check messages do not contain this pattern. */
@@ -185,7 +185,7 @@ class ScopedMockLog {
{
for (const string &msg : messages) {
if (msg.find(pattern) != string::npos) {
LOG(FATAL) << "Invalid message \"" << pattern << "\" found";
LOG_FATAL << "Invalid message \"" << pattern << "\" found";
return;
}
}
@@ -228,14 +228,14 @@ class RenderGraph : public testing::Test {
/* Initialize logging after the creation of the essential resources. This way the logging
* mock sink does not warn about uninteresting messages which happen prior to the setup of
* the actual mock sinks. */
log_level_set(DEBUG);
log_level_set(LOG_LEVEL_DEBUG);
}
void TearDown() override
{
/* Effectively disable logging, so that the next test suite starts in an environment which is
* not logging by default. */
log_level_set(FATAL);
log_level_set(LOG_LEVEL_FATAL);
scene.reset();
device_cpu.reset();

View File

@@ -22,7 +22,7 @@ void DebugFlags::CPU::reset()
do { \
flag = (getenv(env) == nullptr); \
if (!flag) { \
LOG(INFO) << "Disabling " << STRINGIFY(flag) << " instruction set."; \
LOG_INFO << "Disabling " << STRINGIFY(flag) << " instruction set."; \
} \
} while (0)

View File

@@ -152,7 +152,7 @@ size_t util_guarded_get_mem_peak();
(func)(__VA_ARGS__); \
} \
catch (std::bad_alloc &) { \
LOG(ERROR) << "Out of memory"; \
LOG_ERROR << "Out of memory"; \
fflush(stderr); \
(progress)->set_error("Out of memory"); \
} \

View File

@@ -14,32 +14,32 @@
CCL_NAMESPACE_BEGIN
LogLevel LOG_LEVEL = INFO_IMPORTANT;
LogLevel LOG_LEVEL = LOG_LEVEL_INFO_IMPORTANT;
static LogFunction LOG_FUNCTION;
static double LOG_START_TIME = time_dt();
const char *log_level_to_string(const LogLevel level)
{
switch (level) {
case FATAL:
case DFATAL:
case LOG_LEVEL_FATAL:
case LOG_LEVEL_DFATAL:
return "FATAL";
case ERROR:
case DERROR:
case LOG_LEVEL_ERROR:
case LOG_LEVEL_DERROR:
return "ERROR";
case WARNING:
case DWARNING:
case LOG_LEVEL_WARNING:
case LOG_LEVEL_DWARNING:
return "WARNING";
case INFO_IMPORTANT:
case INFO:
case LOG_LEVEL_INFO_IMPORTANT:
case LOG_LEVEL_INFO:
return "INFO";
case WORK:
case LOG_LEVEL_WORK:
return "WORK";
case STATS:
case LOG_LEVEL_STATS:
return "STATS";
case DEBUG:
case LOG_LEVEL_DEBUG:
return "DEBUG";
case UNKNOWN:
case LOG_LEVEL_UNKNOWN:
return "UNKNOWN";
}
@@ -51,27 +51,27 @@ LogLevel log_string_to_level(const string &str)
const std::string str_lower = string_to_lower(str);
if (str_lower == "fatal") {
return FATAL;
return LOG_LEVEL_FATAL;
}
if (str_lower == "error") {
return ERROR;
return LOG_LEVEL_ERROR;
}
if (str_lower == "warning") {
return WARNING;
return LOG_LEVEL_WARNING;
}
if (str_lower == "info") {
return INFO;
return LOG_LEVEL_INFO;
}
if (str_lower == "work") {
return WORK;
return LOG_LEVEL_WORK;
}
if (str_lower == "stats") {
return STATS;
return LOG_LEVEL_STATS;
}
if (str_lower == "debug") {
return DEBUG;
return LOG_LEVEL_DEBUG;
}
return UNKNOWN;
return LOG_LEVEL_UNKNOWN;
}
void log_init(const LogFunction func)
@@ -88,8 +88,8 @@ void log_level_set(const LogLevel level)
void log_level_set(const std::string &level)
{
const LogLevel new_level = log_string_to_level(level);
if (new_level == UNKNOWN) {
LOG(ERROR) << "Unknown log level specified: " << level;
if (new_level == LOG_LEVEL_UNKNOWN) {
LOG_ERROR << "Unknown log level specified: " << level;
return;
}
LOG_LEVEL = new_level;
@@ -97,7 +97,7 @@ void log_level_set(const std::string &level)
static void log_default(const LogLevel level, const std::string &time_str, const char *msg)
{
if (level >= INFO) {
if (level >= LOG_LEVEL_INFO) {
printf("%s | %s\n", time_str.c_str(), msg);
}
else {
@@ -128,7 +128,7 @@ void _log_message(const LogLevel level, const char *file_line, const char *func,
log_default(level, time_str, line.c_str());
}
if (level == FATAL || level == DFATAL) {
if (level == LOG_LEVEL_FATAL || level == LOG_LEVEL_DFATAL) {
abort();
}
}

View File

@@ -15,18 +15,18 @@ CCL_NAMESPACE_BEGIN
/* Log Levels */
enum LogLevel {
FATAL = 0, /* Fatal error, application will abort */
DFATAL = 1, /* Fatal error in debug build only */
ERROR = 2, /* Error */
DERROR = 3, /* Error in debug build only */
WARNING = 4, /* Warning */
DWARNING = 5, /* Warning in debug build only */
INFO_IMPORTANT = 6, /* Important info that is printed by default */
INFO = 7, /* Info about devices, scene contents and features used. */
WORK = 8, /* Work being performed and timing/memory stats about that work. */
STATS = 9, /* Detailed device timing stats. */
DEBUG = 10, /* Verbose debug messages. */
UNKNOWN = -1,
LOG_LEVEL_FATAL = 0, /* Fatal error, application will abort */
LOG_LEVEL_DFATAL = 1, /* Fatal error in debug build only */
LOG_LEVEL_ERROR = 2, /* Error */
LOG_LEVEL_DERROR = 3, /* Error in debug build only */
LOG_LEVEL_WARNING = 4, /* Warning */
LOG_LEVEL_DWARNING = 5, /* Warning in debug build only */
LOG_LEVEL_INFO_IMPORTANT = 6, /* Important info that is printed by default */
LOG_LEVEL_INFO = 7, /* Info about devices, scene contents and features used. */
LOG_LEVEL_WORK = 8, /* Work being performed and timing/memory stats about that work. */
LOG_LEVEL_STATS = 9, /* Detailed device timing stats. */
LOG_LEVEL_DEBUG = 10, /* Verbose debug messages. */
LOG_LEVEL_UNKNOWN = -1,
};
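/* Editorial note, not part of this commit: the numeric values order levels by
 * severity, and a message is only emitted when its level is <= the global
 * LOG_LEVEL (see LOG_IF and LOG_IS_ON below). For example, after
 *   log_level_set(LOG_LEVEL_WORK);
 * fatal, error, warning, info and work messages are printed, while
 * LOG_LEVEL_STATS and LOG_LEVEL_DEBUG messages are skipped. */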
const char *log_level_to_string(const LogLevel level);
@@ -82,7 +82,8 @@ extern LogLevel LOG_LEVEL;
/* Macro to ensure lazy evaluation of both condition and logging text. */
#ifdef NDEBUG
# define LOG_IF(level, condition) \
if constexpr (level != DFATAL && level != DERROR && level != DWARNING) \
if constexpr (level != LOG_LEVEL_DFATAL && level != LOG_LEVEL_DERROR && \
level != LOG_LEVEL_DWARNING) \
if (UNLIKELY(level <= LOG_LEVEL && (condition))) \
LogMessage(level, __FILE__ ":" LOG_STRINGIFY(__LINE__), __func__).stream()
#else
@@ -93,17 +94,29 @@ extern LogLevel LOG_LEVEL;
/* Log a message at the desired level.
*
* Example: LOG(INFO) << "This is a log message"; */
* Example: LOG_INFO << "This is a log message"; */
#define LOG(level) LOG_IF(level, true)
#define LOG_FATAL LOG(LOG_LEVEL_FATAL)
#define LOG_DFATAL LOG(LOG_LEVEL_DFATAL)
#define LOG_ERROR LOG(LOG_LEVEL_ERROR)
#define LOG_DERROR LOG(LOG_LEVEL_DERROR)
#define LOG_WARNING LOG(LOG_LEVEL_WARNING)
#define LOG_DWARNING LOG(LOG_LEVEL_DWARNING)
#define LOG_INFO_IMPORTANT LOG(LOG_LEVEL_INFO_IMPORTANT)
#define LOG_INFO LOG(LOG_LEVEL_INFO)
#define LOG_WORK LOG(LOG_LEVEL_WORK)
#define LOG_STATS LOG(LOG_LEVEL_STATS)
#define LOG_DEBUG LOG(LOG_LEVEL_DEBUG)
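/* Editorial example, not part of this commit: call sites keep the streaming
 * syntax and only the macro name gains the LOG_ prefix, e.g.
 *   LOG_ERROR << "Unknown shader node \"" << node.name() << "\"";
 *   LOG_WORK << "Tile written in " << time_dt() - time_start << " seconds.";
 * Arguments are only evaluated when the level is enabled, since LOG_IF guards
 * the whole statement. */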
/* Check if logging is enabled, to avoid doing expensive work to compute
* the logging message. Note that any work to the right of LOG(level) will
* not be evaluated if logging for that level is disabled. */
#define LOG_IS_ON(level) ((level) <= LOG_LEVEL)
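/* Editorial example, not part of this commit, mirroring the guard used in
 * session.cpp above: wrap work that is only needed for logging, e.g.
 *   if (LOG_IS_ON(LOG_LEVEL_INFO)) {
 *     double total_time, render_time;
 *     progress.get_time(total_time, render_time);
 *     LOG_INFO << "Rendering in main loop is done in " << render_time << " seconds.";
 *   }
 * so the get_time() call is skipped when INFO logging is disabled. */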
/* Check if expression and conditions hold true, failure will exit the program. */
#define CHECK(expression) LOG_IF(FATAL, !(expression))
#define CHECK_OP(op, a, b) LOG_IF(FATAL, !((a)op(b)))
#define CHECK(expression) LOG_IF(LOG_LEVEL_FATAL, !(expression))
#define CHECK_OP(op, a, b) LOG_IF(LOG_LEVEL_FATAL, !((a)op(b)))
#define CHECK_GE(a, b) CHECK_OP(>=, a, b)
#define CHECK_NE(a, b) CHECK_OP(!=, a, b)
#define CHECK_EQ(a, b) CHECK_OP(==, a, b)
@@ -116,17 +129,18 @@ extern LogLevel LOG_LEVEL;
template<typename T> T DCheckNotNull(T &&t, const char *expression)
{
if (t == nullptr) {
LOG(FATAL) << "Failed " << expression << "is not null";
LOG_FATAL << "Failed " << expression << "is not null";
}
return std::forward<T>(t);
}
# define DCHECK(expression) LOG_IF(DFATAL, !(expression)) << LOG_STRINGIFY(expression) << " "
# define DCHECK(expression) \
LOG_IF(LOG_LEVEL_DFATAL, !(expression)) << LOG_STRINGIFY(expression) << " "
# define DCHECK_NOTNULL(expression) DCheckNotNull(expression, LOG_STRINGIFY(expression))
# define DCHECK_OP(op, a, b) \
LOG_IF(DFATAL, !((a)op(b))) << "Failed " << LOG_STRINGIFY(a) << " (" << a << ") " \
<< LOG_STRINGIFY(op) << " " << LOG_STRINGIFY(b) << " (" << b \
<< ") "
LOG_IF(LOG_LEVEL_DFATAL, !((a)op(b))) \
<< "Failed " << LOG_STRINGIFY(a) << " (" << a << ") " << LOG_STRINGIFY(op) << " " \
<< LOG_STRINGIFY(b) << " (" << b << ") "
# define DCHECK_GE(a, b) DCHECK_OP(>=, a, b)
# define DCHECK_NE(a, b) DCHECK_OP(!=, a, b)
# define DCHECK_EQ(a, b) DCHECK_OP(==, a, b)
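/* Editorial example, not part of this commit: a failing debug check such as
 *   DCHECK_GE(a, b);
 * streams "Failed a (<value of a>) >= b (<value of b>) " at LOG_LEVEL_DFATAL;
 * in NDEBUG builds the if constexpr branch of LOG_IF above discards the
 * DFATAL/DERROR/DWARNING levels entirely. */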

View File

@@ -312,7 +312,7 @@ bool MD5Hash::append_file(const string &filepath)
FILE *f = path_fopen(filepath, "rb");
if (!f) {
LOG(ERROR) << "MD5: failed to open file " << filepath;
LOG_ERROR << "MD5: failed to open file " << filepath;
return false;
}

View File

@@ -272,10 +272,10 @@ struct ToNanoOp {
# endif
}
catch (const std::exception &e) {
LOG(WARNING) << "Error converting OpenVDB to NanoVDB grid: " << e.what();
LOG_WARNING << "Error converting OpenVDB to NanoVDB grid: " << e.what();
}
catch (...) {
LOG(WARNING) << "Error converting OpenVDB to NanoVDB grid: Unknown error";
LOG_WARNING << "Error converting OpenVDB to NanoVDB grid: Unknown error";
}
return true;
}

View File

@@ -68,7 +68,7 @@ void TaskScheduler::init(const int num_threads)
}
if (num_threads > 0) {
/* Explicit thread count requested, override the TBB default. */
LOG(INFO) << "Overriding number of TBB threads to " << num_threads << ".";
LOG_INFO << "Overriding number of TBB threads to " << num_threads << ".";
global_control = make_unique<tbb::global_control>(tbb::global_control::max_allowed_parallelism,
num_threads);
active_num_threads = num_threads;