Tomato Cycles: revert some changes of r49835

Apparently, for viewport rendering on the GPU a tile size of 1024
gives much better performance than using a single tile. It is not
clear why the same doesn't hold for background rendering; this needs
to be investigated further.

Meanwhile, use the old debug value of 1024 for the tile size.
Sergey Sharybin
2012-08-13 14:51:39 +00:00
parent 41b0ec6c94
commit 5c29f5bfcf
2 changed files with 24 additions and 3 deletions


@@ -235,6 +235,13 @@ class CyclesRenderSettings(bpy.types.PropertyGroup):
                 default=0.0,
                 )
+        cls.debug_tile_size = IntProperty(
+                name="Tile Size",
+                description="",
+                min=1, max=4096,
+                default=1024,
+                )
+
         cls.resolution_divider = IntProperty(
                 name="Resolution Divider",
                 description="Start viewport rendering with lower resolution which would be real resolution divided by two in power of this value",

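For reference, once the addon registers this class the settings become available as scene.cycles, so the new property can be poked from Blender's Python console. A minimal usage sketch (assuming the standard Cycles addon registration; 512 is just an arbitrary test value):

    import bpy

    cycles = bpy.context.scene.cycles
    # Override the GPU viewport tile size; the property clamps values to [1, 4096]
    cycles.debug_tile_size = 512
    print(cycles.debug_tile_size)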

@@ -344,10 +344,24 @@ SessionParams BlenderSync::get_session_params(BL::RenderEngine b_engine, BL::Use
     }

     /* tiles */
-    int tile_x = b_engine.tile_x();
-    int tile_y = b_engine.tile_y();
+    if(params.device.type != DEVICE_CPU && !background) {
+        printf("%d\n", get_int(cscene, "debug_tile_size"));
+        /* currently GPU could be much slower than CPU when using tiles,
+         * still need to be investigated, but meanwhile make it possible
+         * to work in viewport smoothly
+         */
+        int debug_tile_size = get_int(cscene, "debug_tile_size");
+        params.tile_size = make_int2(debug_tile_size, debug_tile_size);
+    }
+    else {
+        int tile_x = b_engine.tile_x();
+        int tile_y = b_engine.tile_y();
+        params.tile_size = make_int2(tile_x, tile_y);
+    }

-    params.tile_size = make_int2(tile_x, tile_y);
     params.resolution = 1 << get_int(cscene, "resolution_divider");

     /* other parameters */
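The branch above is small but easy to misread in diff form, so here is a rough Python sketch of the same tile-size decision (function and parameter names are illustrative, not actual Cycles API):

    def pick_tile_size(device_type, background, tile_x, tile_y, debug_tile_size=1024):
        # GPU device rendering the viewport: use one big square debug tile
        # instead of the user-configured tiles
        if device_type != 'CPU' and not background:
            return (debug_tile_size, debug_tile_size)
        # CPU device, or a background (final) render: honor the user tile size
        return (tile_x, tile_y)

The resolution divider on the following line applies in both branches: 1 << n equals 2**n, so per the property's own description params.resolution is the factor by which the real resolution is divided when viewport rendering starts. For example, a divider of 2 gives a factor of 4, so a 1920x1080 viewport starts rendering at 480x270.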