Cleanup: consistent TODO/FIXME formatting for names

Following the most widely used convention for including TODOs in
the code, that is: `TODO(name):`, `FIXME(name):`, etc.
This commit is contained in:
Campbell Barton
2020-09-19 14:32:41 +10:00
parent a78130c610
commit 17a2820da8
81 changed files with 126 additions and 126 deletions

View File

@@ -35,7 +35,7 @@ import os.path, shutil, math, sys, gc, multiprocessing, platform, time\n\
withMPBake = False # Bake files asynchronously\n\
withMPSave = False # Save files asynchronously\n\
isWindows = platform.system() != 'Darwin' and platform.system() != 'Linux'\n\
# TODO (sebbas): Use this to simulate Windows multiprocessing (has default mode spawn)\n\
# TODO(sebbas): Use this to simulate Windows multiprocessing (has default mode spawn)\n\
#try:\n\
# multiprocessing.set_start_method('spawn')\n\
#except:\n\
@@ -583,7 +583,7 @@ def bake_mesh_process_$ID$(framenr, format_data, format_mesh, path_mesh):\n\
sm$ID$.timestep = frameLength_s$ID$ # no adaptive timestep for mesh\n\
\n\
#if using_smoke_s$ID$:\n\
# TODO (sebbas): Future update could include smoke mesh (vortex sheets)\n\
# TODO(sebbas): Future update could include smoke mesh (vortex sheets)\n\
if using_liquid_s$ID$:\n\
liquid_step_mesh_$ID$()\n\
liquid_save_mesh_$ID$(path_mesh, framenr, format_mesh)\n\
@@ -607,7 +607,7 @@ def bake_particles_process_$ID$(framenr, format_particles, path_particles, resum
sp$ID$.timestep = frameLength_s$ID$ # no adaptive timestep for particles\n\
\n\
#if using_smoke_s$ID$:\n\
# TODO (sebbas): Future update could include smoke particles (e.g. fire sparks)\n\
# TODO(sebbas): Future update could include smoke particles (e.g. fire sparks)\n\
if using_liquid_s$ID$:\n\
liquid_step_particles_$ID$()\n\
liquid_save_particles_$ID$(path_particles, framenr, format_particles, resumable)\n\

View File

@@ -205,7 +205,7 @@ def liquid_adaptive_step_$ID$(framenr):\n\
setObstacleFlags(flags=flags_s$ID$, phiObs=phiObs_s$ID$, phiOut=phiOut_s$ID$, fractions=fractions_s$ID$, phiIn=phiIn_s$ID$)\n\
\n\
if using_obstacle_s$ID$:\n\
# TODO (sebbas): Enable flags check again, currently produces unstable particle behavior\n\
# TODO(sebbas): Enable flags check again, currently produces unstable particle behavior\n\
phi_s$ID$.subtract(o=phiObsIn_s$ID$) #, flags=flags_s$ID$, subtractType=FlagObstacle)\n\
\n\
# add initial velocity: set invel as source grid to ensure const vels in inflow region, sampling makes use of this\n\

View File

@@ -558,7 +558,7 @@ bool OCIOImpl::setupGLSLDraw(OCIO_GLSLDrawState **state_r,
/* Bind UBO. */
GPU_uniformbuf_bind(shader_curvemap->buffer, shader->ubo_bind);
/* TODO(fclem) remove remains of IMM. */
/* TODO(fclem): remove remains of IMM. */
immBindShader(shader->shader);
/* Bind Shader and set uniforms. */

View File

@@ -1018,7 +1018,7 @@ static void obstacles_from_mesh(Object *coll_ob,
looptri = BKE_mesh_runtime_looptri_ensure(me);
numverts = me->totvert;
/* TODO (sebbas): Make initialization of vertex velocities optional? */
/* TODO(sebbas): Make initialization of vertex velocities optional? */
{
vert_vel = MEM_callocN(sizeof(float[3]) * numverts, "manta_obs_velocity");
@@ -1269,7 +1269,7 @@ static void compute_obstaclesemission(Scene *scene,
# endif
/* Update frame time, this is considering current subframe fraction
* BLI_mutex_lock() called in manta_step(), so safe to update subframe here
* TODO (sebbas): Using BKE_scene_frame_get(scene) instead of new DEG_get_ctime(depsgraph)
* TODO(sebbas): Using BKE_scene_frame_get(scene) instead of new DEG_get_ctime(depsgraph)
* as subframes don't work with the latter yet. */
BKE_object_modifier_update_subframe(
depsgraph, scene, effecobj, true, 5, BKE_scene_frame_get(scene), eModifierType_Fluid);
@@ -2729,7 +2729,7 @@ static bool escape_flowsobject(Object *flowobj,
return true;
}
/* Optimization: Static liquid flow objects don't need emission after first frame.
* TODO (sebbas): Also do not use static mode if initial velocities are enabled. */
* TODO(sebbas): Also do not use static mode if initial velocities are enabled. */
if (liquid_flow && is_static && !is_first_frame && !is_resume && !use_velocity) {
return true;
}
@@ -2811,7 +2811,7 @@ static void compute_flowsemission(Scene *scene,
# endif
/* Update frame time, this is considering current subframe fraction
* BLI_mutex_lock() called in manta_step(), so safe to update subframe here
* TODO (sebbas): Using BKE_scene_frame_get(scene) instead of new DEG_get_ctime(depsgraph)
* TODO(sebbas): Using BKE_scene_frame_get(scene) instead of new DEG_get_ctime(depsgraph)
* as subframes don't work with the latter yet. */
BKE_object_modifier_update_subframe(
depsgraph, scene, flowobj, true, 5, BKE_scene_frame_get(scene), eModifierType_Fluid);
@@ -3788,7 +3788,7 @@ static void BKE_fluid_modifier_processDomain(FluidModifierData *fmd,
MEM_freeN(objs);
}
/* TODO (sebbas): Cache reset for when flow / effector object need update flag is set. */
/* TODO(sebbas): Cache reset for when flow / effector object need update flag is set. */
# if 0
/* If the just updated flags now carry the 'outdated' flag, reset the cache here!
* Plus sanity check: Do not clear cache on file load. */

View File

@@ -4587,7 +4587,7 @@ bool BKE_object_modifier_gpencil_use_time(Object *ob, GpencilModifierData *md)
}
/* Check whether modifier is animated. */
/* TODO (Aligorith): this should be handled as part of build_animdata() */
/* TODO(Aligorith): this should be handled as part of build_animdata() */
if (ob->adt) {
AnimData *adt = ob->adt;
FCurve *fcu;
@@ -4622,7 +4622,7 @@ bool BKE_object_shaderfx_use_time(Object *ob, ShaderFxData *fx)
}
/* Check whether effect is animated. */
/* TODO (Aligorith): this should be handled as part of build_animdata() */
/* TODO(Aligorith): this should be handled as part of build_animdata() */
if (ob->adt) {
AnimData *adt = ob->adt;
FCurve *fcu;

View File

@@ -1983,7 +1983,7 @@ static bool foreach_object_modifier_ptcache(Object *object,
}
}
else if (md->type == eModifierType_Simulation) {
/* TODO(jacques) */
/* TODO(jacques): */
}
}
return true;

View File

@@ -3873,7 +3873,7 @@ static ImBuf *seq_render_preprocess_ibuf(const SeqRenderData *context,
if (use_preprocess) {
float cost = seq_estimate_render_cost_end(context->scene, begin);
/* TODO (Richard): It should be possible to store in cache if image is proxy,
/* TODO(Richard): It should be possible to store in cache if image is proxy,
* but it adds quite a bit of complexity. Since proxies are fast to read, I would
* rather simplify existing code a bit. */
if (!is_proxy_image) {

View File

@@ -1492,7 +1492,7 @@ void BKE_tracking_stabilization_data_to_mat4(int buffer_width,
* applied after rotation/scale anyway. Thus effectively the image gets
* rotated around the desired pivot point
*/
/* TODO(sergey) pivot shouldn't be calculated here, rather received
/* TODO(sergey): pivot shouldn't be calculated here, rather received
* as a parameter.
*/
float pivot[2];

View File

@@ -123,7 +123,7 @@ enum class NodeType {
/* TODO(sergey); Verify that we really need this. */
CACHE,
/* Batch Cache Component.
* TODO (dfelinto/sergey): rename to make it more generic. */
* TODO(dfelinto/sergey): rename to make it more generic. */
BATCH_CACHE,
/* Duplication system. Used to force duplicated objects visible when
* when duplicator is visible. */

View File

@@ -159,7 +159,7 @@ static void basic_cache_populate(void *vedata, Object *ob)
{
BASIC_StorageList *stl = ((BASIC_Data *)vedata)->stl;
/* TODO(fclem) fix selection of smoke domains. */
/* TODO(fclem): fix selection of smoke domains. */
if (!DRW_object_is_renderable(ob) || (ob->dt < OB_SOLID)) {
return;

View File

@@ -377,7 +377,7 @@ void EEVEE_lightprobes_cache_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedat
DRW_shgroup_uniform_vec3(grp, "screen_vecs", DRW_viewport_screenvecs_get(), 2);
DRW_shgroup_uniform_float_copy(
grp, "sphere_size", scene_eval->eevee.gi_cubemap_draw_size * 0.5f);
/* TODO (fclem) get rid of those UBO. */
/* TODO(fclem): get rid of those UBO. */
DRW_shgroup_uniform_block(grp, "planar_block", sldata->planar_ubo);
DRW_shgroup_uniform_block(grp, "grid_block", sldata->grid_ubo);
DRW_shgroup_uniform_block(grp, "renderpass_block", sldata->renderpass_ubo.combined);
@@ -402,7 +402,7 @@ void EEVEE_lightprobes_cache_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedat
DRW_shgroup_uniform_texture_ref(shgrp, "irradianceGrid", &lcache->grid_tx.tex);
DRW_shgroup_uniform_float_copy(
shgrp, "sphere_size", scene_eval->eevee.gi_irradiance_draw_size * 0.5f);
/* TODO (fclem) get rid of those UBO. */
/* TODO(fclem): get rid of those UBO. */
DRW_shgroup_uniform_block(shgrp, "probe_block", sldata->probe_ubo);
DRW_shgroup_uniform_block(shgrp, "planar_block", sldata->planar_ubo);
DRW_shgroup_uniform_block(shgrp, "grid_block", sldata->grid_ubo);

View File

@@ -909,7 +909,7 @@ static void material_renderpass_init(EEVEE_FramebufferList *fbl,
/* Clear texture. */
if (do_clear) {
const float clear[4] = {0.0f, 0.0f, 0.0f, 0.0f};
/* TODO(fclem) replace by GPU_texture_clear once it is fast. */
/* TODO(fclem): replace by GPU_texture_clear once it is fast. */
GPU_framebuffer_texture_attach(fbl->material_accum_fb, *output_tx, 0, 0);
GPU_framebuffer_bind(fbl->material_accum_fb);
GPU_framebuffer_clear_color(fbl->material_accum_fb, clear);

View File

@@ -413,7 +413,7 @@ void EEVEE_motion_blur_cache_finish(EEVEE_Data *vedata)
switch (mb_geom->type) {
case EEVEE_MOTION_DATA_HAIR:
if (mb_step == MB_CURR) {
/* TODO(fclem) Check if vertex count mismatch. */
/* TODO(fclem): Check if vertex count mismatch. */
mb_hair->use_deform = true;
}
else {

View File

@@ -709,7 +709,7 @@ typedef struct EEVEE_EffectsInfo {
int motion_blur_max; /* Maximum distance in pixels a motion blured pixel can cover. */
float motion_blur_near_far[2]; /* Camera near/far clip distances (positive). */
bool cam_params_init;
/* TODO(fclem) Only used in render mode for now.
/* TODO(fclem): Only used in render mode for now.
* This is because we are missing a per scene persistent place to hold this. */
struct EEVEE_MotionBlurData motion_blur;
/* Velocity Pass */

View File

@@ -145,7 +145,7 @@ void EEVEE_shadows_caster_register(EEVEE_ViewLayerData *sldata, Object *ob)
if (ob->base_flag & BASE_FROM_DUPLI) {
/* Duplis will always refresh the shadowmaps as if they were deleted each frame. */
/* TODO(fclem) fix this. */
/* TODO(fclem): fix this. */
update = true;
}
else {
@@ -257,7 +257,7 @@ void EEVEE_shadows_update(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
}
}
/* TODO(fclem) This part can be slow, optimize it. */
/* TODO(fclem): This part can be slow, optimize it. */
EEVEE_BoundBox *bbox = backbuffer->bbox;
BoundSphere *bsphere = linfo->shadow_bounds;
/* Search for deleted shadow casters or if shcaster WAS in shadow radius. */

View File

@@ -209,7 +209,7 @@ void EEVEE_shadows_draw_cubemap(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata,
if (evli->light_type != LA_LOCAL && j == 4) {
continue;
}
/* TODO(fclem) some cube sides can be invisible in the main views. Cull them. */
/* TODO(fclem): some cube sides can be invisible in the main views. Cull them. */
// if (frustum_intersect(g_data->cube_views[j], main_view))
// continue;

View File

@@ -313,7 +313,7 @@ void EEVEE_subsurface_compute(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
if (!DRW_pass_is_empty(psl->sss_translucency_ps)) {
/* We sample the shadow-maps using normal sampler. We need to disable Comparison mode.
* TODO(fclem) avoid this by using sampler objects.*/
* TODO(fclem): avoid this by using sampler objects.*/
GPU_texture_compare_mode(sldata->shadow_cube_pool, false);
GPU_texture_compare_mode(sldata->shadow_cascade_pool, false);

View File

@@ -288,7 +288,7 @@ void EEVEE_volumes_cache_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
if (grp) {
DRW_shgroup_uniform_block(grp, "common_block", sldata->common_ubo);
/* TODO (fclem): remove those (need to clean the GLSL files). */
/* TODO(fclem): remove those (need to clean the GLSL files). */
DRW_shgroup_uniform_block(grp, "grid_block", sldata->grid_ubo);
DRW_shgroup_uniform_block(grp, "probe_block", sldata->probe_ubo);
DRW_shgroup_uniform_block(grp, "planar_block", sldata->planar_ubo);
@@ -530,7 +530,7 @@ void EEVEE_volumes_cache_object_add(EEVEE_ViewLayerData *sldata,
DRWShadingGroup *grp = DRW_shgroup_material_create(mat, vedata->psl->volumetric_objects_ps);
/* TODO(fclem) remove those "unnecessary" UBOs */
/* TODO(fclem): remove those "unnecessary" UBOs */
DRW_shgroup_uniform_block(grp, "planar_block", sldata->planar_ubo);
DRW_shgroup_uniform_block(grp, "probe_block", sldata->probe_ubo);
DRW_shgroup_uniform_block(grp, "shadow_block", sldata->shadow_ubo);
@@ -707,7 +707,7 @@ void EEVEE_volumes_compute(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
DRW_stats_group_start("Volumetrics");
/* We sample the shadow-maps using shadow sampler. We need to enable Comparison mode.
* TODO(fclem) avoid this by using sampler objects.*/
* TODO(fclem): avoid this by using sampler objects.*/
GPU_texture_compare_mode(sldata->shadow_cube_pool, true);
GPU_texture_compare_mode(sldata->shadow_cascade_pool, true);

View File

@@ -89,7 +89,7 @@ vec3 light_translucent(LightData ld, vec3 W, vec3 N, vec4 l_vector, vec2 rand, f
/* We use the full l_vector.xyz so that the spread is minimize
* if the shading point is further away from the light source */
/* TODO(fclem) do something better than this. */
/* TODO(fclem): do something better than this. */
vec3 T, B;
make_orthonormal_basis(L.xyz / L.w, T, B);

View File

@@ -61,7 +61,7 @@ void main()
#ifdef USE_SSS
float fac = float(!sssToggle);
/* TODO(fclem) we shouldn't need this.
/* TODO(fclem): we shouldn't need this.
* Just disable USE_SSS when USE_REFRACTION is enabled. */
# ifdef USE_REFRACTION
/* SSRefraction pass is done after the SSS pass.

View File

@@ -270,7 +270,7 @@ static void external_draw_scene(void *vedata)
if (draw_ctx->evil_C) {
const float clear_col[4] = {0, 0, 0, 0};
/* This is to keep compatibility with external engine. */
/* TODO(fclem) remove it eventually. */
/* TODO(fclem): remove it eventually. */
GPU_framebuffer_bind(dfbl->default_fb);
GPU_framebuffer_clear_color(dfbl->default_fb, clear_col);

View File

@@ -299,7 +299,7 @@ GPENCIL_tLayer *gpencil_layer_cache_add(GPENCIL_PrivateData *pd,
if (is_masked) {
bool valid_mask = false;
/* Warning: only GP_MAX_MASKBITS amount of bits.
* TODO(fclem) Find a better system without any limitation. */
* TODO(fclem): Find a better system without any limitation. */
tgp_layer->mask_bits = BLI_memblock_alloc(pd->gp_maskbit_pool);
tgp_layer->mask_invert_bits = BLI_memblock_alloc(pd->gp_maskbit_pool);
BLI_bitmap_set_all(tgp_layer->mask_bits, false, GP_MAX_MASKBITS);

View File

@@ -428,7 +428,7 @@ GPENCIL_LightPool *gpencil_light_pool_create(GPENCIL_PrivateData *pd, Object *UN
if (lightpool == NULL) {
lightpool = gpencil_light_pool_add(pd);
}
/* TODO(fclem) Light linking. */
/* TODO(fclem): Light linking. */
// gpencil_light_pool_populate(lightpool, ob);
return lightpool;

View File

@@ -115,7 +115,7 @@ void main()
discard;
}
/* FIXME(fclem) Grrr. This is bad for performance but it's the easiest way to not get
/* FIXME(fclem): Grrr. This is bad for performance but it's the easiest way to not get
* depth written where the mask obliterate the layer. */
float mask = texture(gpMaskTexture, uvs).r;
if (mask < 0.001) {

View File

@@ -28,7 +28,7 @@ in vec2 uvs;
out vec4 fragColor;
#ifdef TILED_IMAGE
/* TODO(fclem) deduplicate code. */
/* TODO(fclem): deduplicate code. */
bool node_tex_tile_lookup(inout vec3 co, sampler2DArray ima, sampler1DArray map)
{
vec2 tile_pos = floor(co.xy);

View File

@@ -209,7 +209,7 @@ void OVERLAY_antialiasing_start(OVERLAY_Data *vedata)
/* If we are not in solid shading mode, we clear the depth. */
if (DRW_state_is_fbo() && pd->clear_in_front) {
/* TODO(fclem) This clear should be done in a global place. */
/* TODO(fclem): This clear should be done in a global place. */
GPU_framebuffer_bind(fbl->overlay_in_front_fb);
GPU_framebuffer_clear_depth(fbl->overlay_in_front_fb, 1.0f);
}

View File

@@ -534,7 +534,7 @@ static void drw_shgroup_bone_custom_solid(ArmatureDrawContext *ctx,
const float outline_color[4],
Object *custom)
{
/* TODO(fclem) arg... less than ideal but we never iter on this object
/* TODO(fclem): arg... less than ideal but we never iter on this object
* to assure batch cache is valid. */
drw_batch_cache_validate(custom);
@@ -568,7 +568,7 @@ static void drw_shgroup_bone_custom_solid(ArmatureDrawContext *ctx,
DRW_buffer_add_entry_struct(buf, inst_data.mat);
}
/* TODO(fclem) needs to be moved elsewhere. */
/* TODO(fclem): needs to be moved elsewhere. */
drw_batch_cache_generate_requested_delayed(custom);
}
@@ -577,7 +577,7 @@ static void drw_shgroup_bone_custom_wire(ArmatureDrawContext *ctx,
const float color[4],
Object *custom)
{
/* TODO(fclem) arg... less than ideal but we never iter on this object
/* TODO(fclem): arg... less than ideal but we never iter on this object
* to assure batch cache is valid. */
drw_batch_cache_validate(custom);
@@ -592,7 +592,7 @@ static void drw_shgroup_bone_custom_wire(ArmatureDrawContext *ctx,
DRW_buffer_add_entry_struct(buf, inst_data.mat);
}
/* TODO(fclem) needs to be moved elsewhere. */
/* TODO(fclem): needs to be moved elsewhere. */
drw_batch_cache_generate_requested_delayed(custom);
}

View File

@@ -61,7 +61,7 @@ void OVERLAY_background_cache_init(OVERLAY_Data *vedata)
}
else if (v3d->shading.background_type == V3D_SHADING_BACKGROUND_WORLD && scene->world) {
background_type = BG_SOLID;
/* TODO(fclem) this is a scene referred linear color. we should convert
/* TODO(fclem): this is a scene referred linear color. we should convert
* it to display linear here. */
copy_v3_v3(color_override, &scene->world->horr);
color_override[3] = 1.0f;

View File

@@ -478,7 +478,7 @@ static void OVERLAY_cache_finish(void *vedata)
return;
}
/* TODO(fclem) Only do this when really needed. */
/* TODO(fclem): Only do this when really needed. */
{
/* HACK we allocate the in front depth here to avoid the overhead when if is not needed. */
DefaultFramebufferList *dfbl = DRW_viewport_framebuffer_list_get();

View File

@@ -183,7 +183,7 @@ void OVERLAY_particle_cache_populate(OVERLAY_Data *vedata, Object *ob)
struct GPUBatch *shape = NULL;
DRWShadingGroup *grp;
/* TODO(fclem) Here would be a good place for preemptive culling. */
/* TODO(fclem): Here would be a good place for preemptive culling. */
/* fclem: Is color even usefull in our modern context? */
Material *ma = BKE_object_material_get(ob, part->omat);

View File

@@ -286,7 +286,7 @@ void OVERLAY_wireframe_cache_populate(OVERLAY_Data *vedata,
}
if (ob->type == OB_GPENCIL) {
/* TODO (fclem) Make GPencil objects have correct boundbox. */
/* TODO(fclem): Make GPencil objects have correct bound-box. */
DRW_shgroup_call_no_cull(shgrp, geom, ob);
}
else if (use_sculpt_pbvh) {

View File

@@ -224,7 +224,7 @@ void main()
#ifdef SELECT_EDGES
/* HACK: to avoid loosing sub pixel object in selections, we add a bit of randomness to the
* wire to at least create one fragment that will pass the occlusion query. */
/* TODO(fclem) Limit this workaround to selection. It's not very noticeable but still... */
/* TODO(fclem): Limit this workaround to selection. It's not very noticeable but still... */
gl_Position.xy += sizeViewportInv.xy * gl_Position.w * ((gl_VertexID % 2 == 0) ? -1.0 : 1.0);
#endif

View File

@@ -20,7 +20,7 @@ void main()
#ifdef SELECT_EDGES
/* HACK: to avoid loosing sub pixel object in selections, we add a bit of randomness to the
* wire to at least create one fragment that will pass the occlusion query. */
/* TODO(fclem) Limit this workaround to selection. It's not very noticeable but still... */
/* TODO(fclem): Limit this workaround to selection. It's not very noticeable but still... */
gl_Position.xy += sizeViewportInv.xy * gl_Position.w * ((gl_VertexID % 2 == 0) ? -1.0 : 1.0);
#endif

View File

@@ -1,5 +1,5 @@
/* TODO(fclem) deduplicate code. */
/* TODO(fclem): deduplicate code. */
bool node_tex_tile_lookup(inout vec3 co, sampler2DArray ima, sampler1DArray map)
{
vec2 tile_pos = floor(co.xy);

View File

@@ -156,7 +156,7 @@ void workbench_dof_engine_init(WORKBENCH_Data *vedata)
const float *full_size = DRW_viewport_size_get();
const int size[2] = {max_ii(1, (int)full_size[0] / 2), max_ii(1, (int)full_size[1] / 2)};
#if 0 /* TODO(fclem) finish COC min_max optimization */
#if 0 /* TODO(fclem): finish COC min_max optimization. */
/* NOTE: We Ceil here in order to not miss any edge texel if using a NPO2 texture. */
int shrink_h_size[2] = {ceilf(size[0] / 8.0f), size[1]};
int shrink_w_size[2] = {shrink_h_size[0], ceilf(size[1] / 8.0f)};
@@ -168,7 +168,7 @@ void workbench_dof_engine_init(WORKBENCH_Data *vedata)
&txl->coc_halfres_tx, size[0], size[1], GPU_RG8, DRW_TEX_FILTER | DRW_TEX_MIPMAP);
wpd->dof_blur_tx = DRW_texture_pool_query_2d(
size[0], size[1], GPU_RGBA16F, &draw_engine_workbench);
#if 0 /* TODO(fclem) finish COC min_max optimization. */
#if 0 /* TODO(fclem): finish COC min_max optimization. */
wpd->coc_temp_tx = DRW_texture_pool_query_2d(
shrink_h_size[0], shrink_h_size[1], GPU_RG8, &draw_engine_workbench);
wpd->coc_tiles_tx[0] = DRW_texture_pool_query_2d(
@@ -183,7 +183,7 @@ void workbench_dof_engine_init(WORKBENCH_Data *vedata)
GPU_ATTACHMENT_TEXTURE(txl->dof_source_tx),
GPU_ATTACHMENT_TEXTURE(txl->coc_halfres_tx),
});
#if 0 /* TODO(fclem) finish COC min_max optimization. */
#if 0 /* TODO(fclem): finish COC min_max optimization. */
GPU_framebuffer_ensure_config(&fbl->dof_coc_tile_h_fb,
{
GPU_ATTACHMENT_NONE,
@@ -218,7 +218,7 @@ void workbench_dof_engine_init(WORKBENCH_Data *vedata)
float focus_dist = BKE_camera_object_dof_distance(camera);
float focal_len = cam->lens;
/* TODO(fclem) deduplicate with eevee */
/* TODO(fclem): deduplicate with eevee */
const float scale_camera = 0.001f;
/* we want radius here for the aperture number */
float aperture = 0.5f * scale_camera * focal_len / fstop;
@@ -290,7 +290,7 @@ void workbench_dof_cache_init(WORKBENCH_Data *vedata)
DRW_shgroup_uniform_texture(grp, "inputCocTex", txl->coc_halfres_tx);
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
}
#if 0 /* TODO(fclem) finish COC min_max optimization */
#if 0 /* TODO(fclem): finish COC min_max optimization */
{
psl->dof_flatten_h_ps = DRW_pass_create("DoF Flatten Coc H", DRW_STATE_WRITE_COLOR);
@@ -385,7 +385,7 @@ void workbench_dof_draw_pass(WORKBENCH_Data *vedata)
GPU_framebuffer_recursive_downsample(
fbl->dof_downsample_fb, 2, workbench_dof_downsample_level, psl);
#if 0 /* TODO(fclem) finish COC min_max optimization */
#if 0 /* TODO(fclem): finish COC min_max optimization */
GPU_framebuffer_bind(fbl->dof_coc_tile_h_fb);
DRW_draw_pass(psl->dof_flatten_h_ps);

View File

@@ -98,7 +98,7 @@ void workbench_cache_init(void *ved)
workbench_volume_cache_init(vedata);
}
/* TODO(fclem) DRW_cache_object_surface_material_get needs a refactor to allow passing NULL
/* TODO(fclem): DRW_cache_object_surface_material_get needs a refactor to allow passing NULL
* instead of gpumat_array. Avoiding all this boilerplate code. */
static struct GPUBatch **workbench_object_surface_material_get(Object *ob)
{
@@ -422,7 +422,7 @@ void workbench_cache_finish(void *ved)
WORKBENCH_FramebufferList *fbl = vedata->fbl;
WORKBENCH_PrivateData *wpd = stl->wpd;
/* TODO(fclem) Only do this when really needed. */
/* TODO(fclem): Only do this when really needed. */
{
/* HACK we allocate the in front depth here to avoid the overhead when if is not needed. */
DefaultFramebufferList *dfbl = DRW_viewport_framebuffer_list_get();

View File

@@ -384,7 +384,7 @@ void workbench_shader_depth_of_field_get(GPUShader **prepare_sh,
datatoc_workbench_effect_dof_frag_glsl, e_data.lib, "#define PREPARE\n");
e_data.dof_downsample_sh = DRW_shader_create_fullscreen_with_shaderlib(
datatoc_workbench_effect_dof_frag_glsl, e_data.lib, "#define DOWNSAMPLE\n");
#if 0 /* TODO(fclem) finish COC min_max optimization */
#if 0 /* TODO(fclem): finish COC min_max optimization */
e_data.dof_flatten_v_sh = DRW_shader_create_fullscreen_with_shaderlib(
datatoc_workbench_effect_dof_frag_glsl, e_data.lib, "#define FLATTEN_VERTICAL\n");
e_data.dof_flatten_h_sh = DRW_shader_create_fullscreen_with_shaderlib(

View File

@@ -157,7 +157,7 @@ void workbench_shadow_cache_init(WORKBENCH_Data *data)
DRWState state = DRW_STATE_DEPTH_LESS | DRW_STATE_STENCIL_ALWAYS;
#endif
/* TODO(fclem) Merge into one pass with subpasses. */
/* TODO(fclem): Merge into one pass with subpasses. */
DRW_PASS_CREATE(psl->shadow_ps[0], state | depth_pass_state);
DRW_PASS_CREATE(psl->shadow_ps[1], state | depth_fail_state);

View File

@@ -164,13 +164,13 @@ void workbench_transparent_draw_depth_pass(WORKBENCH_Data *data)
if (!DRW_pass_is_empty(psl->transp_depth_ps)) {
GPU_framebuffer_bind(fbl->opaque_fb);
/* TODO(fclem) Disable writing to first two buffers. Unnecessary waste of bandwidth. */
/* TODO(fclem): Disable writing to first two buffers. Unnecessary waste of bandwidth. */
DRW_draw_pass(psl->transp_depth_ps);
}
if (!DRW_pass_is_empty(psl->transp_depth_infront_ps)) {
GPU_framebuffer_bind(fbl->opaque_infront_fb);
/* TODO(fclem) Disable writing to first two buffers. Unnecessary waste of bandwidth. */
/* TODO(fclem): Disable writing to first two buffers. Unnecessary waste of bandwidth. */
DRW_draw_pass(psl->transp_depth_infront_ps);
}
}

View File

@@ -423,7 +423,7 @@ void DRW_shgroup_call_ex(DRWShadingGroup *shgroup,
#define DRW_shgroup_call_obmat(shgroup, geom, obmat) \
DRW_shgroup_call_ex(shgroup, NULL, obmat, geom, false, NULL)
/* TODO(fclem) remove this when we have DRWView */
/* TODO(fclem): remove this when we have DRWView */
/* user_data is used by DRWCallVisibilityFn defined in DRWView. */
#define DRW_shgroup_call_with_callback(shgroup, geom, ob, user_data) \
DRW_shgroup_call_ex(shgroup, ob, NULL, geom, false, user_data)

View File

@@ -159,7 +159,7 @@ void DRW_shape_cache_free(void)
GPUBatch *drw_cache_procedural_points_get(void)
{
if (!SHC.drw_procedural_verts) {
/* TODO(fclem) get rid of this dummy VBO. */
/* TODO(fclem): get rid of this dummy VBO. */
GPUVertFormat format = {0};
GPU_vertformat_attr_add(&format, "dummy", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
@@ -173,7 +173,7 @@ GPUBatch *drw_cache_procedural_points_get(void)
GPUBatch *drw_cache_procedural_lines_get(void)
{
if (!SHC.drw_procedural_lines) {
/* TODO(fclem) get rid of this dummy VBO. */
/* TODO(fclem): get rid of this dummy VBO. */
GPUVertFormat format = {0};
GPU_vertformat_attr_add(&format, "dummy", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
@@ -187,7 +187,7 @@ GPUBatch *drw_cache_procedural_lines_get(void)
GPUBatch *drw_cache_procedural_triangles_get(void)
{
if (!SHC.drw_procedural_tris) {
/* TODO(fclem) get rid of this dummy VBO. */
/* TODO(fclem): get rid of this dummy VBO. */
GPUVertFormat format = {0};
GPU_vertformat_attr_add(&format, "dummy", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);

View File

@@ -179,7 +179,8 @@ static void pointcloud_batch_cache_ensure_pos(Object *ob, PointCloudBatchCache *
float(*vbo_data)[4] = (float(*)[4])GPU_vertbuf_get_data(cache->pos);
for (int i = 0; i < pointcloud->totpoint; i++) {
copy_v3_v3(vbo_data[i], pointcloud->co[i]);
/* TODO(fclem) remove multiplication here. Here only for keeping the size correct for now. */
/* TODO(fclem): remove multiplication here.
* Here only for keeping the size correct for now. */
vbo_data[i][3] = pointcloud->radius[i] * 100.0f;
}
}

View File

@@ -308,7 +308,7 @@ struct DupliObject *DRW_object_get_dupli(const Object *UNUSED(ob))
/** \name Color Management
* \{ */
/* TODO(fclem) This should be a render engine callback to determine if we need CM or not. */
/* TODO(fclem): This should be a render engine callback to determine if we need CM or not. */
static void drw_viewport_colormanagement_set(void)
{
Scene *scene = DST.draw_ctx.scene;
@@ -525,7 +525,7 @@ static void draw_unit_state_create(void)
infos->ob_flag = 1.0f;
copy_v3_fl(infos->ob_color, 1.0f);
/* TODO(fclem) get rid of this. */
/* TODO(fclem): get rid of this. */
culling->bsphere.radius = -1.0f;
culling->user_data = NULL;
@@ -2594,7 +2594,7 @@ void DRW_draw_select_id(Depsgraph *depsgraph, ARegion *region, View3D *v3d, cons
GPUViewport *viewport = WM_draw_region_get_viewport(region);
if (!viewport) {
/* Selection engine requires a viewport.
* TODO (germano): This should be done internally in the engine. */
* TODO(germano): This should be done internally in the engine. */
sel_ctx->is_dirty = true;
sel_ctx->objects_drawn_len = 0;
sel_ctx->index_drawn_len = 1;

View File

@@ -495,7 +495,7 @@ typedef struct DRWManager {
struct Object *dupli_origin;
/** Ghash containing original objects. */
struct GHash *dupli_ghash;
/** TODO(fclem) try to remove usage of this. */
/** TODO(fclem): try to remove usage of this. */
DRWInstanceData *object_instance_data[MAX_INSTANCE_DATA_SIZE];
/* Array of dupli_data (one for each enabled engine) to handle duplis. */
void **dupli_datas;
@@ -542,7 +542,7 @@ typedef struct DRWManager {
DRWView *view_active;
DRWView *view_previous;
uint primary_view_ct;
/** TODO(fclem) Remove this. Only here to support
/** TODO(fclem): Remove this. Only here to support
* shaders without common_view_lib.glsl */
DRWViewUboStorage view_storage_cpy;
@@ -567,7 +567,7 @@ typedef struct DRWManager {
GPUDrawList *draw_list;
struct {
/* TODO(fclem) optimize: use chunks. */
/* TODO(fclem): optimize: use chunks. */
DRWDebugLine *lines;
DRWDebugSphere *spheres;
} debug;

View File

@@ -510,7 +510,7 @@ static void drw_call_obinfos_init(DRWObjectInfos *ob_infos, Object *ob)
/* Random float value. */
uint random = (DST.dupli_source) ?
DST.dupli_source->random_id :
/* TODO(fclem) this is rather costly to do at runtime. Maybe we can
/* TODO(fclem): this is rather costly to do at runtime. Maybe we can
* put it in ob->runtime and make depsgraph ensure it is up to date. */
BLI_hash_int_2d(BLI_hash_string(ob->id.name + 2), 0);
ob_infos->ob_random = random * (1.0f / (float)0xFFFFFFFF);

View File

@@ -483,8 +483,8 @@ static void draw_compute_culling(DRWView *view)
{
view = view->parent ? view->parent : view;
/* TODO(fclem) multithread this. */
/* TODO(fclem) compute all dirty views at once. */
/* TODO(fclem): multi-thread this. */
/* TODO(fclem): compute all dirty views at once. */
if (!view->is_dirty) {
return;
}
@@ -1042,7 +1042,7 @@ static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
static void drw_update_view(void)
{
/* TODO(fclem) update a big UBO and only bind ranges here. */
/* TODO(fclem): update a big UBO and only bind ranges here. */
GPU_uniformbuf_update(G_draw.view_ubo, &DST.view_active->storage);
/* TODO get rid of this. */

View File

@@ -25,7 +25,7 @@ void pointcloud_get_pos_and_nor(out vec3 outpos, out vec3 outnor)
mat3 facing_mat = pointcloud_get_facing_matrix(p);
float radius = dot(abs(mat3(ModelMatrix) * pos.www), vec3(1.0 / 3.0));
/* TODO(fclem) remove multiplication here. Here only for keeping the size correct for now. */
/* TODO(fclem): remove multiplication here. Here only for keeping the size correct for now. */
radius *= 0.01;
outpos = p + (facing_mat * pos_inst) * radius;
outnor = facing_mat * nor;

View File

@@ -516,7 +516,7 @@ static int gizmo_cage3d_modal(bContext *C,
(point_local[2] - data->orig_mouse[2]);
}
else if (gz->highlight_part == ED_GIZMO_CAGE3D_PART_ROTATE) {
/* TODO (if needed) */
/* Add this (if we need it). */
}
else {
/* scale */

View File

@@ -698,10 +698,9 @@ static int edbm_shortest_path_pick_invoke(bContext *C, wmOperator *op, const wmE
/* If nothing is selected, let's select the picked vertex/edge/face. */
if ((vc.em->bm->totvertsel == 0) && (eve || eed || efa)) {
/* TODO (dfelinto) right now we try to find the closest element twice.
/* TODO(dfelinto): right now we try to find the closest element twice.
* The ideal is to refactor EDBM_select_pick so it doesn't
* have to pick the nearest vert/edge/face again.
*/
* have to pick the nearest vert/edge/face again. */
EDBM_select_pick(C, event->mval, true, false, false);
return OPERATOR_FINISHED;
}

View File

@@ -5671,7 +5671,7 @@ static void do_brush_action(Sculpt *sd, Object *ob, Brush *brush, UnifiedPaintSe
SCULPT_stroke_is_first_brush_step(ss->cache) && !ss->cache->alt_smooth) {
/* Dynamic-topology does not support Face Sets data, so it can't store/restore it from undo. */
/* TODO (pablodp606): This check should be done in the undo code and not here, but the rest of
/* TODO(pablodp606): This check should be done in the undo code and not here, but the rest of
* the sculpt code is not checking for unsupported undo types that may return a null node. */
if (BKE_pbvh_type(ss->pbvh) != PBVH_BMESH) {
SCULPT_undo_push_node(ob, NULL, SCULPT_UNDO_FACE_SETS);

View File

@@ -1754,7 +1754,7 @@ static bool outliner_id_operation_item_poll(bContext *C,
if (!space_outliner || ELEM(space_outliner->outlinevis, SO_SCENES, SO_VIEW_LAYER)) {
return true;
}
/* TODO (dalai): enable in the few cases where this can be supported
/* TODO(dalai): enable in the few cases where this can be supported
(i.e., when we have a valid parent for the tselem). */
return false;
}

View File

@@ -931,7 +931,7 @@ static void view3d_main_region_listener(
switch (wmn->data) {
case ND_SHADING:
case ND_NODES:
/* TODO(sergey) This is a bit too much updates, but needed to
/* TODO(sergey): This is a bit too much updates, but needed to
* have proper material drivers update in the viewport.
*
* How to solve?

View File

@@ -241,7 +241,7 @@ static void axis_geom_draw(const wmGizmo *gz,
const float axis_depth_bias = 0.01f;
const float sphere_scale = 1.15f;
/* TODO(fclem) Is there a way to get the widget radius? */
/* TODO(fclem): Is there a way to get the widget radius? */
const float widget_pix_size = 40.0f * U.dpi_fac;
#ifdef USE_AXIS_FONT

View File

@@ -75,7 +75,7 @@ extern "C" {
/**
* IMPORTANT: Do not allocate manually as the real struct is bigger (i.e: GLBatch). This is only
* the common and "public" part of the struct. Use the provided allocator.
* TODO(fclem) Make the content of this struct hidden and expose getters/setters.
* TODO(fclem): Make the content of this struct hidden and expose getters/setters.
**/
typedef struct GPUBatch {
/** verts[0] is required, others can be NULL */
@@ -128,7 +128,7 @@ void GPU_batch_program_set_builtin_with_config(GPUBatch *batch,
eGPUShaderConfig sh_cfg);
/* Will only work after setting the batch program. */
/* TODO(fclem) Theses needs to be replaced by GPU_shader_uniform_* with explicit shader. */
/* TODO(fclem): Theses needs to be replaced by GPU_shader_uniform_* with explicit shader. */
#define GPU_batch_uniform_1i(batch, name, x) GPU_shader_uniform_1i((batch)->shader, name, x);
#define GPU_batch_uniform_1b(batch, name, x) GPU_shader_uniform_1b((batch)->shader, name, x);
#define GPU_batch_uniform_1f(batch, name, x) GPU_shader_uniform_1f((batch)->shader, name, x);

View File

@@ -216,7 +216,7 @@ int GPU_batch_vertbuf_add_ex(GPUBatch *batch, GPUVertBuf *verts, bool own_vbo)
/* -------------------------------------------------------------------- */
/** \name Uniform setters
*
* TODO(fclem) port this to GPUShader.
* TODO(fclem): port this to GPUShader.
* \{ */
void GPU_batch_set_shader(GPUBatch *batch, GPUShader *shader)

View File

@@ -421,7 +421,7 @@ static void codegen_call_functions(DynStr *ds, GPUNodeGraph *graph, GPUOutput *f
ds, input->link->output->type, input->type, "tmp", input->link->output->id);
}
else if (input->source == GPU_SOURCE_BUILTIN) {
/* TODO(fclem) get rid of that. */
/* TODO(fclem): get rid of that. */
if (input->builtin == GPU_INVERSE_VIEW_MATRIX) {
BLI_dynstr_append(ds, "viewinv");
}
@@ -527,7 +527,7 @@ static char *code_generate_fragment(GPUMaterial *material,
if (builtins & GPU_BARYCENTRIC_TEXCO) {
BLI_dynstr_append(ds, " vec2 barytexco = barycentric_resolve(barycentricTexCo);\n");
}
/* TODO(fclem) get rid of that. */
/* TODO(fclem): get rid of that. */
if (builtins & GPU_VIEW_MATRIX) {
BLI_dynstr_append(ds, " #define viewmat ViewMatrix\n");
}

View File

@@ -112,7 +112,7 @@ void FrameBuffer::attachment_set(GPUAttachmentType type, const GPUAttachment &ne
return; /* Exact same texture already bound here. */
}
/* Unbind previous and bind new. */
/* TODO(fclem) cleanup the casts. */
/* TODO(fclem): cleanup the casts. */
if (attachment.tex) {
reinterpret_cast<Texture *>(attachment.tex)->detach_from(this);
}
@@ -411,7 +411,7 @@ void GPU_framebuffer_read_color(GPUFrameBuffer *gpu_fb,
unwrap(gpu_fb)->read(GPU_COLOR_BIT, format, rect, channels, slot, data);
}
/* TODO(fclem) rename to read_color. */
/* TODO(fclem): rename to read_color. */
void GPU_frontbuffer_read_pixels(
int x, int y, int w, int h, int channels, eGPUDataFormat format, void *data)
{
@@ -420,7 +420,7 @@ void GPU_frontbuffer_read_pixels(
}
/* read_slot and write_slot are only used for color buffers. */
/* TODO(fclem) port as texture operation. */
/* TODO(fclem): port as texture operation. */
void GPU_framebuffer_blit(GPUFrameBuffer *gpufb_read,
int read_slot,
GPUFrameBuffer *gpufb_write,

View File

@@ -170,7 +170,7 @@ static void wide_line_workaround_start(GPUPrimType prim_type)
immUnbindProgram();
/* TODO(fclem) Don't use geometry shader and use quad instancing with double load. */
/* TODO(fclem): Don't use geometry shader and use quad instancing with double load. */
// GPU_vertformat_multiload_enable(imm->vertex_format, 2);
immBindBuiltinProgram(polyline_sh);

View File

@@ -25,4 +25,4 @@
using namespace blender::gpu;
/* TODO(fclem) Make the associated C-API to use inside DRW profiler. */
/* TODO(fclem): Make the associated C-API to use inside DRW profiler. */

View File

@@ -487,7 +487,7 @@ void GPU_shader_unbind(void)
/* -------------------------------------------------------------------- */
/** \name Transform feedback
*
* TODO(fclem) Should be replaced by compute shaders.
* TODO(fclem): Should be replaced by compute shaders.
* \{ */
bool GPU_shader_transform_feedback_enable(GPUShader *shader, GPUVertBuf *vertbuf)

View File

@@ -34,7 +34,7 @@ namespace blender::gpu {
ShaderInterface::ShaderInterface(void)
{
/* TODO(fclem) add unique ID for debugging. */
/* TODO(fclem): add unique ID for debugging. */
}
ShaderInterface::~ShaderInterface(void)

View File

@@ -50,7 +50,7 @@ typedef struct ShaderInput {
* Base class which is then specialized for each implementation (GL, VK, ...).
**/
class ShaderInterface {
/* TODO(fclem) should be protected. */
/* TODO(fclem): should be protected. */
public:
/** Flat array. In this order: Attributes, Ubos, Uniforms. */
ShaderInput *inputs_ = NULL;

View File

@@ -292,7 +292,7 @@ bool GPU_depth_mask_get(void)
bool GPU_mipmap_enabled(void)
{
/* TODO(fclem) this used to be a userdef option. */
/* TODO(fclem): this used to be a userdef option. */
return true;
}

View File

@@ -56,9 +56,9 @@ union GPUState {
uint32_t invert_facing : 1;
uint32_t shadow_bias : 1;
/** Number of clip distances enabled. */
/* TODO(fclem) This should be a shader property. */
/* TODO(fclem): This should be a shader property. */
uint32_t clip_distances : 3;
/* TODO(fclem) remove, old opengl features. */
/* TODO(fclem): remove, old opengl features. */
uint32_t polygon_smooth : 1;
uint32_t line_smooth : 1;
};
@@ -99,7 +99,7 @@ union GPUStateMutable {
/** TODO remove */
float depth_range[2];
/** Positive if using program point size. */
/* TODO(fclem) should be passed as uniform to all shaders. */
/* TODO(fclem): should be passed as uniform to all shaders. */
float point_size;
/** Not supported on every platform. Prefer using wideline shader. */
float line_width;

View File

@@ -186,7 +186,7 @@ using namespace blender::gpu;
uint GPU_texture_memory_usage_get(void)
{
/* TODO(fclem) Do that inside the new Texture class. */
/* TODO(fclem): Do that inside the new Texture class. */
return 0;
}

View File

@@ -93,7 +93,7 @@ class Texture {
eGPUTextureType type_;
/** Number of mipmaps this texture has (Max miplvl). */
/* TODO(fclem) Should become immutable and the need for mipmaps should be specified upfront. */
/* TODO(fclem): Should become immutable and the need for mipmaps should be specified upfront. */
int mipmaps_ = -1;
/** For error checking */
int mip_min_ = 0, mip_max_ = 0;
@@ -130,7 +130,7 @@ class Texture {
virtual void update_sub(
int mip, int offset[3], int extent[3], eGPUDataFormat format, const void *data) = 0;
/* TODO(fclem) Legacy. Should be removed at some point. */
/* TODO(fclem): Legacy. Should be removed at some point. */
virtual uint gl_bindcode_get(void) const = 0;
int width_get(void) const

View File

@@ -112,7 +112,7 @@ struct GPUViewport {
ColorManagedDisplaySettings display_settings;
CurveMapping *orig_curve_mapping;
float dither;
/* TODO(fclem) the uvimage display use the viewport but do not set any view transform for the
/* TODO(fclem): the uvimage display use the viewport but do not set any view transform for the
* moment. The end goal would be to let the GPUViewport do the color management. */
bool do_color_management;
struct GPUViewportBatch batch;

View File

@@ -46,8 +46,8 @@ using namespace blender::gpu;
/* -------------------------------------------------------------------- */
/** \name Vao cache
*
* Each GLBatch has a small cache of VAO objects that are used to avoid VAO reconfiguration.
* TODO(fclem) Could be revisited to avoid so much cross references.
* Each #GLBatch has a small cache of VAO objects that are used to avoid VAO reconfiguration.
* TODO(fclem): Could be revisited to avoid so much cross references.
* \{ */
GLVaoCache::GLVaoCache(void)
@@ -166,7 +166,7 @@ void GLVaoCache::clear(void)
glDeleteVertexArrays(1, &vao_base_instance_);
}
else {
/* TODO(fclem) Slow way. Could avoid multiple mutex lock here */
/* TODO(fclem): Slow way. Could avoid multiple mutex lock here */
for (int i = 0; i < count; i++) {
context_->vao_free(vaos[i]);
}

View File

@@ -405,7 +405,7 @@ void GLFrameBuffer::clear_multi(const float (*clear_cols)[4])
{
/* WATCH: This can easily access clear_cols out of bounds it clear_cols is not big enough for
* all attachments.
* TODO(fclem) fix this insecurity? */
* TODO(fclem): fix this insecurity? */
int type = GPU_FB_COLOR_ATTACHMENT0;
for (int i = 0; type < GPU_FB_MAX_ATTACHMENT; i++, type++) {
if (attachments_[type].tex != NULL) {

View File

@@ -59,11 +59,11 @@ class GLQueryPool : public QueryPool {
static inline GLenum to_gl(GPUQueryType type)
{
if (type == GPU_QUERY_OCCLUSION) {
/* TODO(fclem) try with GL_ANY_SAMPLES_PASSED. */
/* TODO(fclem): try with GL_ANY_SAMPLES_PASSED. */
return GL_SAMPLES_PASSED;
}
BLI_assert(0);
return GL_SAMPLES_PASSED;
}
} // namespace blender::gpu
} // namespace blender::gpu

View File

@@ -219,7 +219,7 @@ void GLShader::unbind(void)
/* -------------------------------------------------------------------- */
/** \name Transform feedback
*
* TODO(fclem) Should be replaced by compute shaders.
* TODO(fclem): Should be replaced by compute shaders.
* \{ */
/* Should be called before linking. */

View File

@@ -446,7 +446,7 @@ struct GPUFrameBuffer *GLTexture::framebuffer_get(void)
return framebuffer_;
}
BLI_assert(!(type_ & (GPU_TEXTURE_ARRAY | GPU_TEXTURE_CUBE | GPU_TEXTURE_1D | GPU_TEXTURE_3D)));
/* TODO(fclem) cleanup this. Don't use GPU object but blender::gpu ones. */
/* TODO(fclem): cleanup this. Don't use GPU object but blender::gpu ones. */
GPUTexture *gputex = reinterpret_cast<GPUTexture *>(static_cast<Texture *>(this));
framebuffer_ = GPU_framebuffer_create(name_);
GPU_framebuffer_texture_attach(framebuffer_, gputex, 0, 0);
@@ -550,7 +550,7 @@ void GLTexture::samplers_free(void)
* \{ */
/* NOTE: This only checks if this mipmap is valid / supported.
* TODO(fclem) make the check cover the whole mipmap chain. */
* TODO(fclem): make the check cover the whole mipmap chain. */
bool GLTexture::proxy_check(int mip)
{
/* Manual validation first, since some implementation have issues with proxy creation. */
@@ -678,7 +678,7 @@ void GLTexture::check_feedback_loop(void)
}
}
/* TODO(fclem) Legacy. Should be removed at some point. */
/* TODO(fclem): Legacy. Should be removed at some point. */
uint GLTexture::gl_bindcode_get(void) const
{
return tex_id_;

View File

@@ -59,7 +59,7 @@ class GLTexture : public Texture {
/** Legacy workaround for texture copy. Created when using framebuffer_get(). */
struct GPUFrameBuffer *framebuffer_ = NULL;
/** True if this texture is bound to at least one texture unit. */
/* TODO(fclem) How do we ensure thread safety here? */
/* TODO(fclem): How do we ensure thread safety here? */
bool is_bound_ = false;
public:
@@ -78,7 +78,7 @@ class GLTexture : public Texture {
void check_feedback_loop(void);
/* TODO(fclem) Legacy. Should be removed at some point. */
/* TODO(fclem): Legacy. Should be removed at some point. */
uint gl_bindcode_get(void) const override;
static void samplers_init(void);

View File

@@ -7,7 +7,7 @@ void node_vector_displacement_tangent(vec4 vector,
mat4 viewmat,
out vec3 result)
{
/* TODO(fclem) this is broken. revisit latter. */
/* TODO(fclem): this is broken. revisit latter. */
vec3 N_object = normalize(((vec4(normal, 0.0) * viewmat) * obmat).xyz);
vec3 T_object = normalize(((vec4(tangent.xyz, 0.0) * viewmat) * obmat).xyz);
vec3 B_object = tangent.w * normalize(cross(N_object, T_object));

View File

@@ -660,8 +660,8 @@ static void rna_Fluid_cache_directory_set(struct PointerRNA *ptr, const char *va
BLI_strncpy(settings->cache_directory, value, sizeof(settings->cache_directory));
/* TODO (sebbas): Read cache state in order to set cache bake flags and cache pause frames
* correctly */
/* TODO(sebbas): Read cache state in order to set cache bake flags and cache pause frames
* correctly. */
// settings->cache_flag = 0;
}

View File

@@ -224,7 +224,7 @@ static int rna_Image_gl_load(Image *image, ReportList *reports, int frame)
if (tex == NULL) {
BKE_reportf(reports, RPT_ERROR, "Failed to load image texture '%s'", image->id.name + 2);
/* TODO(fclem) this error code makes no sense for vulkan. */
/* TODO(fclem): this error code makes no sense for vulkan. */
return 0x0502; /* GL_INVALID_OPERATION */
}

View File

@@ -57,7 +57,7 @@ static int node_shader_gpu_tex_environment(GPUMaterial *mat,
NodeTexImage *tex_original = node_original->storage;
ImageUser *iuser = &tex_original->iuser;
eGPUSamplerState sampler = GPU_SAMPLER_REPEAT | GPU_SAMPLER_ANISO | GPU_SAMPLER_FILTER;
/* TODO(fclem) For now assume mipmap is always enabled. */
/* TODO(fclem): For now assume mipmap is always enabled. */
if (true) {
sampler |= GPU_SAMPLER_MIPMAP;
}

View File

@@ -93,7 +93,7 @@ static int node_shader_gpu_tex_image(GPUMaterial *mat,
if (tex->interpolation != SHD_INTERP_CLOSEST) {
sampler_state |= GPU_SAMPLER_ANISO | GPU_SAMPLER_FILTER;
/* TODO(fclem) For now assume mipmap is always enabled. */
/* TODO(fclem): For now assume mipmap is always enabled. */
sampler_state |= true ? GPU_SAMPLER_MIPMAP : 0;
}
const bool use_cubic = ELEM(tex->interpolation, SHD_INTERP_CUBIC, SHD_INTERP_SMART);

View File

@@ -5855,7 +5855,7 @@ static PyObject *pyrna_param_to_py(PointerRNA *ptr, PropertyRNA *prop, void *dat
/* Resolve the array from a new pytype. */
/* TODO(Kazanbas) make multi-dimensional sequences here. */
/* TODO(Kazanbas): make multi-dimensional sequences here. */
switch (type) {
case PROP_BOOLEAN:

View File

@@ -633,7 +633,7 @@ void GIZMOGROUP_OT_gizmo_tweak(wmOperatorType *ot)
ot->invoke = gizmo_tweak_invoke;
ot->modal = gizmo_tweak_modal;
/* TODO(campbell) This causes problems tweaking settings for operators,
/* TODO(campbell): This causes problems tweaking settings for operators,
* need to find a way to support this. */
#if 0
ot->flag = OPTYPE_UNDO;

View File

@@ -527,7 +527,7 @@ static int wm_link_append_exec(bContext *C, wmOperator *op)
/* TODO(sergey): Use proper flag for tagging here. */
/* TODO (dalai): Temporary solution!
/* TODO(dalai): Temporary solution!
* Ideally we only need to tag the new objects themselves, not the scene.
* This way we'll avoid flush of collection properties
* to all objects and limit update to the particular object only.