From b7028617ad1eceb62c34ce09402769711d0ae874 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sybren=20A=2E=20St=C3=BCvel?= Date: Tue, 10 Dec 2024 14:52:34 +0100 Subject: [PATCH] Refactor: render tests, change how argparse is used Some changes to how argparse is used in render tests: 1. Use the common approach of one dash for single-letter options (`-b`) and two dashes for longer options (`--blender`). In this commit that just means changing single-dashed (`-testdir`) to double-dashed (`--testdir`). 2. Remove unnecessary `nargs` arguments. The code was telling `argparse` to put CLI arguments into a list of one item, and then had code to turn that one-item list into the item itself. I've removed the `nargs` argument altogether, as that produces the desired value without requiring more code. I've also removed `nargs="+"` from the handling of the `--blender` parameter, as that allowed for multiple occurrences of `--blender {path}` but was silently ignoring all of those except the first. To ensure that required arguments are present, the code now uses `required=True` instead of `nargs`. 3. Add a `description` parameter so that `--help` shows what the test script actually does. It also helps people (like me) who want to figure out which blend file is actually being opened by the test, without making the test itself more verbose. No functional changes, except that you now cannot add multiple `--blender` arguments any more (the CLI invocation will fail). This wasn't used anywhere I could find, though. 
Pull Request: https://projects.blender.org/blender/blender/pulls/131666 --- tests/python/CMakeLists.txt | 70 +++++++++---------- tests/python/bl_io_curve_svg_test.py | 23 +++--- tests/python/compositor_cpu_render_tests.py | 25 +++---- .../compositor_realtime_render_tests.py | 21 +++--- tests/python/cycles_render_tests.py | 30 ++++---- tests/python/eevee_next_render_tests.py | 30 ++++---- tests/python/opengl_draw_tests.py | 21 +++--- tests/python/sequencer_render_tests.py | 22 +++--- tests/python/storm_render_tests.py | 32 ++++----- tests/python/workbench_render_tests.py | 29 ++++---- 10 files changed, 136 insertions(+), 167 deletions(-) diff --git a/tests/python/CMakeLists.txt b/tests/python/CMakeLists.txt index 48f90f67998..6f2a6cece3d 100644 --- a/tests/python/CMakeLists.txt +++ b/tests/python/CMakeLists.txt @@ -109,7 +109,7 @@ endfunction() # Run Python render test. function(add_render_test testname testscript) - set(_args ${ARGN} -blender "${TEST_BLENDER_EXE}" -oiiotool "${OPENIMAGEIO_TOOL}") + set(_args ${ARGN} --blender "${TEST_BLENDER_EXE}" --oiiotool "${OPENIMAGEIO_TOOL}") if(WITH_TESTS_BATCHED) list(APPEND _args --batch) endif() @@ -622,8 +622,8 @@ if(TRUE) add_render_test( io_curve_svg_${render_test} ${CMAKE_CURRENT_LIST_DIR}/bl_io_curve_svg_test.py - -testdir "${TEST_SRC_DIR}/io_tests/svg/${render_test}" - -outdir "${TEST_OUT_DIR}/io_curve_svg" + --testdir "${TEST_SRC_DIR}/io_tests/svg/${render_test}" + --outdir "${TEST_OUT_DIR}/io_curve_svg" ) endforeach() @@ -714,10 +714,10 @@ if(WITH_CYCLES OR WITH_GPU_RENDER_TESTS) add_render_test( ${_cycles_test_name} ${CMAKE_CURRENT_LIST_DIR}/cycles_render_tests.py - -testdir "${TEST_SRC_DIR}/render/${render_test}" - -outdir "${TEST_OUT_DIR}/cycles" - -device ${_cycles_device} - -blocklist ${_cycles_blocklist} + --testdir "${TEST_SRC_DIR}/render/${render_test}" + --outdir "${TEST_OUT_DIR}/cycles" + --device ${_cycles_device} + --blocklist ${_cycles_blocklist} ) if(NOT ("${_cycles_device_lower}" STREQUAL "cpu")) 
set_tests_properties(${_cycles_test_name} PROPERTIES RUN_SERIAL TRUE) @@ -731,10 +731,10 @@ if(WITH_CYCLES OR WITH_GPU_RENDER_TESTS) add_render_test( ${_cycles_test_name}_osl ${CMAKE_CURRENT_LIST_DIR}/cycles_render_tests.py - -testdir "${TEST_SRC_DIR}/render/${render_test}" - -outdir "${TEST_OUT_DIR}/cycles_osl" - -device ${_cycles_device} - -osl + --testdir "${TEST_SRC_DIR}/render/${render_test}" + --outdir "${TEST_OUT_DIR}/cycles_osl" + --device ${_cycles_device} + --osl ) # Doesn't do anything until OptiX is enabled if(NOT ("${_cycles_device_lower}" STREQUAL "cpu")) @@ -764,8 +764,8 @@ if(WITH_CYCLES OR WITH_GPU_RENDER_TESTS) add_render_test( eevee_next_${render_test}_opengl ${CMAKE_CURRENT_LIST_DIR}/eevee_next_render_tests.py - -testdir "${TEST_SRC_DIR}/render/${render_test}" - -outdir "${TEST_OUT_DIR}/eevee_next" + --testdir "${TEST_SRC_DIR}/render/${render_test}" + --outdir "${TEST_OUT_DIR}/eevee_next" --gpu-backend opengl ${_gpu_render_tests_arguments} ) @@ -777,8 +777,8 @@ if(WITH_CYCLES OR WITH_GPU_RENDER_TESTS) add_render_test( eevee_next_${render_test}_metal ${CMAKE_CURRENT_LIST_DIR}/eevee_next_render_tests.py - -testdir "${TEST_SRC_DIR}/render/${render_test}" - -outdir "${TEST_OUT_DIR}/eevee_next" + --testdir "${TEST_SRC_DIR}/render/${render_test}" + --outdir "${TEST_OUT_DIR}/eevee_next" --gpu-backend metal ${_gpu_render_tests_arguments} ) @@ -790,8 +790,8 @@ if(WITH_CYCLES OR WITH_GPU_RENDER_TESTS) add_render_test( eevee_next_${render_test}_vulkan ${CMAKE_CURRENT_LIST_DIR}/eevee_next_render_tests.py - -testdir "${TEST_SRC_DIR}/render/${render_test}" - -outdir "${TEST_OUT_DIR}/eevee_next" + --testdir "${TEST_SRC_DIR}/render/${render_test}" + --outdir "${TEST_OUT_DIR}/eevee_next" --gpu-backend vulkan ${_gpu_render_tests_arguments} ) @@ -804,8 +804,8 @@ if(WITH_CYCLES OR WITH_GPU_RENDER_TESTS) add_render_test( workbench_${render_test}_opengl ${CMAKE_CURRENT_LIST_DIR}/workbench_render_tests.py - -testdir "${TEST_SRC_DIR}/render/${render_test}" - -outdir 
"${TEST_OUT_DIR}/workbench" + --testdir "${TEST_SRC_DIR}/render/${render_test}" + --outdir "${TEST_OUT_DIR}/workbench" --gpu-backend opengl ${_gpu_render_tests_arguments} ) @@ -817,8 +817,8 @@ if(WITH_CYCLES OR WITH_GPU_RENDER_TESTS) add_render_test( workbench_${render_test}_metal ${CMAKE_CURRENT_LIST_DIR}/workbench_render_tests.py - -testdir "${TEST_SRC_DIR}/render/${render_test}" - -outdir "${TEST_OUT_DIR}/workbench" + --testdir "${TEST_SRC_DIR}/render/${render_test}" + --outdir "${TEST_OUT_DIR}/workbench" --gpu-backend metal ${_gpu_render_tests_arguments} ) @@ -830,8 +830,8 @@ if(WITH_CYCLES OR WITH_GPU_RENDER_TESTS) add_render_test( workbench_${render_test}_vulkan ${CMAKE_CURRENT_LIST_DIR}/workbench_render_tests.py - -testdir "${TEST_SRC_DIR}/render/${render_test}" - -outdir "${TEST_OUT_DIR}/workbench" + --testdir "${TEST_SRC_DIR}/render/${render_test}" + --outdir "${TEST_OUT_DIR}/workbench" --gpu-backend vulkan ${_gpu_render_tests_arguments} ) @@ -844,8 +844,8 @@ if(WITH_CYCLES OR WITH_GPU_RENDER_TESTS) add_render_test( storm_hydra_${render_test} ${CMAKE_CURRENT_LIST_DIR}/storm_render_tests.py - -testdir "${TEST_SRC_DIR}/render/${render_test}" - -outdir "${TEST_OUT_DIR}/storm_hydra" + --testdir "${TEST_SRC_DIR}/render/${render_test}" + --outdir "${TEST_OUT_DIR}/storm_hydra" -export_method "HYDRA" ${_gpu_render_tests_arguments} ) @@ -855,8 +855,8 @@ if(WITH_CYCLES OR WITH_GPU_RENDER_TESTS) add_render_test( storm_usd_${render_test} ${CMAKE_CURRENT_LIST_DIR}/storm_render_tests.py - -testdir "${TEST_SRC_DIR}/render/${render_test}" - -outdir "${TEST_OUT_DIR}/storm_usd" + --testdir "${TEST_SRC_DIR}/render/${render_test}" + --outdir "${TEST_OUT_DIR}/storm_usd" -export_method "USD" ${_gpu_render_tests_arguments} ) @@ -890,8 +890,8 @@ if(WITH_COMPOSITOR_CPU) add_render_test( compositor_${comp_test}_cpu ${CMAKE_CURRENT_LIST_DIR}/compositor_cpu_render_tests.py - -testdir "${TEST_SRC_DIR}/compositor/${comp_test}" - -outdir "${TEST_OUT_DIR}/compositor_cpu" + --testdir 
"${TEST_SRC_DIR}/compositor/${comp_test}" + --outdir "${TEST_OUT_DIR}/compositor_cpu" ) endforeach() @@ -922,8 +922,8 @@ if(WITH_COMPOSITOR_REALTIME_TESTS AND WITH_COMPOSITOR_CPU) add_render_test( compositor_${comp_test}_realtime ${CMAKE_CURRENT_LIST_DIR}/compositor_realtime_render_tests.py - -testdir "${TEST_SRC_DIR}/compositor/${comp_test}" - -outdir "${TEST_OUT_DIR}/compositor_realtime" + --testdir "${TEST_SRC_DIR}/compositor/${comp_test}" + --outdir "${TEST_OUT_DIR}/compositor_realtime" ) endforeach() endif() @@ -1013,8 +1013,8 @@ if(WITH_GPU_DRAW_TESTS) add_render_test( opengl_draw_${child} ${CMAKE_CURRENT_LIST_DIR}/opengl_draw_tests.py - -testdir "${child_path}" - -outdir "${TEST_OUT_DIR}/opengl_draw" + --testdir "${child_path}" + --outdir "${TEST_OUT_DIR}/opengl_draw" ) endif() endif() @@ -1128,8 +1128,8 @@ else() add_render_test( sequencer_render_${render_test} ${CMAKE_CURRENT_LIST_DIR}/sequencer_render_tests.py - -testdir "${TEST_SRC_DIR}/sequence_editing/${render_test}" - -outdir "${TEST_OUT_DIR}/sequence_editing" + --testdir "${TEST_SRC_DIR}/sequence_editing/${render_test}" + --outdir "${TEST_OUT_DIR}/sequence_editing" ) endforeach() endif() diff --git a/tests/python/bl_io_curve_svg_test.py b/tests/python/bl_io_curve_svg_test.py index 461829db049..7f82c214034 100644 --- a/tests/python/bl_io_curve_svg_test.py +++ b/tests/python/bl_io_curve_svg_test.py @@ -31,11 +31,13 @@ def get_arguments(filepath, output_filepath): def create_argparse(): - parser = argparse.ArgumentParser() - parser.add_argument("-blender", nargs="+") - parser.add_argument("-testdir", nargs=1) - parser.add_argument("-outdir", nargs=1) - parser.add_argument("-oiiotool", nargs=1) + parser = argparse.ArgumentParser( + description="Run test script for each blend file in TESTDIR, comparing the render result with known output." 
+ ) + parser.add_argument("--blender", required=True) + parser.add_argument("--testdir", required=True) + parser.add_argument("--outdir", required=True) + parser.add_argument("--oiiotool", required=True) parser.add_argument('--batch', default=False, action='store_true') return parser @@ -44,20 +46,15 @@ def main(): parser = create_argparse() args = parser.parse_args() - blender = args.blender[0] - test_dir = args.testdir[0] - oiiotool = args.oiiotool[0] - output_dir = args.outdir[0] - from modules import render_report - report = render_report.Report('IO Curve SVG', output_dir, oiiotool) + report = render_report.Report('IO Curve SVG', args.outdir, args.oiiotool) report.set_pixelated(True) - test_dir_name = Path(test_dir).name + test_dir_name = Path(args.testdir).name if test_dir_name == 'complex': report.set_fail_percent(0.01) - ok = report.run(test_dir, blender, get_arguments, batch=args.batch) + ok = report.run(args.testdir, args.blender, get_arguments, batch=args.batch) sys.exit(not ok) diff --git a/tests/python/compositor_cpu_render_tests.py b/tests/python/compositor_cpu_render_tests.py index ab5ca4ec03e..f457163a5fe 100644 --- a/tests/python/compositor_cpu_render_tests.py +++ b/tests/python/compositor_cpu_render_tests.py @@ -35,11 +35,13 @@ def get_arguments(filepath, output_filepath): def create_argparse(): - parser = argparse.ArgumentParser() - parser.add_argument("-blender", nargs="+") - parser.add_argument("-testdir", nargs=1) - parser.add_argument("-outdir", nargs=1) - parser.add_argument("-oiiotool", nargs=1) + parser = argparse.ArgumentParser( + description="Run test script for each blend file in TESTDIR, comparing the render result with known output." 
+ ) + parser.add_argument("--blender", required=True) + parser.add_argument("--testdir", required=True) + parser.add_argument("--outdir", required=True) + parser.add_argument("--oiiotool", required=True) parser.add_argument('--batch', default=False, action='store_true') return parser @@ -48,20 +50,15 @@ def main(): parser = create_argparse() args = parser.parse_args() - blender = args.blender[0] - test_dir = args.testdir[0] - oiiotool = args.oiiotool[0] - output_dir = args.outdir[0] - from modules import render_report - report = render_report.Report("Compositor CPU", output_dir, oiiotool) + report = render_report.Report("Compositor CPU", args.outdir, args.oiiotool) report.set_pixelated(True) report.set_reference_dir("compositor_cpu_renders") - if os.path.basename(test_dir) == 'filter': + if os.path.basename(args.testdir) == 'filter': # Temporary change to pass OpenImageDenoise test with both 1.3 and 1.4. report.set_fail_threshold(0.05) - elif os.path.basename(test_dir) == 'matte': + elif os.path.basename(args.testdir) == 'matte': # The node_keying_matte.blend test is very sensitive to the exact values in the # input image. 
It makes it hard to precisely match results on different systems # (with and without SSE, i.e.), especially when OCIO has different precision for @@ -69,7 +66,7 @@ def main(): report.set_fail_threshold(0.06) report.set_fail_percent(2) - ok = report.run(test_dir, blender, get_arguments, batch=args.batch) + ok = report.run(args.testdir, args.blender, get_arguments, batch=args.batch) sys.exit(not ok) diff --git a/tests/python/compositor_realtime_render_tests.py b/tests/python/compositor_realtime_render_tests.py index 04e67e21bb1..773ff8e32a0 100644 --- a/tests/python/compositor_realtime_render_tests.py +++ b/tests/python/compositor_realtime_render_tests.py @@ -36,11 +36,13 @@ def get_arguments(filepath, output_filepath): def create_argparse(): - parser = argparse.ArgumentParser() - parser.add_argument("-blender", nargs="+") - parser.add_argument("-testdir", nargs=1) - parser.add_argument("-outdir", nargs=1) - parser.add_argument("-oiiotool", nargs=1) + parser = argparse.ArgumentParser( + description="Run test script for each blend file in TESTDIR, comparing the render result with known output." 
+ ) + parser.add_argument("--blender", required=True) + parser.add_argument("--testdir", required=True) + parser.add_argument("--outdir", required=True) + parser.add_argument("--oiiotool", required=True) parser.add_argument('--batch', default=False, action='store_true') return parser @@ -49,16 +51,11 @@ def main(): parser = create_argparse() args = parser.parse_args() - blender = args.blender[0] - test_dir = args.testdir[0] - oiiotool = args.oiiotool[0] - output_dir = args.outdir[0] - from modules import render_report - report = render_report.Report("Compositor Realtime", output_dir, oiiotool) + report = render_report.Report("Compositor Realtime", args.outdir, args.oiiotool) report.set_reference_dir("compositor_realtime_renders") - ok = report.run(test_dir, blender, get_arguments, batch=args.batch) + ok = report.run(args.testdir, args.blender, get_arguments, batch=args.batch) sys.exit(not ok) diff --git a/tests/python/cycles_render_tests.py b/tests/python/cycles_render_tests.py index 9366dea4d50..912d3231a43 100644 --- a/tests/python/cycles_render_tests.py +++ b/tests/python/cycles_render_tests.py @@ -209,14 +209,16 @@ def get_arguments(filepath, output_filepath, use_hwrt=False, osl=False): def create_argparse(): - parser = argparse.ArgumentParser() - parser.add_argument("-blender", nargs="+") - parser.add_argument("-testdir", nargs=1) - parser.add_argument("-outdir", nargs=1) - parser.add_argument("-oiiotool", nargs=1) - parser.add_argument("-device", nargs=1) - parser.add_argument("-blocklist", nargs="*", default=[]) - parser.add_argument("-osl", default=False, action='store_true') + parser = argparse.ArgumentParser( + description="Run test script for each blend file in TESTDIR, comparing the render result with known output." 
+ ) + parser.add_argument("--blender", required=True) + parser.add_argument("--testdir", required=True) + parser.add_argument("--outdir", required=True) + parser.add_argument("--oiiotool", required=True) + parser.add_argument("--device", required=True) + parser.add_argument("--blocklist", nargs="*", default=[]) + parser.add_argument("--osl", default=False, action='store_true') parser.add_argument('--batch', default=False, action='store_true') return parser @@ -225,11 +227,7 @@ def main(): parser = create_argparse() args = parser.parse_args() - blender = args.blender[0] - test_dir = args.testdir[0] - oiiotool = args.oiiotool[0] - output_dir = args.outdir[0] - device = args.device[0] + device = args.device blocklist = BLOCKLIST_ALL if device != 'CPU': @@ -245,7 +243,7 @@ def main(): if args.osl: blocklist += BLOCKLIST_OSL - report = CyclesReport('Cycles', output_dir, oiiotool, device, blocklist, args.osl) + report = CyclesReport('Cycles', args.outdir, args.oiiotool, device, blocklist, args.osl) report.set_pixelated(True) report.set_reference_dir("cycles_renders") if device == 'CPU': @@ -262,11 +260,11 @@ def main(): # Blackbody is slightly different between SVM and OSL. 
# Microfacet hair renders slightly differently, and fails on Windows and Linux with OSL - test_dir_name = Path(test_dir).name + test_dir_name = Path(args.testdir).name if (test_dir_name in {'motion_blur', 'integrator'}) or ((args.osl) and (test_dir_name in {'shader', 'hair'})): report.set_fail_threshold(0.032) - ok = report.run(test_dir, blender, get_arguments, batch=args.batch) + ok = report.run(args.testdir, args.blender, get_arguments, batch=args.batch) sys.exit(not ok) diff --git a/tests/python/eevee_next_render_tests.py b/tests/python/eevee_next_render_tests.py index 3b3cf4f100c..59b92d227ae 100644 --- a/tests/python/eevee_next_render_tests.py +++ b/tests/python/eevee_next_render_tests.py @@ -183,14 +183,16 @@ def get_arguments(filepath, output_filepath, gpu_backend): def create_argparse(): - parser = argparse.ArgumentParser() - parser.add_argument("-blender", nargs="+") - parser.add_argument("-testdir", nargs=1) - parser.add_argument("-outdir", nargs=1) - parser.add_argument("-oiiotool", nargs=1) + parser = argparse.ArgumentParser( + description="Run test script for each blend file in TESTDIR, comparing the render result with known output." 
+ ) + parser.add_argument("--blender", required=True) + parser.add_argument("--testdir", required=True) + parser.add_argument("--outdir", required=True) + parser.add_argument("--oiiotool", required=True) parser.add_argument('--batch', default=False, action='store_true') parser.add_argument('--fail-silently', default=False, action='store_true') - parser.add_argument('--gpu-backend', nargs=1) + parser.add_argument('--gpu-backend') return parser @@ -198,19 +200,13 @@ def main(): parser = create_argparse() args = parser.parse_args() - blender = args.blender[0] - test_dir = args.testdir[0] - oiiotool = args.oiiotool[0] - output_dir = args.outdir[0] - gpu_backend = args.gpu_backend[0] - - gpu_device_type = get_gpu_device_type(blender) + gpu_device_type = get_gpu_device_type(args.blender) reference_override_dir = None if gpu_device_type == "AMD": reference_override_dir = "eevee_next_renders/amd" - report = EEVEEReport("Eevee Next", output_dir, oiiotool, device=gpu_backend, blocklist=BLOCKLIST) - if gpu_backend == "vulkan": + report = EEVEEReport("Eevee Next", args.outdir, args.oiiotool, device=args.gpu_backend, blocklist=BLOCKLIST) + if args.gpu_backend == "vulkan": report.set_compare_engine('eevee_next', 'opengl') else: report.set_compare_engine('cycles', 'CPU') @@ -219,7 +215,7 @@ def main(): report.set_reference_dir("eevee_next_renders") report.set_reference_override_dir(reference_override_dir) - test_dir_name = Path(test_dir).name + test_dir_name = Path(args.testdir).name if test_dir_name.startswith('image_mapping'): # Platform dependent border values. 
To be fixed report.set_fail_threshold(0.2) @@ -241,7 +237,7 @@ def main(): # points transparent report.set_fail_threshold(0.06) - ok = report.run(test_dir, blender, get_arguments, batch=args.batch, fail_silently=args.fail_silently) + ok = report.run(args.testdir, args.blender, get_arguments, batch=args.batch, fail_silently=args.fail_silently) sys.exit(not ok) diff --git a/tests/python/opengl_draw_tests.py b/tests/python/opengl_draw_tests.py index 7e4414bf4d4..13bb0f781be 100644 --- a/tests/python/opengl_draw_tests.py +++ b/tests/python/opengl_draw_tests.py @@ -50,11 +50,13 @@ def get_arguments(filepath, output_filepath): def create_argparse(): - parser = argparse.ArgumentParser() - parser.add_argument("-blender", nargs="+") - parser.add_argument("-testdir", nargs=1) - parser.add_argument("-outdir", nargs=1) - parser.add_argument("-oiiotool", nargs=1) + parser = argparse.ArgumentParser( + description="Run test script for each blend file in TESTDIR, comparing the render result with known output." 
+ ) + parser.add_argument("--blender", required=True) + parser.add_argument("--testdir", required=True) + parser.add_argument("--outdir", required=True) + parser.add_argument("--oiiotool", required=True) return parser @@ -62,14 +64,9 @@ def main(): parser = create_argparse() args = parser.parse_args() - blender = args.blender[0] - test_dir = args.testdir[0] - oiiotool = args.oiiotool[0] - output_dir = args.outdir[0] - from modules import render_report - report = render_report.Report("OpenGL Draw", output_dir, oiiotool) - ok = report.run(test_dir, blender, get_arguments) + report = render_report.Report("OpenGL Draw", args.outdir, args.oiiotool) + ok = report.run(args.testdir, args.blender, get_arguments) sys.exit(not ok) diff --git a/tests/python/sequencer_render_tests.py b/tests/python/sequencer_render_tests.py index 0f713619baa..ff5e65957fa 100644 --- a/tests/python/sequencer_render_tests.py +++ b/tests/python/sequencer_render_tests.py @@ -28,11 +28,13 @@ def get_arguments(filepath, output_filepath): def create_argparse(): - parser = argparse.ArgumentParser() - parser.add_argument("-blender", nargs="+") - parser.add_argument("-testdir", nargs=1) - parser.add_argument("-outdir", nargs=1) - parser.add_argument("-oiiotool", nargs=1) + parser = argparse.ArgumentParser( + description="Run test script for each blend file in TESTDIR, comparing the render result with known output." 
+ ) + parser.add_argument("--blender", required=True) + parser.add_argument("--testdir", required=True) + parser.add_argument("--outdir", required=True) + parser.add_argument("--oiiotool", required=True) parser.add_argument("--batch", default=False, action="store_true") return parser @@ -41,21 +43,15 @@ def main(): parser = create_argparse() args = parser.parse_args() - blender = args.blender[0] - test_dir = args.testdir[0] - oiiotool = args.oiiotool[0] - output_dir = args.outdir[0] - from modules import render_report - report = render_report.Report("Sequencer", output_dir, oiiotool) + report = render_report.Report("Sequencer", args.outdir, args.oiiotool) report.set_pixelated(True) # Default error tolerances are quite large, lower them. report.set_fail_threshold(2.0 / 255.0) report.set_fail_percent(0.01) report.set_reference_dir("reference") - test_dir_name = Path(test_dir).name - ok = report.run(test_dir, blender, get_arguments, batch=args.batch) + ok = report.run(args.testdir, args.blender, get_arguments, batch=args.batch) sys.exit(not ok) diff --git a/tests/python/storm_render_tests.py b/tests/python/storm_render_tests.py index a058268adee..6847cf9c55c 100644 --- a/tests/python/storm_render_tests.py +++ b/tests/python/storm_render_tests.py @@ -51,12 +51,14 @@ def get_arguments(filepath, output_filepath): def create_argparse(): - parser = argparse.ArgumentParser() - parser.add_argument("-blender", nargs="+") - parser.add_argument("-testdir", nargs=1) - parser.add_argument("-outdir", nargs=1) - parser.add_argument("-oiiotool", nargs=1) - parser.add_argument("-export_method", nargs=1) + parser = argparse.ArgumentParser( + description="Run test script for each blend file in TESTDIR, comparing the render result with known output." 
+ ) + parser.add_argument("--blender", required=True) + parser.add_argument("--testdir", required=True) + parser.add_argument("--outdir", required=True) + parser.add_argument("--oiiotool", required=True) + parser.add_argument("--export_method", required=True) parser.add_argument('--batch', default=False, action='store_true') parser.add_argument('--fail-silently', default=False, action='store_true') return parser @@ -66,30 +68,24 @@ def main(): parser = create_argparse() args = parser.parse_args() - blender = args.blender[0] - test_dir = args.testdir[0] - oiiotool = args.oiiotool[0] - output_dir = args.outdir[0] - export_method = args.export_method[0] - from modules import render_report - if export_method == 'HYDRA': - report = render_report.Report("Storm Hydra", output_dir, oiiotool) + if args.export_method == 'HYDRA': + report = render_report.Report("Storm Hydra", args.outdir, args.oiiotool) report.set_reference_dir("storm_hydra_renders") report.set_compare_engine('cycles', 'CPU') else: - report = render_report.Report("Storm USD", output_dir, oiiotool) + report = render_report.Report("Storm USD", args.outdir, args.oiiotool) report.set_reference_dir("storm_usd_renders") report.set_compare_engine('storm_hydra') report.set_pixelated(True) - test_dir_name = Path(test_dir).name + test_dir_name = Path(args.testdir).name - os.environ['BLENDER_HYDRA_EXPORT_METHOD'] = export_method + os.environ['BLENDER_HYDRA_EXPORT_METHOD'] = args.export_method - ok = report.run(test_dir, blender, get_arguments, batch=args.batch, fail_silently=args.fail_silently) + ok = report.run(args.testdir, args.blender, get_arguments, batch=args.batch, fail_silently=args.fail_silently) sys.exit(not ok) diff --git a/tests/python/workbench_render_tests.py b/tests/python/workbench_render_tests.py index 197ec36cd11..3538fd67439 100644 --- a/tests/python/workbench_render_tests.py +++ b/tests/python/workbench_render_tests.py @@ -74,14 +74,16 @@ def get_arguments(filepath, output_filepath, gpu_backend): def 
create_argparse(): - parser = argparse.ArgumentParser() - parser.add_argument("-blender", nargs="+") - parser.add_argument("-testdir", nargs=1) - parser.add_argument("-outdir", nargs=1) - parser.add_argument("-oiiotool", nargs=1) + parser = argparse.ArgumentParser( + description="Run test script for each blend file in TESTDIR, comparing the render result with known output." + ) + parser.add_argument("--blender", required=True) + parser.add_argument("--testdir", required=True) + parser.add_argument("--outdir", required=True) + parser.add_argument("--oiiotool", required=True) parser.add_argument('--batch', default=False, action='store_true') parser.add_argument('--fail-silently', default=False, action='store_true') - parser.add_argument('--gpu-backend', nargs=1) + parser.add_argument('--gpu-backend') return parser @@ -89,26 +91,19 @@ def main(): parser = create_argparse() args = parser.parse_args() - blender = args.blender[0] - test_dir = args.testdir[0] - oiiotool = args.oiiotool[0] - output_dir = args.outdir[0] - gpu_backend = args.gpu_backend[0] - - from modules import render_report - report = WorkbenchReport("Workbench", output_dir, oiiotool, device=gpu_backend) - if gpu_backend == "vulkan": + report = WorkbenchReport("Workbench", args.outdir, args.oiiotool, device=args.gpu_backend) + if args.gpu_backend == "vulkan": report.set_compare_engine('workbench', 'opengl') else: report.set_compare_engine('eevee_next', 'opengl') report.set_pixelated(True) report.set_reference_dir("workbench_renders") - test_dir_name = Path(test_dir).name + test_dir_name = Path(args.testdir).name if test_dir_name.startswith('hair') and platform.system() == "Darwin": report.set_fail_threshold(0.050) - ok = report.run(test_dir, blender, get_arguments, batch=args.batch, fail_silently=args.fail_silently) + ok = report.run(args.testdir, args.blender, get_arguments, batch=args.batch, fail_silently=args.fail_silently) sys.exit(not ok)