From ba3749ad47f91fd41a4559e4b391e053ba3a573a Mon Sep 17 00:00:00 2001
From: Miguel Pozo
Date: Thu, 20 Feb 2025 17:18:59 +0100
Subject: [PATCH] Overlay: Add tests

This adds support for Overlay tests.
There are some differences from how we handle tests for other engines:
- The renders are captured using `bpy.ops.render.opengl()`, but this won't work on our GPU build bots.
- A single blend file can run multiple tests by outputting a .txt list with the test names.
- Each overlay test blend file requires a matching script file with the same name inside `tests/python/overlay/`.
- To reproduce a specific test state, you can run `blender "(...)/tests/data/overlay/.blend" -P "(...)/tests/python/overlay/.py" -- --test `.

Note: The current test permutations are WIP, so reference images are not committed to the data repo for now.

Pull Request: https://projects.blender.org/blender/blender/pulls/133879
---
 CMakeLists.txt                         |   7 +
 tests/data                             |   2 +-
 tests/python/CMakeLists.txt            |  36 ++++
 tests/python/modules/render_report.py  | 167 +++++++++--------
 tests/python/overlay/modes-gen.py      |  19 ++
 tests/python/overlay/overlay_common.py | 246 +++++++++++++++++++++++++
 tests/python/overlay_render_tests.py   |  94 ++++++++++
 7 files changed, 495 insertions(+), 76 deletions(-)
 mode change 100755 => 100644 tests/python/modules/render_report.py
 create mode 100644 tests/python/overlay/modes-gen.py
 create mode 100644 tests/python/overlay/overlay_common.py
 create mode 100644 tests/python/overlay_render_tests.py

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 63d16b245a1..0c3328507ef 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -827,6 +827,13 @@ Run GPU render tests silently (finished tests will pass). \
 Generated report will show failing tests"
   ON
 )
+option(WITH_GPU_RENDER_TESTS_HEADED "\
+Enable GPU render related unit testing that doesn't work in background mode. \
+These tests require an environment with a display server to run.
\ +Requires WITH_GPU_RENDER_TESTS " + OFF +) +mark_as_advanced(WITH_GPU_RENDER_TESTS_HEADED) option(WITH_GPU_BACKEND_TESTS "\ Enable GPU backend related unit testing" OFF diff --git a/tests/data b/tests/data index 6a67e8b51c4..cc6c8227910 160000 --- a/tests/data +++ b/tests/data @@ -1 +1 @@ -Subproject commit 6a67e8b51c445c27838f94d4fddc0dd8ba7e07ab +Subproject commit cc6c82279104eedf7720b1bdaa0c9fd82dc5c61e diff --git a/tests/python/CMakeLists.txt b/tests/python/CMakeLists.txt index 026ac71b6cc..db00c1fb6cb 100644 --- a/tests/python/CMakeLists.txt +++ b/tests/python/CMakeLists.txt @@ -764,6 +764,42 @@ if(WITH_CYCLES OR WITH_GPU_RENDER_TESTS) endforeach() endif() + # Overlay + if(WITH_GPU_RENDER_TESTS_HEADED) + if(WITH_OPENGL_BACKEND) + add_render_test( + overlay_opengl + ${CMAKE_CURRENT_LIST_DIR}/overlay_render_tests.py + --testdir "${TEST_SRC_DIR}/overlay" + --outdir "${TEST_OUT_DIR}/overlay" + --gpu-backend opengl + ${_gpu_render_tests_arguments} + ) + endif() + + if(WITH_METAL_BACKEND) + add_render_test( + overlay_metal + ${CMAKE_CURRENT_LIST_DIR}/overlay_render_tests.py + --testdir "${TEST_SRC_DIR}/overlay" + --outdir "${TEST_OUT_DIR}/overlay" + --gpu-backend metal + ${_gpu_render_tests_arguments} + ) + endif() + + if(WITH_VULKAN_BACKEND AND WITH_GPU_RENDER_TESTS_VULKAN) + add_render_test( + overlay_vulkan + ${CMAKE_CURRENT_LIST_DIR}/overlay_render_tests.py + --testdir "${TEST_SRC_DIR}/overlay" + --outdir "${TEST_OUT_DIR}/overlay" + --gpu-backend vulkan + ${_gpu_render_tests_arguments} + ) + endif() + endif() + if(WITH_HYDRA) # Hydra Storm foreach(render_test ${gpu_render_tests}) diff --git a/tests/python/modules/render_report.py b/tests/python/modules/render_report.py old mode 100755 new mode 100644 index 86fe76b4bc3..a2b3b50ea3b --- a/tests/python/modules/render_report.py +++ b/tests/python/modules/render_report.py @@ -42,8 +42,7 @@ def test_get_name(filepath): return os.path.splitext(filename)[0] -def test_get_images(output_dir, filepath, reference_dir, reference_override_dir): - testname = test_get_name(filepath) +def test_get_images(output_dir, filepath, testname, reference_dir, reference_override_dir): dirpath = os.path.dirname(filepath) old_dirpath = os.path.join(dirpath, reference_dir) @@ -73,6 +72,17 @@ def test_get_images(output_dir, filepath, reference_dir, reference_override_dir) return old_img, ref_img, new_img, diff_color_img, diff_alpha_img +class TestResult: + def __init__(self, report, filepath, name): + self.filepath = filepath + self.name = name + self.error = None + self.tmp_out_img_base = os.path.join(report.output_dir, "tmp_" + name) + self.tmp_out_img = self.tmp_out_img_base + '0001.png' + self.old_img, self.ref_img, self.new_img, self.diff_color_img, self.diff_alpha_img = test_get_images( + report.output_dir, filepath, name, report.reference_dir, report.reference_override_dir) + + class Report: __slots__ = ( 'title', @@ -329,31 +339,27 @@ class Report: relpath = os.path.relpath(filepath, self.output_dir) return pathlib.Path(relpath).as_posix() - def _write_test_html(self, testname, filepath, error): - name = test_get_name(filepath) - name = name.replace('_', ' ') + def _write_test_html(self, test_category, test_result): + name = test_result.name.replace('_', ' ') - old_img, ref_img, new_img, diff_color_img, diff_alpha_img = test_get_images( - self.output_dir, filepath, self.reference_dir, self.reference_override_dir) + status = test_result.error if test_result.error else "" + tr_style = """ class="table-danger" """ if test_result.error else "" - status = error 
if error else "" - tr_style = """ class="table-danger" """ if error else "" - - new_url = self._relative_url(new_img) - ref_url = self._relative_url(ref_img) - diff_color_url = self._relative_url(diff_color_img) - diff_alpha_url = self._relative_url(diff_alpha_img) + new_url = self._relative_url(test_result.new_img) + ref_url = self._relative_url(test_result.ref_img) + diff_color_url = self._relative_url(test_result.diff_color_img) + diff_alpha_url = self._relative_url(test_result.diff_alpha_img) test_html = f""" - {name}
{testname}
{status} + {name}
{test_category}
{status} """ - if error: + if test_result.error: self.failed_tests += test_html else: self.passed_tests += test_html @@ -369,33 +375,30 @@ class Report: """ . format(tr_style=tr_style, name=name, - testname=testname, + testname=test_result.name, status=status, new_url=new_url, ref_url=ref_url) self.compare_tests += test_html - def _diff_output(self, filepath, tmp_filepath): - old_img, ref_img, new_img, diff_color_img, diff_alpha_img = test_get_images( - self.output_dir, filepath, self.reference_dir, self.reference_override_dir) - + def _diff_output(self, test): # Create reference render directory. - old_dirpath = os.path.dirname(old_img) + old_dirpath = os.path.dirname(test.old_img) os.makedirs(old_dirpath, exist_ok=True) # Copy temporary to new image. - if os.path.exists(new_img): - os.remove(new_img) - if os.path.exists(tmp_filepath): - shutil.copy(tmp_filepath, new_img) + if os.path.exists(test.new_img): + os.remove(test.new_img) + if os.path.exists(test.tmp_out_img): + shutil.copy(test.tmp_out_img, test.new_img) - if os.path.exists(ref_img): + if os.path.exists(test.ref_img): # Diff images test with threshold. command = ( self.oiiotool, - ref_img, - tmp_filepath, + test.ref_img, + test.tmp_out_img, "--fail", str(self.fail_threshold), "--failpercent", str(self.fail_percent), "--diff", @@ -415,21 +418,21 @@ class Report: if failed and self.update: # Update reference image if requested. - shutil.copy(new_img, ref_img) - shutil.copy(new_img, old_img) + shutil.copy(test.new_img, test.ref_img) + shutil.copy(test.new_img, test.old_img) failed = False # Generate color diff image. command = ( self.oiiotool, - ref_img, + test.ref_img, "--ch", "R,G,B", - tmp_filepath, + test.tmp_out_img, "--ch", "R,G,B", "--sub", "--abs", "--mulc", "16", - "-o", diff_color_img, + "-o", test.diff_color_img, ) try: subprocess.check_output(command, stderr=subprocess.STDOUT) @@ -440,14 +443,14 @@ class Report: # Generate alpha diff image. command = ( self.oiiotool, - ref_img, + test.ref_img, "--ch", "A", - tmp_filepath, + test.tmp_out_img, "--ch", "A", "--sub", "--abs", "--mulc", "16", - "-o", diff_alpha_img, + "-o", test.diff_alpha_img, ) try: subprocess.check_output(command, stderr=subprocess.STDOUT) @@ -475,27 +478,36 @@ class Report: # Each render test is supposed to override this method. return [] + def _get_filepath_tests(self, filepath): + list_filepath = filepath.replace('.blend', '_permutations.txt') + if os.path.exists(list_filepath): + with open(list_filepath, 'r') as file: + return [TestResult(self, filepath, testname.rstrip('\n')) for testname in file] + else: + testname = test_get_name(filepath) + return [TestResult(self, filepath, testname)] + def _run_tests(self, filepaths, blender, arguments_cb, batch): # Run multiple tests in a single Blender process since startup can be # a significant factor. In case of crashes, re-run the remaining tests. 
verbose = os.environ.get("BLENDER_VERBOSE") is not None remaining_filepaths = filepaths[:] - errors = [] + test_results = [] while len(remaining_filepaths) > 0: command = [blender] - output_filepaths = [] + running_tests = [] # Construct output filepaths and command to run for filepath in remaining_filepaths: + running_tests.append(filepath) + testname = test_get_name(filepath) print_message(testname, 'SUCCESS', 'RUN') base_output_filepath = os.path.join(self.output_dir, "tmp_" + testname) output_filepath = base_output_filepath + '0001.png' - output_filepaths.append(output_filepath) - if os.path.exists(output_filepath): os.remove(output_filepath) @@ -524,35 +536,41 @@ class Report: print(output.decode("utf-8", 'ignore')) # Detect missing filepaths and consider those errors - for filepath, output_filepath in zip(remaining_filepaths[:], output_filepaths): + for filepath in running_tests: remaining_filepaths.pop(0) + file_crashed = False - if crash: - # In case of crash, stop after missing files and re-render remaining - if not os.path.exists(output_filepath): - errors.append("CRASH") - print_message("Crash running Blender") - print_message(testname, 'FAILURE', 'FAILED') - break + for test in self._get_filepath_tests(filepath): + if crash: + # In case of crash, stop after missing files and re-render remaining + if not os.path.exists(test.tmp_out_img): + test.error = "CRASH" + print_message("Crash running Blender") + print_message(test.name, 'FAILURE', 'FAILED') + file_crashed = True + break - testname = test_get_name(filepath) + if not os.path.exists(test.tmp_out_img) or os.path.getsize(test.tmp_out_img) == 0: + test.error = "NO OUTPUT" + print_message("No render result file found") + print_message(test.tmp_out_img, 'FAILURE', 'FAILED') + elif not self._diff_output(test): + test.error = "VERIFY" + print_message("Render result is different from reference image") + print_message(test.name, 'FAILURE', 'FAILED') + else: + test.error = None + print_message(test.name, 'SUCCESS', 'OK') - if not os.path.exists(output_filepath) or os.path.getsize(output_filepath) == 0: - errors.append("NO OUTPUT") - print_message("No render result file found") - print_message(testname, 'FAILURE', 'FAILED') - elif not self._diff_output(filepath, output_filepath): - errors.append("VERIFY") - print_message("Render result is different from reference image") - print_message(testname, 'FAILURE', 'FAILED') - else: - errors.append(None) - print_message(testname, 'SUCCESS', 'OK') + if os.path.exists(test.tmp_out_img): + os.remove(test.tmp_out_img) - if os.path.exists(output_filepath): - os.remove(output_filepath) + test_results.append(test) - return errors + if file_crashed: + break + + return test_results def _run_all_tests(self, dirname, dirpath, blender, arguments_cb, batch, fail_silently): passed_tests = [] @@ -568,22 +586,21 @@ class Report: format(len(all_files)), 'SUCCESS', "==========") time_start = time.time() - errors = self._run_tests(all_files, blender, arguments_cb, batch) - for filepath, error in zip(all_files, errors): - testname = test_get_name(filepath) - if error: - if error == "NO_ENGINE": + test_results = self._run_tests(all_files, blender, arguments_cb, batch) + for test in test_results: + if test.error: + if test.error == "NO_ENGINE": return False - elif error == "NO_START": + elif test.error == "NO_START": return False - if fail_silently and error != 'CRASH': - silently_failed_tests.append(testname) + if fail_silently and test.error != 'CRASH': + silently_failed_tests.append(test.name) else: - 
failed_tests.append(testname) + failed_tests.append(test.name) else: - passed_tests.append(testname) - self._write_test_html(dirname, filepath, error) + passed_tests.append(test.name) + self._write_test_html(dirname, test) time_end = time.time() elapsed_ms = int((time_end - time_start) * 1000) print_message("") diff --git a/tests/python/overlay/modes-gen.py b/tests/python/overlay/modes-gen.py new file mode 100644 index 00000000000..d0d724a54eb --- /dev/null +++ b/tests/python/overlay/modes-gen.py @@ -0,0 +1,19 @@ +# SPDX-FileCopyrightText: 2025 Blender Authors +# +# SPDX-License-Identifier: Apache-2.0 + +import os +import sys +import bpy + +sys.path.append(os.path.dirname(__file__)) +import overlay_common + +bpy.context.window.workspace = bpy.data.workspaces['Test'] + +ob = bpy.context.active_object +space = bpy.data.screens["Default"].areas[0].spaces[0] + +permutations = overlay_common.ob_modes_permutations(ob, space) + +overlay_common.run_test(permutations) diff --git a/tests/python/overlay/overlay_common.py b/tests/python/overlay/overlay_common.py new file mode 100644 index 00000000000..23cc52c7d24 --- /dev/null +++ b/tests/python/overlay/overlay_common.py @@ -0,0 +1,246 @@ +# SPDX-FileCopyrightText: 2025 Blender Authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +import os +from pathlib import Path +import argparse + +"""Common functionality for Overlay render tests. + +The intended usage is setting up a Permutations instance containing all the variants/permutations for a given blend file, +then passing it to the `run_test(permutations)` function. + +The `run_test` function also checks for a `--test` argument in `sys.argv`. +When set, it will reproduce the state of a given test number. +So: +`blender "(...)/tests/data/overlay/.blend" -P "(...)/tests/python/overlay/.py" -- --test ` +will open the blend file and set its state to the permutation, instead of running the tests. + +Common permutations can also be declared inside this file. +See `ob_modes_permutations` for an example on how to setup permutations. +""" + + +class Permutations: + """Container class for the permutations of a given test file. + + Can be combined with other Permutations. + """ + + def __init__(self, reset_key=None, variants_dict={}): + """Setup the initial set of permutations. + + :reset_key: str - The variants_dict key that resets the test to its default state. + :variants_dict: {str: lambda} + - Where the key is the name of the state variation (for example "xray-on" or "xray-off") + and the lambda sets the blend to that state. + """ + reset = [] + if reset_key: + reset = [variants_dict[reset_key]] + self.dict = {k: Permutation([v], reset) for k, v in variants_dict.items()} + + def add(self, key_filter_cb=None, permutations_array=[]): + """Combine two sets of permutations. + + Replaces the current permutations with every possible combination of current and incoming permutations, + unless key_filter_cb is set and returns False for a given key, in which case they're kept as-is. + + :key_filter_cb: lambda (key: str): :bool: + - A callback for deciding which keys the permutations should be applied to. + :permutations: [Permutations] + - An array of Permutations instances. 
Each Permutations instance is applied sequentially, + so a.add(None, [b]); a.add(None, [c]) is equivalent to a.add(None, [b, c]) + """ + for permutations in permutations_array: + dict_copy = self.dict.copy() + self.dict = {} + for key, permutation in dict_copy.items(): + if key_filter_cb and not key_filter_cb(key): + self.dict[key] = permutation + continue + for key_in, permutation_in in permutations.dict.items(): + self.dict[f"{key}_{key_in}"] = permutation.combine(permutation_in) + + def loop(self): + """Loop through each permutation, applying its state before yielding its key.""" + for key, permutation in self.dict.items(): + permutation.apply() + yield key + permutation.reset() + + +class Permutation: + """A single test permutation. + + A permutation containing all the callbacks needed for setting up its state (_apply), + and for resetting the test back to its default state (_reset). + + This class is meant to be used internally by the Permutations class. + """ + + def __init__(self, apply, reset): + self._apply = apply + self._reset = reset + + def combine(self, other): + return Permutation(self._apply + other._apply, self._reset + other._reset) + + def apply(self): + for cb in self._apply: + cb() + + def reset(self): + for cb in self._reset: + cb() + + +def set_permutation_from_args(permutations): + """If the command line requested a specific permutation, set the blend state to it.""" + import sys + if "--" not in sys.argv: + return False + parser = argparse.ArgumentParser() + parser.add_argument("--test", type=int, default=0, required=True) + args = parser.parse_args(sys.argv[sys.argv.index("--") + 1:]) + if args.test == 0: + return False + else: + key = list(permutations.dict.keys())[args.test] + print(f"Set test permutation {args.test}: {key}") + permutations.dict[key].apply() + return True + + +def render_permutations(permutations): + """Render each permutation and generate a list, following the conventions expected by render_report.py.""" + base_output_path = bpy.context.scene.render.filepath + base_testname = Path(bpy.data.filepath).stem + output_paths = [] + permutation_index = 0 + for key in permutations.loop(): + permutation_index += 1 + testname = f"{permutation_index:04d}_{key}" + filepath = f"{base_output_path}_{testname}0001.png" + testpath = f"{base_testname}_{testname}" + output_paths.append(testpath) + bpy.context.scene.render.filepath = filepath + bpy.ops.render.opengl(write_still=True, view_context=True) + + output_list_txt = bpy.data.filepath.replace(".blend", "_permutations.txt") + with open(output_list_txt, 'w') as file: + file.write("\n".join(output_paths)) + + +def run_test(permutations): + """Check if the command line requested a specific permutation, otherwise run all tests and quit Blender.""" + if set_permutation_from_args(permutations): + return + + def run(): + render_permutations(permutations) + bpy.ops.wm.quit_blender() + + bpy.app.timers.register(run, first_interval=1) + + +def ob_modes_permutations(ob, space): + """Returns permutations for every possible object mode and overlay settings.""" + shading = space.shading + overlay = space.overlay + + ob_modes = Permutations(None, { + "object": lambda: bpy.ops.object.mode_set(mode='OBJECT'), + "edit": lambda: bpy.ops.object.mode_set(mode='EDIT'), + "sculpt": lambda: bpy.ops.object.mode_set(mode='SCULPT'), + "vertex-paint": lambda: bpy.ops.object.mode_set(mode='VERTEX_PAINT'), + "weight-paint": lambda: bpy.ops.object.mode_set(mode='WEIGHT_PAINT'), + "texture-paint": lambda: 
bpy.ops.object.mode_set(mode='TEXTURE_PAINT'), + }) + + ob_modes.add(lambda key: "sculpt" in key, [ + Permutations("mask-off", { + "mask-off": lambda: setattr(overlay, "show_sculpt_mask", False), + "mask-on": lambda: setattr(overlay, "show_sculpt_mask", True), + }), + Permutations("sets-off", { + "sets-off": lambda: setattr(overlay, "show_sculpt_face_sets", False), + "sets-on": lambda: setattr(overlay, "show_sculpt_face_sets", True), + }), + ]) + + ob_modes.add(lambda key: "paint" in key, [ + Permutations("mask-off", { + "mask-off": lambda: ( + setattr(ob.data, "use_paint_mask", False), setattr(ob.data, "use_paint_mask_vertex", False)), + "mask-face": lambda: ( + setattr(ob.data, "use_paint_mask", True), setattr(ob.data, "use_paint_mask_vertex", False)), + "mask-vert": lambda: ( + setattr(ob.data, "use_paint_mask", False), setattr(ob.data, "use_paint_mask_vertex", True)), + }), + Permutations("paint-wire-off", { + "paint-wire-off": lambda: setattr(overlay, "show_paint_wire", False), + "paint-wire-on": lambda: setattr(overlay, "show_paint_wire", True), + }), + ]) + + ob_modes.add(lambda key: "weight-paint" in key, [ + Permutations("w-contours-off", { + "w-contours-off": lambda: setattr(overlay, "show_wpaint_contours", False), + "w-contours-on": lambda: setattr(overlay, "show_wpaint_contours", True), + }) + ]) + + # TODO: Edit mode variants. + + shading_modes = Permutations("solid", { + "solid": lambda: setattr(shading, "type", 'SOLID'), + "wireframe": lambda: setattr(shading, "type", 'WIREFRAME'), + }) + + shading_modes.add(lambda key: key == "solid", [ + Permutations("xray-off", { + "xray-off": lambda: ( + setattr(shading, "show_xray", False)), + "xray-on": lambda: ( + setattr(shading, "show_xray", True), setattr(shading, "xray_alpha", 0.5)), + "xray-on-alpha-1": lambda: ( + setattr(shading, "show_xray", True), setattr(shading, "xray_alpha", 1.0)), + }) + ]) + + shading_modes.add(lambda key: key == "wireframe", [ + Permutations("xray-off", { + "xray-off": lambda: ( + setattr(shading, "show_xray_wireframe", False)), + "xray-on": lambda: ( + setattr(shading, "show_xray_wireframe", True), setattr(shading, "xray_alpha_wireframe", 0.5)), + "xray-on-alpha-1": lambda: ( + setattr(shading, "show_xray_wireframe", True), setattr(shading, "xray_alpha_wireframe", 1.0)), + }) + ]) + + shading_modes.add(lambda key: "solid" in key, [ + Permutations("ob-solid", { + "ob-solid": lambda: setattr(ob, "display_type", 'SOLID'), + "ob-wire": lambda: setattr(ob, "display_type", 'WIRE'), + }) + ]) + + shading_modes.add(lambda key: "ob-solid" in key, [ + Permutations("ob-wire-off", { + "ob-wire-off": lambda: setattr(ob, "show_wire", False), + "ob-wire-on": lambda: setattr(ob, "show_wire", True), + }) + ]) + + in_front_modes = Permutations("in-front-off", { + "in-front-off": lambda: setattr(ob, "show_in_front", False), + "in-front-on": lambda: setattr(ob, "show_in_front", True), + }) + + ob_modes.add(None, [shading_modes, in_front_modes]) + + return ob_modes diff --git a/tests/python/overlay_render_tests.py b/tests/python/overlay_render_tests.py new file mode 100644 index 00000000000..863f390a886 --- /dev/null +++ b/tests/python/overlay_render_tests.py @@ -0,0 +1,94 @@ +#!/usr/bin/env python3 +# SPDX-FileCopyrightText: 2025 Blender Authors +# +# SPDX-License-Identifier: Apache-2.0 + +import argparse +import importlib.util +import os +import platform +import subprocess +import sys +from pathlib import Path +import sys + +from modules import render_report + + +class OverlayReport(render_report.Report): + def 
__init__(self, title, output_dir, oiiotool, variation=None, blocklist=[]):
+        super().__init__(title, output_dir, oiiotool, variation=variation, blocklist=blocklist)
+        self.gpu_backend = variation
+
+    def _get_render_arguments(self, arguments_cb, filepath, base_output_filepath):
+        return arguments_cb(filepath, base_output_filepath, gpu_backend=self.gpu_backend)
+
+
+def get_arguments(filepath, output_filepath, gpu_backend):
+    arguments = [
+        "--no-window-focus",
+        "--window-geometry",
+        "0", "0", "128", "128",
+        "-noaudio",
+        "--factory-startup",
+        "--enable-autoexec",
+        "--debug-memory",
+        "--debug-exit-on-error"]
+
+    if gpu_backend:
+        arguments.extend(["--gpu-backend", gpu_backend])
+
+    # Windows separators get messed up when passing them inside the python expression
+    output_filepath = output_filepath.replace("\\", "/")
+
+    script_name = Path(filepath).stem + ".py"
+    current_dir = os.path.dirname(os.path.realpath(__file__))
+    script_filepath = os.path.join(current_dir, "overlay", script_name)
+
+    arguments.extend([
+        filepath,
+        "--python-expr",
+        f'import bpy; bpy.context.scene.render.filepath = "{output_filepath}"',
+        "-P",
+        script_filepath])
+
+    return arguments
+
+
+def create_argparse():
+    parser = argparse.ArgumentParser(
+        description="Run test script for each blend file in TESTDIR, comparing the render result with known output."
+    )
+    parser.add_argument("--blender", required=True)
+    parser.add_argument("--testdir", required=True)
+    parser.add_argument("--outdir", required=True)
+    parser.add_argument("--oiiotool", required=True)
+    parser.add_argument('--batch', default=False, action='store_true')
+    parser.add_argument('--fail-silently', default=False, action='store_true')
+    parser.add_argument('--gpu-backend')
+    return parser
+
+
+def main():
+    parser = create_argparse()
+    args = parser.parse_args()
+
+    report = OverlayReport("Overlay", args.outdir, args.oiiotool, variation=args.gpu_backend)
+    if args.gpu_backend == "vulkan":
+        report.set_compare_engine('overlay', 'opengl')
+    else:
+        report.set_compare_engine('workbench', 'opengl')
+    report.set_pixelated(True)
+    report.set_reference_dir("overlay_renders")
+
+    test_dir_name = Path(args.testdir).name
+    if test_dir_name.startswith('hair') and platform.system() == "Darwin":
+        report.set_fail_threshold(0.050)
+
+    ok = report.run(args.testdir, args.blender, get_arguments, batch=args.batch, fail_silently=args.fail_silently)
+
+    sys.exit(not ok)
+
+
+if __name__ == "__main__":
+    main()
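
For a concrete picture of the per-file convention described in the commit message (each blend file in `tests/data/overlay/` paired with a script of the same name in `tests/python/overlay/`), a minimal companion script could look like the sketch below. The blend file name ("wireframe"), the screen name, and the single overlay toggle used here are hypothetical; only the `sys.path` setup and the `Permutations`/`run_test` API are taken from `modes-gen.py` and `overlay_common.py` in this patch.

# Sketch of a hypothetical tests/python/overlay/wireframe.py companion script
# for a hypothetical tests/data/overlay/wireframe.blend file. It must run
# inside Blender (headed, since bpy.ops.render.opengl() needs a window).
import os
import sys
import bpy

sys.path.append(os.path.dirname(__file__))
import overlay_common

# Assumption: the blend file has a screen named "Default" with a 3D viewport
# as its first area, as in modes-gen.py.
space = bpy.data.screens["Default"].areas[0].spaces[0]
overlay = space.overlay

# One permutation per overlay state; the reset key restores the default state
# after each permutation is rendered.
permutations = overlay_common.Permutations("wireframes-off", {
    "wireframes-off": lambda: setattr(overlay, "show_wireframes", False),
    "wireframes-on": lambda: setattr(overlay, "show_wireframes", True),
})

# Renders every permutation and writes the *_permutations.txt list consumed by
# render_report.py, or only applies one permutation when run with `-- --test N`.
overlay_common.run_test(permutations)

Such a script would be invoked the same way as the reproduce command shown in the commit message, with the blend file as the file argument and the script passed via `-P`.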
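
A note on how `Permutations.add()` combines variants, since the docstring describes it only abstractly: it replaces the current dictionary with the cross product of current and incoming keys, joining names with `_` (see `self.dict[f"{key}_{key_in}"]` in `overlay_common.py`), while keys rejected by the filter callback are kept unchanged. A small illustration, using made-up variant names and no-op lambdas instead of real scene state, and assuming it runs inside Blender with `tests/python/overlay/` on `sys.path`:

# Illustration only: variant names and no-op lambdas are made up; the
# Permutations API is the one added in tests/python/overlay/overlay_common.py.
import overlay_common

modes = overlay_common.Permutations(None, {
    "object": lambda: None,
    "edit": lambda: None,
})
xray = overlay_common.Permutations("xray-off", {
    "xray-off": lambda: None,
    "xray-on": lambda: None,
})

# Only combine the x-ray variants with the "object" entry; "edit" is kept as-is.
modes.add(lambda key: key == "object", [xray])

print(list(modes.dict.keys()))
# -> ['object_xray-off', 'object_xray-on', 'edit']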