diff --git a/tests/performance/api/config.py b/tests/performance/api/config.py
index 132c3db03db..ce113074a4c 100644
--- a/tests/performance/api/config.py
+++ b/tests/performance/api/config.py
@@ -121,7 +121,8 @@ class TestConfig:
         config = TestConfig._read_config_module(self.base_dir)
         self.tests = TestCollection(env,
                                     getattr(config, 'tests', ['*']),
-                                    getattr(config, 'categories', ['*']))
+                                    getattr(config, 'categories', ['*']),
+                                    getattr(config, 'background', False))
         self.revisions = getattr(config, 'revisions', {})
         self.builds = getattr(config, 'builds', {})
         self.queue = TestQueue(self.base_dir / 'results.json')
@@ -243,6 +244,9 @@ class TestConfig:
             test_category = test.category()
 
             for device in self.devices:
+                if not (test.use_device() or device.type == "CPU"):
+                    continue
+
                 entry = self.queue.find(revision_name, test_name, test_category, device.id)
                 if entry:
                     # Test if revision hash or executable changed.
diff --git a/tests/performance/api/test.py b/tests/performance/api/test.py
index dfffe4b6c7a..8c08d3158a7 100644
--- a/tests/performance/api/test.py
+++ b/tests/performance/api/test.py
@@ -26,6 +26,12 @@ class Test:
         """
         return False
 
+    def use_background(self) -> bool:
+        """
+        Test runs in background mode and requires no display.
+        """
+        return True
+
     @abc.abstractmethod
     def run(self, env, device_id: str) -> Dict:
         """
@@ -34,7 +40,7 @@ class Test:
 
 
 class TestCollection:
-    def __init__(self, env, names_filter: List = ['*'], categories_filter: List = ['*']):
+    def __init__(self, env, names_filter: List = ['*'], categories_filter: List = ['*'], background: bool = False):
         import importlib
         import pkgutil
         import tests
@@ -48,6 +54,9 @@ class TestCollection:
             tests = module.generate(env)
 
             for test in tests:
+                if background and not test.use_background():
+                    continue
+
                 test_category = test.category()
                 found = False
                 for category_filter in categories_filter:
diff --git a/tests/performance/benchmark.py b/tests/performance/benchmark.py
index 08ce9fa0db2..e2fac612afc 100755
--- a/tests/performance/benchmark.py
+++ b/tests/performance/benchmark.py
@@ -61,7 +61,10 @@ def print_row(config: api.TestConfig, entries: List, end='\n') -> None:
         row += f"{revision: <15} "
 
     if config.queue.has_multiple_categories:
-        row += f"{entries[0].category: <15} "
+        category_name = entries[0].category
+        if entries[0].device_type != "CPU":
+            category_name += " " + entries[0].device_type
+        row += f"{category_name: <15} "
     row += f"{entries[0].test: <40} "
 
     for entry in entries:
@@ -99,10 +102,13 @@ def run_entry(env: api.TestEnvironment,
               row: List,
               entry: api.TestEntry,
               update_only: bool):
+    updated = False
+    failed = False
+
     # Check if entry needs to be run.
     if update_only and entry.status not in ('queued', 'outdated'):
         print_row(config, row, end='\r')
-        return False
+        return updated, failed
 
     # Run test entry.
     revision = entry.revision
@@ -115,7 +121,9 @@ def run_entry(env: api.TestEnvironment,
 
     test = config.tests.find(testname, testcategory)
     if not test:
-        return False
+        return updated, failed
+
+    updated = True
 
     # Log all output to dedicated log file.
     logname = testcategory + '_' + testname + '_' + revision
@@ -128,12 +136,13 @@ def run_entry(env: api.TestEnvironment,
     entry.error_msg = ''
 
     # Build revision, or just set path to existing executable.
-    entry.status = 'building'
-    print_row(config, row, end='\r')
     executable_ok = True
     if len(entry.executable):
         env.set_blender_executable(pathlib.Path(entry.executable), environment)
     else:
+        entry.status = 'building'
+        print_row(config, row, end='\r')
+
         if config.benchmark_type == "comparison":
             install_dir = config.builds_dir / revision
         else:
@@ -143,6 +152,7 @@ def run_entry(env: api.TestEnvironment,
         if not executable_ok:
             entry.status = 'failed'
             entry.error_msg = 'Failed to build'
+            failed = True
         else:
             env.set_blender_executable(install_dir, environment)
 
@@ -159,6 +169,7 @@ def run_entry(env: api.TestEnvironment,
         except KeyboardInterrupt as e:
             raise e
         except Exception as e:
+            failed = True
            entry.status = 'failed'
             entry.error_msg = str(e)
 
@@ -171,7 +182,7 @@ def run_entry(env: api.TestEnvironment,
     env.unset_log_file()
     env.set_default_blender_executable()
 
-    return True
+    return updated, failed
 
 
 def cmd_init(env: api.TestEnvironment, argv: List):
@@ -258,6 +269,8 @@ def cmd_run(env: api.TestEnvironment, argv: List, update_only: bool):
     parser.add_argument('test', nargs='?', default='*')
     args = parser.parse_args(argv)
 
+    exit_code = 0
+
     configs = env.get_configs(args.config)
     for config in configs:
         updated = False
@@ -267,11 +280,14 @@ def cmd_run(env: api.TestEnvironment, argv: List, update_only: bool):
             if match_entry(row[0], args):
                 for entry in row:
                     try:
-                        if run_entry(env, config, row, entry, update_only):
+                        test_updated, test_failed = run_entry(env, config, row, entry, update_only)
+                        if test_updated:
                             updated = True
                             # Write queue every time in case running gets interrupted,
                             # so it can be resumed.
                             config.queue.write()
+                        if test_failed:
+                            exit_code = 1
                     except KeyboardInterrupt as e:
                         cancel = True
                         break
@@ -290,6 +306,8 @@ def cmd_run(env: api.TestEnvironment, argv: List, update_only: bool):
 
         print("\nfile://" + str(html_filepath))
 
+    sys.exit(exit_code)
+
 
 def cmd_graph(argv: List):
     # Create graph from a given JSON results file.
diff --git a/tests/performance/tests/eevee.py b/tests/performance/tests/eevee.py
index bc233decb07..e1e969d45b8 100644
--- a/tests/performance/tests/eevee.py
+++ b/tests/performance/tests/eevee.py
@@ -114,6 +114,9 @@ else:
     def category(self):
         return "eevee"
 
+    def use_background(self):
+        return False
+
     def run(self, env, device_id):
         args = {}
         _, log = env.run_in_blender(_run, args, [self.filepath], foreground=True)
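
For reference, a minimal sketch of a benchmark config module that opts into the new behaviour; the attribute names mirror the getattr() lookups in TestConfig above, while the values themselves are only illustrative:

# config.py in a benchmark configuration directory (illustrative values).
tests = ['*']
categories = ['*']

# Read via getattr(config, 'background', False): when True, TestCollection
# drops any test whose use_background() returns False (such as the EEVEE
# tests above), so the whole queue can be run without a display.
background = True

Tests that need a window opt out by overriding use_background() to return False, as eevee.py does above, and cmd_run now exits with status 1 when any entry fails to build or run, so CI wrappers can detect a broken run from the process exit code.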