diff --git a/tests/performance/api/__init__.py b/tests/performance/api/__init__.py
index a74c97fb295..1a54655e925 100644
--- a/tests/performance/api/__init__.py
+++ b/tests/performance/api/__init__.py
@@ -2,7 +2,7 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 
-from .environment import TestEnvironment
+from .environment import TestFailure, TestEnvironment
 from .device import TestDevice, TestMachine
 from .config import TestEntry, TestQueue, TestConfig
 from .test import Test, TestCollection
diff --git a/tests/performance/api/config.py b/tests/performance/api/config.py
index bc407a3ca8b..cd663316adf 100644
--- a/tests/performance/api/config.py
+++ b/tests/performance/api/config.py
@@ -31,7 +31,10 @@ class TestEntry:
     device_id: str = 'CPU'
     device_name: str = 'Unknown CPU'
     status: str = 'queued'
+    # Short, single-line error.
     error_msg: str = ''
+    # More detailed error info, potentially multi-lines.
+    exception_msg: str = ''
     output: dict = field(default_factory=dict)
     benchmark_type: str = 'comparison'
 
diff --git a/tests/performance/api/environment.py b/tests/performance/api/environment.py
index a9f8d6f8529..7165f218161 100644
--- a/tests/performance/api/environment.py
+++ b/tests/performance/api/environment.py
@@ -21,6 +21,20 @@ from .config import TestConfig
 from .device import TestMachine
 
 
+class TestFailure(Exception):
+    def __init__(self, *args, message, output_lines=[], **kwargs):
+        super().__init__(message, *args)
+        self.message = message
+        self.output_lines = output_lines
+
+    def __str__(self):
+        msg = self.message
+        if self.output_lines:
+            msg += f":\n{'': <10} | "
+            msg += f"\n{'': <10} | ".join(l.rstrip(' \r\n\t') for l in self.output_lines)
+        return msg
+
+
 class TestEnvironment:
     def __init__(self, blender_git_dir: pathlib.Path, base_dir: pathlib.Path):
         self.blender_git_dir = blender_git_dir
@@ -223,7 +237,7 @@ class TestEnvironment:
 
         # Raise error on failure
         if proc.returncode != 0 and not silent:
-            raise Exception("Error executing command")
+            raise TestFailure(message="Error executing command", output_lines=lines)
 
         return lines
 
diff --git a/tests/performance/benchmark.py b/tests/performance/benchmark.py
index e6d5c953371..fd52e0ec060 100755
--- a/tests/performance/benchmark.py
+++ b/tests/performance/benchmark.py
@@ -92,6 +92,19 @@ def print_row(config: api.TestConfig, entries: list, end='\n') -> None:
     print(row, end=end, flush=True)
 
 
+def print_entry(config: api.TestConfig, entry: api.TestEntry) -> None:
+    # Print a single test entry, potentially on multiple lines, with more details than in `print_row`.
+    # NOTE: Currently only used to print detailed error info.
+
+    print_row(config, [entry])
+
+    if entry.status != 'failed':
+        return
+    if not entry.exception_msg:
+        return
+    print(entry.exception_msg, flush=True)
+
+
 def match_entry(entry: api.TestEntry, args: argparse.Namespace):
     # Filter tests by name and category.
     return (
@@ -176,7 +189,8 @@ def run_entry(env: api.TestEnvironment,
     except Exception as e:
         failed = True
         entry.status = 'failed'
-        entry.error_msg = str(e)
+        entry.error_msg = 'Failed to run'
+        entry.exception_msg = str(e)
 
     print_row(config, row, end='\r')
 
@@ -293,6 +307,7 @@ def cmd_run(env: api.TestEnvironment, argv: list, update_only: bool):
                     config.queue.write()
                     if test_failed:
                         exit_code = 1
+                        print_entry(config, entry)
             except KeyboardInterrupt as e:
                 cancel = True
                 break
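
For reference, a minimal standalone sketch of what ends up in `entry.exception_msg` and gets printed by `print_entry()`: the `TestFailure` class body is copied from the patch above, while the command output lines fed to it are invented for illustration and not real Blender output.

# Sketch only: TestFailure copied from the patch, driven with made-up output lines.
class TestFailure(Exception):
    def __init__(self, *args, message, output_lines=[], **kwargs):
        super().__init__(message, *args)
        self.message = message
        self.output_lines = output_lines

    def __str__(self):
        msg = self.message
        if self.output_lines:
            msg += f":\n{'': <10} | "
            msg += f"\n{'': <10} | ".join(l.rstrip(' \r\n\t') for l in self.output_lines)
        return msg


try:
    # Hypothetical captured output from a failed command invocation.
    raise TestFailure(message="Error executing command",
                      output_lines=["Read blend: /tmp/example.blend\n",
                                    "Error: engine not found\n"])
except TestFailure as e:
    # benchmark.py stores str(e) in entry.exception_msg and prints it later.
    print(e)

# Prints the short message, then each output line indented and prefixed with " | ":
#
#   Error executing command:
#              | Read blend: /tmp/example.blend
#              | Error: engine not found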