Benchmark Tests: improve error reporting.
When a benchmark test failed, there was very little information available to investigate it. Now the stdout/stderr generated by the failing command is reported.
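As a quick illustration of the new behavior, here is a minimal standalone sketch: the TestFailure class mirrors the one added by this commit, while the captured command output is made up for the example.

class TestFailure(Exception):
    def __init__(self, *args, message, output_lines=[], **kwargs):
        super().__init__(message, *args)
        self.message = message
        self.output_lines = output_lines

    def __str__(self):
        # Append each captured output line, indented and prefixed with " | ".
        msg = self.message
        if self.output_lines:
            msg += f":\n{'': <10} | "
            msg += f"\n{'': <10} | ".join(l.rstrip(' \r\n\t') for l in self.output_lines)
        return msg

# Hypothetical stdout/stderr captured from a failing benchmark command.
lines = ["Blender quit unexpectedly\n", "Error: out of memory\n"]
print(TestFailure(message="Error executing command", output_lines=lines))

This prints the short message followed by the captured lines:

Error executing command:
           | Blender quit unexpectedly
           | Error: out of memory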
@@ -2,7 +2,7 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 
-from .environment import TestEnvironment
+from .environment import TestFailure, TestEnvironment
 from .device import TestDevice, TestMachine
 from .config import TestEntry, TestQueue, TestConfig
 from .test import Test, TestCollection
@@ -31,7 +31,10 @@ class TestEntry:
     device_id: str = 'CPU'
     device_name: str = 'Unknown CPU'
     status: str = 'queued'
+    # Short, single-line error.
     error_msg: str = ''
+    # More detailed error info, potentially multi-lines.
+    exception_msg: str = ''
     output: dict = field(default_factory=dict)
     benchmark_type: str = 'comparison'
 
@@ -21,6 +21,20 @@ from .config import TestConfig
 from .device import TestMachine
 
 
+class TestFailure(Exception):
+    def __init__(self, *args, message, output_lines=[], **kwargs):
+        super().__init__(message, *args)
+        self.message = message
+        self.output_lines = output_lines
+
+    def __str__(self):
+        msg = self.message
+        if self.output_lines:
+            msg += f":\n{'': <10} | "
+            msg += f"\n{'': <10} | ".join(l.rstrip(' \r\n\t') for l in self.output_lines)
+        return msg
+
+
 class TestEnvironment:
     def __init__(self, blender_git_dir: pathlib.Path, base_dir: pathlib.Path):
         self.blender_git_dir = blender_git_dir
@@ -223,7 +237,7 @@ class TestEnvironment:
 
         # Raise error on failure
         if proc.returncode != 0 and not silent:
-            raise Exception("Error executing command")
+            raise TestFailure(message="Error executing command", output_lines=lines)
 
         return lines
 
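For context, `proc` and `lines` in the hunk above come from the surrounding command-execution code, which is not part of this diff. A rough sketch of that pattern, assuming a subprocess-based runner that merges stderr into stdout; the helper name run_command and the capture details are assumptions, not code from this commit.

import subprocess

class TestFailure(Exception):
    # Stand-in with the same __init__ as the TestFailure class added above.
    def __init__(self, *args, message, output_lines=[], **kwargs):
        super().__init__(message, *args)
        self.message = message
        self.output_lines = output_lines

def run_command(args: list, silent: bool = False) -> list:
    # Run the command, capturing stdout and stderr together, line by line.
    proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    lines = [line.decode(errors='replace') for line in proc.stdout]
    proc.wait()

    # Raise error on failure, keeping the captured output for the report.
    if proc.returncode != 0 and not silent:
        raise TestFailure(message="Error executing command", output_lines=lines)

    return lines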
@@ -92,6 +92,19 @@ def print_row(config: api.TestConfig, entries: list, end='\n') -> None:
     print(row, end=end, flush=True)
 
 
+def print_entry(config: api.TestConfig, entry: api.TestEntry) -> None:
+    # Print a single test entry, potentially on multiple lines, with more details than in `print_row`.
+    # NOTE: Currently only used to print detailed error info.
+
+    print_row(config, [entry])
+
+    if entry.status != 'failed':
+        return
+    if not entry.exception_msg:
+        return
+    print(entry.exception_msg, flush=True)
+
+
 def match_entry(entry: api.TestEntry, args: argparse.Namespace):
     # Filter tests by name and category.
     return (
@@ -176,7 +189,8 @@ def run_entry(env: api.TestEnvironment,
     except Exception as e:
         failed = True
         entry.status = 'failed'
-        entry.error_msg = str(e)
+        entry.error_msg = 'Failed to run'
+        entry.exception_msg = str(e)
 
     print_row(config, row, end='\r')
 
@@ -293,6 +307,7 @@ def cmd_run(env: api.TestEnvironment, argv: list, update_only: bool):
                 config.queue.write()
                 if test_failed:
                     exit_code = 1
+                    print_entry(config, entry)
             except KeyboardInterrupt as e:
                 cancel = True
                 break