Cleanup: update use of typing in Python scripts
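The typing module's Dict and List aliases are replaced by the builtin generic types from PEP 585 (Python 3.9+), and Callable is now imported from collections.abc. A minimal before/after sketch of the pattern (a standalone illustration; only the annotations mirror the diff below, the bodies and the rows_builtin name are placeholders):

    # Before: container aliases imported from the typing module.
    from typing import Dict, List

    def rows(use_revision_columns: bool) -> List:
        return []

    # After: the builtin types are subscriptable generics on Python 3.9+.
    def rows_builtin(use_revision_columns: bool) -> list:
        return []

    def to_json() -> dict:
        return {}
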
@@ -7,7 +7,6 @@ import json
 import pathlib
 
 from dataclasses import dataclass, field
-from typing import Dict, List
 
 from .test import TestCollection
 
@@ -25,7 +24,7 @@ class TestEntry:
     category: str = ''
     revision: str = ''
     git_hash: str = ''
-    environment: Dict = field(default_factory=dict)
+    environment: dict = field(default_factory=dict)
     executable: str = ''
     date: int = 0
     device_type: str = 'CPU'
@@ -33,10 +32,10 @@ class TestEntry:
     device_name: str = 'Unknown CPU'
     status: str = 'queued'
     error_msg: str = ''
-    output: Dict = field(default_factory=dict)
+    output: dict = field(default_factory=dict)
     benchmark_type: str = 'comparison'
 
-    def to_json(self) -> Dict:
+    def to_json(self) -> dict:
         json_dict = {}
         for field in self.__dataclass_fields__:
             json_dict[field] = getattr(self, field)
@@ -65,7 +64,7 @@ class TestQueue:
             entry.from_json(json_entry)
             self.entries.append(entry)
 
-    def rows(self, use_revision_columns: bool) -> List:
+    def rows(self, use_revision_columns: bool) -> list:
         # Generate rows of entries for printing and running.
         entries = sorted(
            self.entries,
@@ -92,7 +91,7 @@ class TestQueue:
 
         return [value for _, value in sorted(rows.items())]
 
-    def find(self, revision: str, test: str, category: str, device_id: str) -> Dict:
+    def find(self, revision: str, test: str, category: str, device_id: str) -> dict:
         for entry in self.entries:
             if entry.revision == revision and \
                entry.test == test and \
@@ -133,7 +132,7 @@ class TestConfig:
 
         self._update_queue(env)
 
-    def revision_names(self) -> List:
+    def revision_names(self) -> list:
         return sorted(list(self.revisions.keys()) + list(self.builds.keys()))
 
     def device_name(self, device_id: str) -> str:
@@ -162,7 +161,7 @@ class TestConfig:
             f.write(default_config)
 
     @staticmethod
-    def read_blender_executables(env, name) -> List:
+    def read_blender_executables(env, name) -> list:
         config = TestConfig._read_config_module(env.base_dir / name)
         builds = getattr(config, 'builds', {})
         executables = []
@@ -182,7 +181,7 @@ class TestConfig:
         spec.loader.exec_module(mod)
         return mod
 
-    def _update_devices(self, env, device_filters: List) -> None:
+    def _update_devices(self, env, device_filters: list) -> None:
         # Find devices matching the filters.
         need_gpus = device_filters != ['CPU']
         machine = env.get_machine(need_gpus)
@@ -4,7 +4,6 @@
 
 import platform
 import subprocess
-from typing import List
 
 
 def get_cpu_name() -> str:
@@ -23,7 +22,7 @@ def get_cpu_name() -> str:
     return "Unknown CPU"
 
 
-def get_gpu_device(args: None) -> List:
+def get_gpu_device(args: None) -> list:
     # Get the list of available Cycles GPU devices.
    import bpy
 
@@ -12,7 +12,10 @@ import platform
 import pickle
 import subprocess
 import sys
-from typing import Callable, Dict, List
+
+from collections.abc import (
+    Callable,
+)
 
 from .config import TestConfig
 from .device import TestMachine
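An aside on the import above: since Python 3.9, typing.Callable is a deprecated alias of collections.abc.Callable with identical subscription syntax, which is why annotations such as the run_in_blender signature further down this file are unaffected. A standalone sketch; the apply_args helper is hypothetical and not part of this file:

    from collections.abc import Callable

    # Same annotation shape as run_in_blender's function parameter.
    def apply_args(function: Callable[[dict], dict], args: dict) -> dict:
        return function(args)
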
@@ -121,7 +124,7 @@ class TestEnvironment:
         self._init_default_blender_executable()
         return True
 
-    def set_blender_executable(self, executable_path: pathlib.Path, environment: Dict = {}) -> None:
+    def set_blender_executable(self, executable_path: pathlib.Path, environment: dict = {}) -> None:
         if executable_path.is_dir():
             executable_path = self._blender_executable_from_path(executable_path)
 
@@ -183,7 +186,7 @@ class TestEnvironment:
     def unset_log_file(self) -> None:
         self.log_file = None
 
-    def call(self, args: List[str], cwd: pathlib.Path, silent: bool = False, environment: Dict = {}) -> List[str]:
+    def call(self, args: list[str], cwd: pathlib.Path, silent: bool = False, environment: dict = {}) -> list[str]:
         # Execute command with arguments in specified directory,
         # and return combined stdout and stderr output.
 
@@ -224,7 +227,7 @@ class TestEnvironment:
 
         return lines
 
-    def call_blender(self, args: List[str], foreground=False) -> List[str]:
+    def call_blender(self, args: list[str], foreground=False) -> list[str]:
         # Execute Blender command with arguments.
         common_args = ['--factory-startup', '-noaudio', '--enable-autoexec', '--python-exit-code', '1']
         if foreground:
@@ -236,10 +239,10 @@ class TestEnvironment:
                          environment=self.blender_executable_environment)
 
     def run_in_blender(self,
-                       function: Callable[[Dict], Dict],
-                       args: Dict,
-                       blender_args: List = [],
-                       foreground=False) -> Dict:
+                       function: Callable[[dict], dict],
+                       args: dict,
+                       blender_args: list = [],
+                       foreground=False) -> dict:
         # Run function in a Blender instance. Arguments and return values are
         # passed as a Python object that must be serializable with pickle.
 
@@ -272,7 +275,7 @@ class TestEnvironment:
 
         return {}, lines
 
-    def find_blend_files(self, dirpath: pathlib.Path) -> List:
+    def find_blend_files(self, dirpath: pathlib.Path) -> list:
         # Find .blend files in subdirectories of the given directory in the
         # lib/benchmarks directory.
         dirpath = self.benchmarks_dir / dirpath
@@ -281,7 +284,7 @@ class TestEnvironment:
             filepaths.append(pathlib.Path(filename))
         return filepaths
 
-    def get_config_names(self) -> List:
+    def get_config_names(self) -> list:
         names = []
 
         if self.base_dir.exists():
@@ -292,7 +295,7 @@ class TestEnvironment:
 
         return names
 
-    def get_configs(self, name: str = None, names_only: bool = False) -> List:
+    def get_configs(self, name: str = None, names_only: bool = False) -> list:
         # Get list of configurations in the benchmarks directory.
         configs = []
 
@@ -6,11 +6,10 @@ from . import TestQueue
 
 import json
 import pathlib
-from typing import Dict, List
 
 
 class TestGraph:
-    def __init__(self, json_filepaths: List[pathlib.Path]):
+    def __init__(self, json_filepaths: list[pathlib.Path]):
         # Initialize graph from JSON file. Note that this is implemented without
         # accessing any benchmark environment or configuration. This ways benchmarks
         # run on various machines can be aggregated and the graph generated on another
@@ -61,7 +60,7 @@ class TestGraph:
 
         self.json = json.dumps(data, indent=2)
 
-    def chart(self, device_name: str, chart_name: str, entries: List, chart_type: str, output: str) -> Dict:
+    def chart(self, device_name: str, chart_name: str, entries: list, chart_type: str, output: str) -> dict:
         # Gather used tests.
         tests = {}
         for entry in entries:
@@ -4,7 +4,6 @@
 
 import abc
 import fnmatch
-from typing import Dict, List
 
 
 class Test:
@@ -33,14 +32,14 @@ class Test:
         return True
 
     @abc.abstractmethod
-    def run(self, env, device_id: str) -> Dict:
+    def run(self, env, device_id: str) -> dict:
         """
         Execute the test and report results.
         """
 
 
 class TestCollection:
-    def __init__(self, env, names_filter: List = ['*'], categories_filter: List = ['*'], background: bool = False):
+    def __init__(self, env, names_filter: list = ['*'], categories_filter: list = ['*'], background: bool = False):
         import importlib
         import pkgutil
         import tests
@@ -10,7 +10,6 @@ import glob
 import pathlib
 import shutil
 import sys
-from typing import List
 
 
 def find_blender_git_dir() -> pathlib.Path:
@@ -49,7 +48,7 @@ def print_header(config: api.TestConfig) -> None:
     print(header)
 
 
-def print_row(config: api.TestConfig, entries: List, end='\n') -> None:
+def print_row(config: api.TestConfig, entries: list, end='\n') -> None:
     # Print one or more test entries on a row.
     row = ""
 
@@ -99,7 +98,7 @@ def match_entry(entry: api.TestEntry, args: argparse.Namespace):
 
 def run_entry(env: api.TestEnvironment,
               config: api.TestConfig,
-              row: List,
+              row: list,
              entry: api.TestEntry,
              update_only: bool):
     updated = False
@@ -185,7 +184,7 @@ def run_entry(env: api.TestEnvironment,
     return updated, failed
 
 
-def cmd_init(env: api.TestEnvironment, argv: List):
+def cmd_init(env: api.TestEnvironment, argv: list):
     # Initialize benchmarks folder.
     parser = argparse.ArgumentParser()
     parser.add_argument('--build', default=False, action='store_true')
@@ -195,7 +194,7 @@ def cmd_init(env: api.TestEnvironment, argv: List):
     env.unset_log_file()
 
 
-def cmd_list(env: api.TestEnvironment, argv: List) -> None:
+def cmd_list(env: api.TestEnvironment, argv: list) -> None:
     # List devices, tests and configurations.
     print('DEVICES')
     machine = env.get_machine()
@@ -216,7 +215,7 @@ def cmd_list(env: api.TestEnvironment, argv: List) -> None:
         print(config_name)
 
 
-def cmd_status(env: api.TestEnvironment, argv: List):
+def cmd_status(env: api.TestEnvironment, argv: list):
     # Print status of tests in configurations.
     parser = argparse.ArgumentParser()
     parser.add_argument('config', nargs='?', default=None)
@@ -239,7 +238,7 @@ def cmd_status(env: api.TestEnvironment, argv: List):
         print_row(config, row)
 
 
-def cmd_reset(env: api.TestEnvironment, argv: List):
+def cmd_reset(env: api.TestEnvironment, argv: list):
     # Reset tests to re-run them.
     parser = argparse.ArgumentParser()
     parser.add_argument('config', nargs='?', default=None)
@@ -262,7 +261,7 @@ def cmd_reset(env: api.TestEnvironment, argv: List):
         shutil.rmtree(config.logs_dir)
 
 
-def cmd_run(env: api.TestEnvironment, argv: List, update_only: bool):
+def cmd_run(env: api.TestEnvironment, argv: list, update_only: bool):
     # Run tests.
     parser = argparse.ArgumentParser()
     parser.add_argument('config', nargs='?', default=None)
@@ -309,7 +308,7 @@ def cmd_run(env: api.TestEnvironment, argv: List, update_only: bool):
     sys.exit(exit_code)
 
 
-def cmd_graph(argv: List):
+def cmd_graph(argv: list):
     # Create graph from a given JSON results file.
     parser = argparse.ArgumentParser()
     parser.add_argument('json_file', nargs='+')
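A compatibility note on the new annotations (an observation, not part of the commit itself): subscripted builtins such as list[str] only evaluate at runtime on Python 3.9+; on 3.7 and 3.8 they are accepted inside annotations when evaluation is postponed per PEP 563:

    # Hypothetical compatibility sketch, not taken from the commit.
    from __future__ import annotations  # annotations stay unevaluated strings

    def call(args: list[str], silent: bool = False) -> list[str]:
        # Runs on Python 3.7+; without the future import, list[str] in an
        # annotation raises TypeError before Python 3.9.
        return list(args)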