Cleanup: update use of typing for Python scripts

This commit is contained in:
Campbell Barton
2024-10-23 12:48:09 +11:00
parent 39b9863cca
commit a0453ab87a
40 changed files with 287 additions and 367 deletions

View File

@@ -27,11 +27,9 @@ __all__ = (
"init",
)
from typing import (
Generator,
)
from collections.abc import (
Callable,
Iterator,
)
@@ -85,7 +83,7 @@ def init(cmake_path: str) -> bool:
def source_list(
path: str,
filename_check: Callable[[str], bool] | None = None,
) -> Generator[str, None, None]:
) -> Iterator[str]:
for dirpath, dirnames, filenames in os.walk(path):
# skip '.git'
dirnames[:] = [d for d in dirnames if not d.startswith(".")]

View File

@@ -22,11 +22,11 @@ import subprocess
from typing import (
Any,
Generator,
IO,
)
from collections.abc import (
Callable,
Iterator,
Sequence,
)
@@ -56,7 +56,7 @@ def is_c_any(filename: str) -> bool:
CMAKE_DIR = "."
def cmake_cache_var_iter() -> Generator[tuple[str, str, str], None, None]:
def cmake_cache_var_iter() -> Iterator[tuple[str, str, str]]:
import re
re_cache = re.compile(r'([A-Za-z0-9_\-]+)?:?([A-Za-z0-9_\-]+)?=(.*)$')
with open(join(CMAKE_DIR, "CMakeCache.txt"), 'r', encoding='utf-8') as cache_file:

View File

@@ -29,10 +29,8 @@ import string
import setuptools
import sys
from typing import (
Generator,
)
from collections.abc import (
Iterator,
Sequence,
)
@@ -89,7 +87,7 @@ def find_dominating_file(
# ------------------------------------------------------------------------------
# CMake Cache Access
def cmake_cache_var_iter(filepath_cmake_cache: str) -> Generator[tuple[str, str, str], None, None]:
def cmake_cache_var_iter(filepath_cmake_cache: str) -> Iterator[tuple[str, str, str]]:
re_cache = re.compile(r"([A-Za-z0-9_\-]+)?:?([A-Za-z0-9_\-]+)?=(.*)$")
with open(filepath_cmake_cache, "r", encoding="utf-8") as cache_file:
for l in cache_file:

View File

@@ -19,7 +19,6 @@ import subprocess
import time
from typing import (
Dict,
TextIO,
)
@@ -32,7 +31,7 @@ def man_format(data: str) -> str:
return data
def blender_extract_info(blender_bin: str) -> Dict[str, str]:
def blender_extract_info(blender_bin: str) -> dict[str, str]:
blender_env = {
"ASAN_OPTIONS": (
os.environ.get("ASAN_OPTIONS", "") +

View File

@@ -8,16 +8,13 @@ import os
import subprocess
import sys
from typing import (
Dict,
Generator,
Optional,
from collections.abc import (
Iterator,
Sequence,
Tuple,
)
def run(cmd: Sequence[str], *, env: Optional[Dict[str, str]] = None) -> None:
def run(cmd: Sequence[str], *, env: dict[str, str] | None = None) -> None:
print(" ", " ".join(cmd))
subprocess.check_call(cmd, env=env)
@@ -61,7 +58,7 @@ icons_blend = (
)
def names_and_time_from_path(path: str) -> Generator[Tuple[str, float], None, None]:
def names_and_time_from_path(path: str) -> Iterator[tuple[str, float]]:
for entry in os.scandir(path):
name = entry.name
if name.endswith(".dat"):

View File

@@ -975,7 +975,7 @@ def _extension_compat_cache_update_needed(
def _extension_compat_cache_create(
blender_id, # `tuple[Any, ...]`
extensions_enabled, # `set[tuple[str, str]]`
wheel_list, # `list[tuple[str, List[str]]]`
wheel_list, # `list[tuple[str, list[str]]]`
print_debug, # `Callable[[Any], None] | None`
): # `-> dict[str, Any]`
import os
@@ -1311,7 +1311,7 @@ def _fake_module_from_extension(mod_name, mod_path):
def _extension_sync_wheels(
*,
local_dir, # `str`
wheel_list, # `List[WheelSource]`
wheel_list, # `list[WheelSource]`
debug, # `bool`
): # `-> None`
import os

View File

@@ -14,18 +14,16 @@ import bpy
from bpy.types import Action
from dataclasses import dataclass
from typing import (
List,
from collections.abc import (
Mapping,
Sequence,
Tuple,
)
from rna_prop_ui import (
rna_idprop_value_to_python,
)
FCurveKey = Tuple[
FCurveKey = tuple[
# `fcurve.data_path`.
str,
# `fcurve.array_index`.
@@ -33,7 +31,7 @@ FCurveKey = Tuple[
]
# List of `[frame0, value0, frame1, value1, ...]` pairs.
ListKeyframes = List[float]
ListKeyframes = list[float]
@dataclass

View File

@@ -7,7 +7,6 @@ import json
import pathlib
from dataclasses import dataclass, field
from typing import Dict, List
from .test import TestCollection
@@ -25,7 +24,7 @@ class TestEntry:
category: str = ''
revision: str = ''
git_hash: str = ''
environment: Dict = field(default_factory=dict)
environment: dict = field(default_factory=dict)
executable: str = ''
date: int = 0
device_type: str = 'CPU'
@@ -33,10 +32,10 @@ class TestEntry:
device_name: str = 'Unknown CPU'
status: str = 'queued'
error_msg: str = ''
output: Dict = field(default_factory=dict)
output: dict = field(default_factory=dict)
benchmark_type: str = 'comparison'
def to_json(self) -> Dict:
def to_json(self) -> dict:
json_dict = {}
for field in self.__dataclass_fields__:
json_dict[field] = getattr(self, field)
@@ -65,7 +64,7 @@ class TestQueue:
entry.from_json(json_entry)
self.entries.append(entry)
def rows(self, use_revision_columns: bool) -> List:
def rows(self, use_revision_columns: bool) -> list:
# Generate rows of entries for printing and running.
entries = sorted(
self.entries,
@@ -92,7 +91,7 @@ class TestQueue:
return [value for _, value in sorted(rows.items())]
def find(self, revision: str, test: str, category: str, device_id: str) -> Dict:
def find(self, revision: str, test: str, category: str, device_id: str) -> dict:
for entry in self.entries:
if entry.revision == revision and \
entry.test == test and \
@@ -133,7 +132,7 @@ class TestConfig:
self._update_queue(env)
def revision_names(self) -> List:
def revision_names(self) -> list:
return sorted(list(self.revisions.keys()) + list(self.builds.keys()))
def device_name(self, device_id: str) -> str:
@@ -162,7 +161,7 @@ class TestConfig:
f.write(default_config)
@staticmethod
def read_blender_executables(env, name) -> List:
def read_blender_executables(env, name) -> list:
config = TestConfig._read_config_module(env.base_dir / name)
builds = getattr(config, 'builds', {})
executables = []
@@ -182,7 +181,7 @@ class TestConfig:
spec.loader.exec_module(mod)
return mod
def _update_devices(self, env, device_filters: List) -> None:
def _update_devices(self, env, device_filters: list) -> None:
# Find devices matching the filters.
need_gpus = device_filters != ['CPU']
machine = env.get_machine(need_gpus)

View File

@@ -4,7 +4,6 @@
import platform
import subprocess
from typing import List
def get_cpu_name() -> str:
@@ -23,7 +22,7 @@ def get_cpu_name() -> str:
return "Unknown CPU"
def get_gpu_device(args: None) -> List:
def get_gpu_device(args: None) -> list:
# Get the list of available Cycles GPU devices.
import bpy

View File

@@ -12,7 +12,10 @@ import platform
import pickle
import subprocess
import sys
from typing import Callable, Dict, List
from collections.abc import (
Callable,
)
from .config import TestConfig
from .device import TestMachine
@@ -121,7 +124,7 @@ class TestEnvironment:
self._init_default_blender_executable()
return True
def set_blender_executable(self, executable_path: pathlib.Path, environment: Dict = {}) -> None:
def set_blender_executable(self, executable_path: pathlib.Path, environment: dict = {}) -> None:
if executable_path.is_dir():
executable_path = self._blender_executable_from_path(executable_path)
@@ -183,7 +186,7 @@ class TestEnvironment:
def unset_log_file(self) -> None:
self.log_file = None
def call(self, args: List[str], cwd: pathlib.Path, silent: bool = False, environment: Dict = {}) -> List[str]:
def call(self, args: list[str], cwd: pathlib.Path, silent: bool = False, environment: dict = {}) -> list[str]:
# Execute command with arguments in specified directory,
# and return combined stdout and stderr output.
@@ -224,7 +227,7 @@ class TestEnvironment:
return lines
def call_blender(self, args: List[str], foreground=False) -> List[str]:
def call_blender(self, args: list[str], foreground=False) -> list[str]:
# Execute Blender command with arguments.
common_args = ['--factory-startup', '-noaudio', '--enable-autoexec', '--python-exit-code', '1']
if foreground:
@@ -236,10 +239,10 @@ class TestEnvironment:
environment=self.blender_executable_environment)
def run_in_blender(self,
function: Callable[[Dict], Dict],
args: Dict,
blender_args: List = [],
foreground=False) -> Dict:
function: Callable[[dict], dict],
args: dict,
blender_args: list = [],
foreground=False) -> dict:
# Run function in a Blender instance. Arguments and return values are
# passed as a Python object that must be serializable with pickle.
@@ -272,7 +275,7 @@ class TestEnvironment:
return {}, lines
def find_blend_files(self, dirpath: pathlib.Path) -> List:
def find_blend_files(self, dirpath: pathlib.Path) -> list:
# Find .blend files in subdirectories of the given directory in the
# lib/benchmarks directory.
dirpath = self.benchmarks_dir / dirpath
@@ -281,7 +284,7 @@ class TestEnvironment:
filepaths.append(pathlib.Path(filename))
return filepaths
def get_config_names(self) -> List:
def get_config_names(self) -> list:
names = []
if self.base_dir.exists():
@@ -292,7 +295,7 @@ class TestEnvironment:
return names
def get_configs(self, name: str = None, names_only: bool = False) -> List:
def get_configs(self, name: str = None, names_only: bool = False) -> list:
# Get list of configurations in the benchmarks directory.
configs = []

View File

@@ -6,11 +6,10 @@ from . import TestQueue
import json
import pathlib
from typing import Dict, List
class TestGraph:
def __init__(self, json_filepaths: List[pathlib.Path]):
def __init__(self, json_filepaths: list[pathlib.Path]):
# Initialize graph from JSON file. Note that this is implemented without
# accessing any benchmark environment or configuration. This ways benchmarks
# run on various machines can be aggregated and the graph generated on another
@@ -61,7 +60,7 @@ class TestGraph:
self.json = json.dumps(data, indent=2)
def chart(self, device_name: str, chart_name: str, entries: List, chart_type: str, output: str) -> Dict:
def chart(self, device_name: str, chart_name: str, entries: list, chart_type: str, output: str) -> dict:
# Gather used tests.
tests = {}
for entry in entries:

View File

@@ -4,7 +4,6 @@
import abc
import fnmatch
from typing import Dict, List
class Test:
@@ -33,14 +32,14 @@ class Test:
return True
@abc.abstractmethod
def run(self, env, device_id: str) -> Dict:
def run(self, env, device_id: str) -> dict:
"""
Execute the test and report results.
"""
class TestCollection:
def __init__(self, env, names_filter: List = ['*'], categories_filter: List = ['*'], background: bool = False):
def __init__(self, env, names_filter: list = ['*'], categories_filter: list = ['*'], background: bool = False):
import importlib
import pkgutil
import tests

View File

@@ -10,7 +10,6 @@ import glob
import pathlib
import shutil
import sys
from typing import List
def find_blender_git_dir() -> pathlib.Path:
@@ -49,7 +48,7 @@ def print_header(config: api.TestConfig) -> None:
print(header)
def print_row(config: api.TestConfig, entries: List, end='\n') -> None:
def print_row(config: api.TestConfig, entries: list, end='\n') -> None:
# Print one or more test entries on a row.
row = ""
@@ -99,7 +98,7 @@ def match_entry(entry: api.TestEntry, args: argparse.Namespace):
def run_entry(env: api.TestEnvironment,
config: api.TestConfig,
row: List,
row: list,
entry: api.TestEntry,
update_only: bool):
updated = False
@@ -185,7 +184,7 @@ def run_entry(env: api.TestEnvironment,
return updated, failed
def cmd_init(env: api.TestEnvironment, argv: List):
def cmd_init(env: api.TestEnvironment, argv: list):
# Initialize benchmarks folder.
parser = argparse.ArgumentParser()
parser.add_argument('--build', default=False, action='store_true')
@@ -195,7 +194,7 @@ def cmd_init(env: api.TestEnvironment, argv: List):
env.unset_log_file()
def cmd_list(env: api.TestEnvironment, argv: List) -> None:
def cmd_list(env: api.TestEnvironment, argv: list) -> None:
# List devices, tests and configurations.
print('DEVICES')
machine = env.get_machine()
@@ -216,7 +215,7 @@ def cmd_list(env: api.TestEnvironment, argv: List) -> None:
print(config_name)
def cmd_status(env: api.TestEnvironment, argv: List):
def cmd_status(env: api.TestEnvironment, argv: list):
# Print status of tests in configurations.
parser = argparse.ArgumentParser()
parser.add_argument('config', nargs='?', default=None)
@@ -239,7 +238,7 @@ def cmd_status(env: api.TestEnvironment, argv: List):
print_row(config, row)
def cmd_reset(env: api.TestEnvironment, argv: List):
def cmd_reset(env: api.TestEnvironment, argv: list):
# Reset tests to re-run them.
parser = argparse.ArgumentParser()
parser.add_argument('config', nargs='?', default=None)
@@ -262,7 +261,7 @@ def cmd_reset(env: api.TestEnvironment, argv: List):
shutil.rmtree(config.logs_dir)
def cmd_run(env: api.TestEnvironment, argv: List, update_only: bool):
def cmd_run(env: api.TestEnvironment, argv: list, update_only: bool):
# Run tests.
parser = argparse.ArgumentParser()
parser.add_argument('config', nargs='?', default=None)
@@ -309,7 +308,7 @@ def cmd_run(env: api.TestEnvironment, argv: List, update_only: bool):
sys.exit(exit_code)
def cmd_graph(argv: List):
def cmd_graph(argv: list):
# Create graph from a given JSON results file.
parser = argparse.ArgumentParser()
parser.add_argument('json_file', nargs='+')

View File

@@ -16,7 +16,6 @@ import pathlib
import subprocess
import sys
import unittest
from typing import Tuple
from modules.test_utils import (
with_tempdir,
@@ -44,7 +43,7 @@ class AbstractAlembicTest(AbstractBlenderRunnerTest):
# 'abcls' array notation, like "name[16]"
cls.abcls_array = re.compile(r'^(?P<name>[^\[]+)(\[(?P<arraysize>\d+)\])?$')
def abcls(self, *arguments) -> Tuple[int, str]:
def abcls(self, *arguments) -> tuple[int, str]:
"""Uses abcls and return its output.
:return: tuple (process exit status code, stdout)
@@ -403,7 +402,7 @@ class UVMapExportTest(AbstractAlembicTest):
basename = 'T77021-multiple-uvmaps-animated-mesh'
abc = tempdir / f'{basename}.abc'
script = f"import bpy; bpy.ops.wm.alembic_export(filepath='{abc.as_posix()}', start=1, end=1, " \
f"visible_objects_only=True, flatten=False)"
f"visible_objects_only=True, flatten=False)"
self.run_blender(f'{basename}.blend', script)
self.maxDiff = 1000

View File

@@ -10,7 +10,6 @@ import pathlib
import sys
import unittest
from math import degrees, radians
from typing import List
import bpy
@@ -148,7 +147,7 @@ class EulerFilterTest(AbstractAnimationTest, unittest.TestCase):
bpy.context.view_layer.objects.active = ob
@staticmethod
def active_object_rotation_channels() -> List[bpy.types.FCurve]:
def active_object_rotation_channels() -> list[bpy.types.FCurve]:
ob = bpy.context.view_layer.objects.active
action = ob.animation_data.action
return [action.fcurves.find('rotation_euler', index=idx) for idx in range(3)]

View File

@@ -30,15 +30,13 @@ NOTE:
import types
from typing import (
Any,
Dict,
Generator,
List,
Optional,
)
from collections.abc import (
Iterator,
Sequence,
Tuple,
)
KeyConfigData = List[Tuple[str, Tuple[Any], Dict[str, Any]]]
KeyConfigData = list[tuple[str, tuple[Any], dict[str, Any]]]
import contextlib
@@ -76,7 +74,7 @@ ALLOW_DUPLICATES = {
def temp_fn_argument_extractor(
mod: types.ModuleType,
mod_attr: str,
) -> Generator[List[Tuple[Tuple[Tuple[Any], ...], Dict[str, Dict[str, Any]]]], None, None]:
) -> Iterator[list[tuple[tuple[tuple[Any], ...], dict[str, dict[str, Any]]]]]:
"""
Temporarily intercept a function, so its arguments can be extracted.
The context manager gives us a list where each item is a tuple of
@@ -85,7 +83,7 @@ def temp_fn_argument_extractor(
args_collected = []
real_fn = getattr(mod, mod_attr)
def wrap_fn(*args: Tuple[Any], **kw: Dict[str, Any]) -> Any:
def wrap_fn(*args: tuple[Any], **kw: dict[str, Any]) -> Any:
args_collected.append((args, kw))
return real_fn(*args, **kw)
setattr(mod, mod_attr, wrap_fn)
@@ -100,7 +98,7 @@ def round_float_32(f: float) -> float:
return unpack("f", pack("f", f))[0] # type: ignore
def report_humanly_readable_difference(a: Any, b: Any) -> Optional[str]:
def report_humanly_readable_difference(a: Any, b: Any) -> str | None:
"""
Compare strings, return None when they match,
otherwise a humanly readable difference message.
@@ -117,7 +115,7 @@ def report_humanly_readable_difference(a: Any, b: Any) -> Optional[str]:
# -----------------------------------------------------------------------------
# Keymap Utilities.
def keyconfig_preset_scan() -> List[str]:
def keyconfig_preset_scan() -> list[str]:
"""
Return all bundled presets (keymaps), not user presets.
"""
@@ -185,7 +183,7 @@ def keymap_data_clean(keyconfig_data: KeyConfigData, *, relaxed: bool) -> None:
items[i] = item_op, item_event, None
def keyconfig_config_as_filename_component(values: Sequence[Tuple[str, Any]]) -> str:
def keyconfig_config_as_filename_component(values: Sequence[tuple[str, Any]]) -> str:
"""
Takes a configuration, eg:
@@ -211,7 +209,7 @@ def keyconfig_activate_and_extract_data(
filepath: str,
*,
relaxed: bool,
config: Sequence[Tuple[str, Any]],
config: Sequence[tuple[str, Any]],
) -> KeyConfigData:
"""
Activate the key-map by filepath,
@@ -247,7 +245,7 @@ def keyconfig_report_duplicates(keyconfig_data: KeyConfigData) -> str:
error_text = []
for km_idname, km_args, km_items_data in keyconfig_data:
items = tuple(km_items_data["items"])
unique: Dict[str, List[int]] = {}
unique: dict[str, list[int]] = {}
for i, (item_op, item_event, item_prop) in enumerate(items):
# Ensure stable order as `repr` will use order of definition.
item_event = {key: item_event[key] for key in sorted(item_event.keys())}

View File

@@ -15,11 +15,8 @@ import os
import sys
import argparse
from typing import (
Generator,
List,
Optional,
Tuple,
from collections.abc import (
Iterator,
)
SOURCE_DIR = os.path.abspath(os.path.normpath(os.path.join(os.path.dirname(__file__), "..", "..")))
@@ -31,7 +28,7 @@ SORT_BY_FN = {
}
def blend_list(path: str) -> Generator[str, None, None]:
def blend_list(path: str) -> Iterator[str]:
for dirpath, dirnames, filenames in os.walk(path):
# skip '.git'
dirnames[:] = [d for d in dirnames if not d.startswith(".")]
@@ -53,7 +50,7 @@ def load_blend_file(filepath: str) -> None:
bpy.ops.wm.open_mainfile(filepath=filepath)
def load_files_immediately(blend_files: List[str], blend_file_index_offset: int) -> None:
def load_files_immediately(blend_files: list[str], blend_file_index_offset: int) -> None:
index = blend_file_index_offset
for filepath in blend_files:
print_load_message(filepath, index)
@@ -61,10 +58,10 @@ def load_files_immediately(blend_files: List[str], blend_file_index_offset: int)
load_blend_file(filepath)
def load_files_with_wait(blend_files: List[str], blend_file_index_offset: int, wait: float) -> None:
def load_files_with_wait(blend_files: list[str], blend_file_index_offset: int, wait: float) -> None:
index = 0
def load_on_timer() -> Optional[float]:
def load_on_timer() -> float | None:
nonlocal index
if index >= len(blend_files):
sys.exit(0)
@@ -80,7 +77,7 @@ def load_files_with_wait(blend_files: List[str], blend_file_index_offset: int, w
bpy.app.timers.register(load_on_timer, persistent=True)
def argparse_handle_int_range(value: str) -> Tuple[int, int]:
def argparse_handle_int_range(value: str) -> tuple[int, int]:
range_beg, sep, range_end = value.partition(":")
if not sep:
raise argparse.ArgumentTypeError("Expected a \":\" separator!")
@@ -151,7 +148,7 @@ def argparse_create() -> argparse.ArgumentParser:
return parser
def main() -> Optional[int]:
def main() -> int | None:
try:
argv_sep = sys.argv.index("--")
except ValueError:

View File

@@ -47,11 +47,10 @@ import tempfile
from typing import (
Any,
Dict,
)
from collections.abc import (
Iterator,
Optional,
Sequence,
Tuple,
)
@@ -123,9 +122,9 @@ class backend_wayland(backend_base):
@staticmethod
def _weston_env_and_ini_from_portable(
*,
wayland_root_dir: Optional[str],
weston_root_dir: Optional[str],
) -> Tuple[Optional[Dict[str, str]], str]:
wayland_root_dir: str | None,
weston_root_dir: str | None,
) -> tuple[dict[str, str] | None, str]:
"""
Construct a portable environment to run WESTON in.
"""
@@ -212,7 +211,7 @@ class backend_wayland(backend_base):
)
@staticmethod
def _weston_env_and_ini_from_system() -> Tuple[Optional[Dict[str, str]], str]:
def _weston_env_and_ini_from_system() -> tuple[dict[str, str] | None, str]:
weston_env = None
weston_ini = [
"[shell]",
@@ -227,7 +226,7 @@ class backend_wayland(backend_base):
)
@staticmethod
def _weston_env_and_ini() -> Tuple[Optional[Dict[str, str]], str]:
def _weston_env_and_ini() -> tuple[dict[str, str] | None, str]:
wayland_root_dir = os.environ.get("WAYLAND_ROOT_DIR")
weston_root_dir = os.environ.get("WESTON_ROOT_DIR")
@@ -262,7 +261,7 @@ class backend_wayland(backend_base):
"--height=600",
# `--config={..}` is added to point to a temp file.
]
cmd_kw: Dict[str, Any] = {}
cmd_kw: dict[str, Any] = {}
if weston_env is not None:
cmd_kw["env"] = weston_env
if not VERBOSE:

View File

@@ -18,12 +18,6 @@ check_docs_code_layout.py --markdown=markdown.txt
import os
import argparse
from typing import (
List,
Optional,
)
# -----------------------------------------------------------------------------
# Constants
@@ -42,7 +36,7 @@ def text_with_title_underline(text: str, underline: str = "=") -> str:
return "\n{:s}\n{:s}\n".format(text, len(text) * underline)
def html_extract_markdown_from_url(url: str) -> Optional[str]:
def html_extract_markdown_from_url(url: str) -> str | None:
"""
Download
"""
@@ -60,7 +54,7 @@ def html_extract_markdown_from_url(url: str) -> Optional[str]:
# -----------------------------------------------------------------------------
# markdown Text Parsing
def markdown_to_paths(markdown: str) -> List[str]:
def markdown_to_paths(markdown: str) -> list[str]:
file_paths = []
markdown = markdown.replace("<p>", "")
markdown = markdown.replace("</p>", "")
@@ -83,14 +77,14 @@ def markdown_to_paths(markdown: str) -> List[str]:
# -----------------------------------------------------------------------------
# Reporting
def report_known_markdown_paths(file_paths: List[str]) -> None:
def report_known_markdown_paths(file_paths: list[str]) -> None:
heading = "Paths Found in markdown Table"
print(text_with_title_underline(heading))
for p in file_paths:
print("-", p)
def report_missing_source(file_paths: List[str]) -> int:
def report_missing_source(file_paths: list[str]) -> int:
heading = "Missing in Source Dir"
test = [p for p in file_paths if not os.path.exists(os.path.join(SOURCE_DIR, p))]
@@ -108,7 +102,7 @@ def report_missing_source(file_paths: List[str]) -> int:
return len(test)
def report_incomplete(file_paths: List[str]) -> int:
def report_incomplete(file_paths: list[str]) -> int:
heading = "Missing Documentation"
test = []
@@ -137,7 +131,7 @@ def report_incomplete(file_paths: List[str]) -> int:
return len(test)
def report_alphabetical_order(file_paths: List[str]) -> int:
def report_alphabetical_order(file_paths: list[str]) -> int:
heading = "Non-Alphabetically Ordered"
test = []

View File

@@ -28,20 +28,15 @@ from check_cmake_consistency_config import (
BUILD_DIR,
)
from typing import (
from collections.abc import (
Callable,
Dict,
Generator,
Iterator,
List,
Optional,
Tuple,
)
global_h = set()
global_c = set()
global_refs: Dict[str, List[Tuple[str, int]]] = {}
global_refs: dict[str, list[tuple[str, int]]] = {}
# Flatten `IGNORE_SOURCE_MISSING` to avoid nested looping.
IGNORE_SOURCE_MISSING_FLAT = [
@@ -50,7 +45,7 @@ IGNORE_SOURCE_MISSING_FLAT = [
]
# Ignore cmake file, path pairs.
global_ignore_source_missing: Dict[str, List[str]] = {}
global_ignore_source_missing: dict[str, list[str]] = {}
for k, v in IGNORE_SOURCE_MISSING_FLAT:
global_ignore_source_missing.setdefault(k, []).append(v)
del IGNORE_SOURCE_MISSING_FLAT
@@ -73,8 +68,8 @@ def replace_line(f: str, i: int, text: str, keep_indent: bool = True) -> None:
def source_list(
path: str,
filename_check: Optional[Callable[[str], bool]] = None,
) -> Generator[str, None, None]:
filename_check: Callable[[str], bool] | None = None,
) -> Iterator[str]:
for dirpath, dirnames, filenames in os.walk(path):
# skip '.git'
dirnames[:] = [d for d in dirnames if not d.startswith(".")]
@@ -110,12 +105,12 @@ def cmake_get_src(f: str) -> None:
sources_c = []
filen = open(f, "r", encoding="utf8")
it: Optional[Iterator[str]] = iter(filen)
it: Iterator[str] | None = iter(filen)
found = False
i = 0
# print(f)
def is_definition(l: str, f: str, i: int, name: str) -> Tuple[bool, int]:
def is_definition(l: str, f: str, i: int, name: str) -> tuple[bool, int]:
"""
Return (is_definition, single_line_offset).
"""
@@ -308,7 +303,7 @@ def cmake_get_src(f: str) -> None:
filen.close()
def is_ignore_source(f: str, ignore_used: List[bool]) -> bool:
def is_ignore_source(f: str, ignore_used: list[bool]) -> bool:
for index, ignore_path in enumerate(IGNORE_SOURCE):
if ignore_path in f:
ignore_used[index] = True
@@ -316,7 +311,7 @@ def is_ignore_source(f: str, ignore_used: List[bool]) -> bool:
return False
def is_ignore_cmake(f: str, ignore_used: List[bool]) -> bool:
def is_ignore_cmake(f: str, ignore_used: list[bool]) -> bool:
for index, ignore_path in enumerate(IGNORE_CMAKE):
if ignore_path in f:
ignore_used[index] = True

View File

@@ -3,9 +3,6 @@
# SPDX-License-Identifier: GPL-2.0-or-later
import os
from typing import (
Tuple,
)
IGNORE_SOURCE = (
"/lib/",
@@ -100,7 +97,7 @@ IGNORE_SOURCE = (
# Ignore cmake file, path pairs,
# NOTE: keep commented block to show the intended format (even when unused).
IGNORE_SOURCE_MISSING: Tuple[Tuple[str, Tuple[str, ...]], ...] = (
IGNORE_SOURCE_MISSING: tuple[tuple[str, tuple[str, ...]], ...] = (
( # Use for `WITH_NANOVDB`.
"intern/cycles/kernel/CMakeLists.txt", (
"hiprt/impl/Aabb.h",

View File

@@ -9,19 +9,15 @@ noted by the date which must be included with the *DEPRECATED* comment.
Once this date is past, the code should be removed.
"""
from typing import (
Callable,
Generator,
List,
Tuple,
Optional,
)
import os
import datetime
from os.path import splitext
from collections.abc import (
Callable,
Iterator,
)
SKIP_DIRS = (
"extern",
"lib",
@@ -64,7 +60,7 @@ def is_source_any(filename: str) -> bool:
return is_c_any(filename) or is_py(filename)
def source_list(path: str, filename_check: Optional[Callable[[str], bool]] = None) -> Generator[str, None, None]:
def source_list(path: str, filename_check: Callable[[str], bool] | None = None) -> Iterator[str]:
for dirpath, dirnames, filenames in os.walk(path):
# skip '.git'
dirnames[:] = [d for d in dirnames if not d.startswith(".")]
@@ -74,7 +70,7 @@ def source_list(path: str, filename_check: Optional[Callable[[str], bool]] = Non
yield os.path.join(dirpath, filename)
def deprecations() -> List[Tuple[datetime.datetime, Tuple[str, int], str]]:
def deprecations() -> list[tuple[datetime.datetime, tuple[str, int], str]]:
"""
Searches out source code for lines like

View File

@@ -19,10 +19,9 @@ import os
import sys
import argparse
from typing import (
from collections.abc import (
Callable,
Generator,
Optional,
Iterator,
)
# Use GCC's `__INCLUDE_LEVEL__` to find direct duplicate includes.
@@ -90,8 +89,8 @@ def scan_source_recursive(dirpath: str, is_restore: bool) -> None:
def source_list(
path: str,
filename_check: Optional[Callable[[str], bool]] = None,
) -> Generator[str, None, None]:
filename_check: Callable[[str], bool] | None = None,
) -> Iterator[str]:
for dirpath, dirnames, filenames in os.walk(path):
# skip '.git'
dirnames[:] = [d for d in dirnames if not d.startswith(".")]

View File

@@ -17,14 +17,12 @@ import re
from dataclasses import dataclass
from typing import (
from collections.abc import (
Callable,
Dict,
Generator,
List,
Tuple,
Iterator,
)
# -----------------------------------------------------------------------------
# Constants
@@ -42,7 +40,7 @@ EXPECT_SPDX_IN_FIRST_CHARS = 1024
# Show unique headers after modifying them.
# Useful when reviewing changes as there may be many duplicates.
REPORT_UNIQUE_HEADER_MAPPING = False
mapping: Dict[str, List[str]] = {}
mapping: dict[str, list[str]] = {}
SOURCE_DIR = os.path.normpath(
os.path.abspath(
@@ -64,7 +62,7 @@ del fh
# Global Variables
# Count how many licenses are used.
SPDX_IDENTIFIER_STATS: Dict[str, int] = {SPDX_IDENTIFIER_UNKNOWN: 0}
SPDX_IDENTIFIER_STATS: dict[str, int] = {SPDX_IDENTIFIER_UNKNOWN: 0}
# -----------------------------------------------------------------------------
# File Type Checks
@@ -192,7 +190,7 @@ def txt_anonymous_years(text: str) -> str:
return text
def txt_find_next_indented_block(text: str, find: str, pos: int, limit: int) -> Tuple[int, int]:
def txt_find_next_indented_block(text: str, find: str, pos: int, limit: int) -> tuple[int, int]:
"""
Support for finding an indented block of text.
Return the identifier index and the end of the block.
@@ -405,9 +403,9 @@ operation = check_contents
def source_files(
path: str,
paths_exclude: Tuple[str, ...],
paths_exclude: tuple[str, ...],
filename_test: Callable[[str], bool],
) -> Generator[str, None, None]:
) -> Iterator[str]:
# Split paths into directories & files.
dirs_exclude_list = []
files_exclude_list = []
@@ -487,8 +485,8 @@ def main() -> None:
@dataclass
class Pass:
filename_test: Callable[[str], bool]
source_paths_include: Tuple[str, ...]
source_paths_exclude: Tuple[str, ...]
source_paths_include: tuple[str, ...]
source_paths_exclude: tuple[str, ...]
passes = (
Pass(

View File

@@ -10,14 +10,13 @@ from check_mypy_config import PATHS, PATHS_EXCLUDE
from typing import (
Any,
)
from collections.abc import (
Callable,
Generator,
Optional,
Tuple,
Dict,
Iterator,
)
FileAndArgs = Tuple[str, Tuple[Any, ...], Dict[str, str]]
FileAndArgs = tuple[str, tuple[Any, ...], dict[str, str]]
# print(PATHS)
SOURCE_EXT = (
@@ -32,8 +31,8 @@ def is_source(filename: str) -> bool:
def path_iter(
path: str,
filename_check: Optional[Callable[[str], bool]] = None,
) -> Generator[str, None, None]:
filename_check: Callable[[str], bool] | None = None,
) -> Iterator[str]:
for dirpath, dirnames, filenames in os.walk(path):
# skip ".git"
dirnames[:] = [d for d in dirnames if not d.startswith(".")]
@@ -47,9 +46,9 @@ def path_iter(
def path_expand_with_args(
paths_and_args: Tuple[FileAndArgs, ...],
filename_check: Optional[Callable[[str], bool]] = None,
) -> Generator[FileAndArgs, None, None]:
paths_and_args: tuple[FileAndArgs, ...],
filename_check: Callable[[str], bool] | None = None,
) -> Iterator[FileAndArgs]:
for f_and_args in paths_and_args:
f, f_args = f_and_args[0], f_and_args[1:]
if not os.path.exists(f):

View File

@@ -5,11 +5,9 @@
import os
from typing import (
Any,
Tuple,
Dict,
)
PATHS: Tuple[Tuple[str, Tuple[Any, ...], Dict[str, str]], ...] = (
PATHS: tuple[tuple[str, tuple[Any, ...], dict[str, str]], ...] = (
("build_files/cmake/", (), {'MYPYPATH': "modules"}),
("build_files/utils/", (), {'MYPYPATH': "modules"}),
("doc/manpage/blender.1.py", (), {}),

View File

@@ -18,23 +18,18 @@ import os
import argparse
import sys
from typing import (
from collections.abc import (
Callable,
Dict,
Generator,
List,
Optional,
Set,
Tuple,
Iterator,
)
# Report: word, line, column.
Report = Tuple[str, int, int]
Report = tuple[str, int, int]
# Cache: {filepath: length, hash, reports}.
CacheData = Dict[str, Tuple[int, bytes, List[Report]]]
CacheData = dict[str, tuple[int, bytes, list[Report]]]
# Map word to suggestions.
SuggestMap = Dict[str, str]
SuggestMap = dict[str, str]
ONLY_ONCE = True
USE_COLOR = True
@@ -118,7 +113,7 @@ def dictionary_create(): # type: ignore
return dict_spelling
def dictionary_check(w: str, code_words: Set[str]) -> bool:
def dictionary_check(w: str, code_words: set[str]) -> bool:
w_lower = w.lower()
if w_lower in dict_ignore:
return True
@@ -152,7 +147,7 @@ def dictionary_check(w: str, code_words: Set[str]) -> bool:
return is_correct
def dictionary_suggest(w: str) -> List[str]:
def dictionary_suggest(w: str) -> list[str]:
return _dict.suggest(w) # type: ignore
@@ -162,7 +157,7 @@ _dict = dictionary_create() # type: ignore
# -----------------------------------------------------------------------------
# General Utilities
def hash_of_file_and_len(fp: str) -> Tuple[bytes, int]:
def hash_of_file_and_len(fp: str) -> tuple[bytes, int]:
import hashlib
with open(fp, 'rb') as fh:
data = fh.read()
@@ -232,7 +227,7 @@ if USE_SKIP_SINGLE_IDENTIFIER_COMMENTS:
re_single_word_c_comments = re.compile(r"\/\*[\s]*[a-zA-Z_]+[a-zA-Z0-9_]*[\s]*\*\/")
def words_from_text(text: str, check_type: str) -> List[Tuple[str, int]]:
def words_from_text(text: str, check_type: str) -> list[tuple[str, int]]:
""" Extract words to treat as English for spell checking.
"""
# Replace non-newlines with white-space, so all alignment is kept.
@@ -293,10 +288,10 @@ class Comment:
self.line = line
self.type = type
def parse(self, check_type: str) -> List[Tuple[str, int]]:
def parse(self, check_type: str) -> list[tuple[str, int]]:
return words_from_text(self.text, check_type=check_type)
def line_and_column_from_comment_offset(self, pos: int) -> Tuple[int, int]:
def line_and_column_from_comment_offset(self, pos: int) -> tuple[int, int]:
text = self.text
slineno = self.line + text.count("\n", 0, pos)
# Allow for -1 to be not found.
@@ -309,7 +304,7 @@ class Comment:
return slineno, scol
def extract_code_strings(filepath: str) -> Tuple[List[Comment], Set[str]]:
def extract_code_strings(filepath: str) -> tuple[list[Comment], set[str]]:
from pygments import lexers
from pygments.token import Token
@@ -342,7 +337,7 @@ def extract_code_strings(filepath: str) -> Tuple[List[Comment], Set[str]]:
return comments, code_words
def extract_py_comments(filepath: str) -> Tuple[List[Comment], Set[str]]:
def extract_py_comments(filepath: str) -> tuple[list[Comment], set[str]]:
import token
import tokenize
@@ -371,7 +366,7 @@ def extract_py_comments(filepath: str) -> Tuple[List[Comment], Set[str]]:
return comments, code_words
def extract_cmake_comments(filepath: str) -> Tuple[List[Comment], Set[str]]:
def extract_cmake_comments(filepath: str) -> tuple[list[Comment], set[str]]:
from pygments import lexers
from pygments.token import Token
@@ -400,7 +395,7 @@ def extract_cmake_comments(filepath: str) -> Tuple[List[Comment], Set[str]]:
return comments, code_words
def extract_c_comments(filepath: str) -> Tuple[List[Comment], Set[str]]:
def extract_c_comments(filepath: str) -> tuple[list[Comment], set[str]]:
"""
Extracts comments like this:
@@ -549,7 +544,7 @@ def spell_check_file(
filepath: str,
check_type: str,
extract_type: str = 'COMMENTS',
) -> Generator[Report, None, None]:
) -> Iterator[Report]:
if extract_type == 'COMMENTS':
if filepath.endswith(".py"):
comment_list, code_words = extract_py_comments(filepath)
@@ -591,17 +586,17 @@ def spell_check_file(
def spell_check_file_recursive(
dirpath: str,
check_type: str,
regex_list: List[re.Pattern[str]],
regex_list: list[re.Pattern[str]],
extract_type: str = 'COMMENTS',
cache_data: Optional[CacheData] = None,
cache_data: CacheData | None = None,
) -> None:
import os
from os.path import join
def source_list(
path: str,
filename_check: Optional[Callable[[str], bool]] = None,
) -> Generator[str, None, None]:
filename_check: Callable[[str], bool] | None = None,
) -> Iterator[str]:
for dirpath, dirnames, filenames in os.walk(path):
# Only needed so this can be matched with ignore paths.
dirpath = os.path.abspath(dirpath)
@@ -652,16 +647,16 @@ def spell_check_file_recursive(
# )
#
def spell_cache_read(cache_filepath: str) -> Tuple[CacheData, SuggestMap]:
def spell_cache_read(cache_filepath: str) -> tuple[CacheData, SuggestMap]:
import pickle
cache_store: Tuple[CacheData, SuggestMap] = {}, {}
cache_store: tuple[CacheData, SuggestMap] = {}, {}
if os.path.exists(cache_filepath):
with open(cache_filepath, 'rb') as fh:
cache_store = pickle.load(fh)
return cache_store
def spell_cache_write(cache_filepath: str, cache_store: Tuple[CacheData, SuggestMap]) -> None:
def spell_cache_write(cache_filepath: str, cache_store: tuple[CacheData, SuggestMap]) -> None:
import pickle
with open(cache_filepath, 'wb') as fh:
pickle.dump(cache_store, fh)
@@ -672,8 +667,8 @@ def spell_check_file_with_cache_support(
check_type: str,
*,
extract_type: str = 'COMMENTS',
cache_data: Optional[CacheData] = None,
) -> Generator[Report, None, None]:
cache_data: CacheData | None = None,
) -> Iterator[Report]:
"""
Iterator, each item is a report: (word, line_number, column_number)
"""
@@ -798,7 +793,7 @@ def main() -> int:
cache_filepath = args.cache_file
check_type = args.check_type
cache_data: Optional[CacheData] = None
cache_data: CacheData | None = None
if cache_filepath:
cache_data, _suggest_map = spell_cache_read(cache_filepath)
clear_stale_cache = True

View File

@@ -4,7 +4,10 @@
from __future__ import annotations
import enum
import typing as t
from collections.abc import (
Sequence,
)
# Does not actually exist.
@@ -50,11 +53,11 @@ class Type:
sizeof: int
code: TypeCode
dynamic: bool
name: t.Optional[str]
tag: t.Optional[str]
objfile: t.Optional[Objfile]
name: str | None
tag: str | None
objfile: Objfile | None
def fields(self) -> t.List[Field]:
def fields(self) -> list[Field]:
pass
def array(self, n1, n2=None) -> Type:
@@ -87,7 +90,7 @@ class Type:
def target(self) -> Type:
pass
def template_argument(self, n, block=None) -> t.Union[Type, Value]:
def template_argument(self, n, block=None) -> Type | Value:
pass
def optimized_out(self) -> Value:
@@ -97,7 +100,7 @@ class Type:
class Field:
bitpos: int
enumval: int
name: t.Optional[str]
name: str | None
artificial: bool
is_base_class: bool
bitsize: int
@@ -107,7 +110,7 @@ class Field:
class Value:
type: Type
address: t.Optional[Value]
address: Value | None
is_optimized_out: bool
dynamic_type: Type
is_lazy: bool
@@ -142,7 +145,7 @@ class Value:
def reinterpret_cast(self, type: Type) -> Value:
pass
def __getitem__(self, subscript: t.Union[int, str]) -> Value:
def __getitem__(self, subscript: int | str) -> Value:
pass
@@ -184,7 +187,7 @@ class Command:
def invoke(self, argument: str, from_tty: bool):
pass
def complete(self, text: str, word: str) -> t.Union[t.Sequence[str], CompleteCode]:
def complete(self, text: str, word: str) -> Sequence[str] | CompleteCode:
pass

View File

@@ -3,7 +3,6 @@
# SPDX-License-Identifier: GPL-2.0-or-later
from .gdb import Value
import typing as t
class PrettyPrinter:

View File

@@ -3,7 +3,6 @@
# SPDX-License-Identifier: GPL-2.0-or-later
from gdb import Type, Field
import typing as t
def get_basic_type(type: Type) -> Type:
@@ -14,5 +13,5 @@ def has_field(type: Type) -> bool:
pass
def make_enum_dict(enum_type: Type) -> t.Dict[str, int]:
def make_enum_dict(enum_type: Type) -> dict[str, int]:
pass

View File

@@ -13,17 +13,12 @@ import urllib.request
from typing import (
Any,
Dict,
List,
Optional,
Set,
Union,
)
BASE_API_URL = "https://projects.blender.org/api/v1"
def url_json_get(url: str) -> Optional[Union[Dict[str, Any], List[Dict[str, Any]]]]:
def url_json_get(url: str) -> dict[str, Any] | list[dict[str, Any]] | None:
try:
# Make the HTTP request and store the response in a 'response' object
response = urllib.request.urlopen(url)
@@ -41,8 +36,8 @@ def url_json_get(url: str) -> Optional[Union[Dict[str, Any], List[Dict[str, Any]
def url_json_get_all_pages(
url: str,
verbose: bool = False,
) -> List[Dict[str, Any]]:
result: List[Dict[str, Any]] = []
) -> list[dict[str, Any]]:
result: list[dict[str, Any]] = []
page = 1
while True:
if verbose:
@@ -67,7 +62,7 @@ def url_json_get_all_pages(
return result
def gitea_user_get(username: str) -> Dict[str, Any]:
def gitea_user_get(username: str) -> dict[str, Any]:
"""
Get the user data as JSON from the user name. https://docs.gitea.com/api/next/#tag/user/operation/userGet
"""
@@ -78,7 +73,7 @@ def gitea_user_get(username: str) -> Dict[str, Any]:
return result
def gitea_json_issue_get(issue_fullname: str) -> Dict[str, Any]:
def gitea_json_issue_get(issue_fullname: str) -> dict[str, Any]:
"""
Get issue/pull JSON data.
:param issue_fullname: string in the format "{owner}/{repo}/issues/{number}"
@@ -89,7 +84,7 @@ def gitea_json_issue_get(issue_fullname: str) -> Dict[str, Any]:
return result
def gitea_json_activities_get(username: str, date: str) -> List[Dict[str, Any]]:
def gitea_json_activities_get(username: str, date: str) -> list[dict[str, Any]]:
"""
List a user's activity feeds.
:param username: username of user.
@@ -102,16 +97,16 @@ def gitea_json_activities_get(username: str, date: str) -> List[Dict[str, Any]]:
def gitea_json_issues_search(
type: Optional[str] = None,
since: Optional[str] = None,
before: Optional[str] = None,
type: str | None = None,
since: str | None = None,
before: str | None = None,
state: str = 'all',
labels: Optional[str] = None,
labels: str | None = None,
created: bool = False,
reviewed: bool = False,
access_token: Optional[str] = None,
access_token: str | None = None,
verbose: bool = True,
) -> List[Dict[str, Any]]:
) -> list[dict[str, Any]]:
"""
Search for issues across the repositories that the user has access to.
:param type: filter by type (issues / pulls) if set.
@@ -154,12 +149,12 @@ def gitea_json_issues_search(
def gitea_json_issue_events_filter(
issue_fullname: str,
date_start: Optional[datetime.datetime] = None,
date_end: Optional[datetime.datetime] = None,
username: Optional[str] = None,
labels: Optional[Set[str]] = None,
event_type: Set[str] = set(),
) -> List[Dict[str, Any]]:
date_start: datetime.datetime | None = None,
date_end: datetime.datetime | None = None,
username: str | None = None,
labels: set[str] | None = None,
event_type: set[str] = set(),
) -> list[dict[str, Any]]:
"""
Filter all comments and events on the issue list.
:param issue_fullname: string in the format "{owner}/{repo}/issues/{number}"
@@ -203,7 +198,7 @@ def gitea_json_issue_events_filter(
# WORKAROUND: This function doesn't involve GITEA, and the obtained username may not match the username used in GITEA.
# However, it provides an option to fetch the configured username from the local Git,
# in case the user does not explicitly supply the username.
def git_username_detect() -> Optional[str]:
def git_username_detect() -> str | None:
import os
import subprocess

View File

@@ -32,9 +32,8 @@ from gitea_utils import (
from typing import (
Any,
Dict,
List,
Set,
)
from collections.abc import (
Iterable,
)
@@ -107,35 +106,35 @@ def report_personal_weekly_get(
verbose: bool = True,
) -> None:
data_cache: Dict[str, Dict[str, Any]] = {}
data_cache: dict[str, dict[str, Any]] = {}
def gitea_json_issue_get_cached(issue_fullname: str) -> Dict[str, Any]:
def gitea_json_issue_get_cached(issue_fullname: str) -> dict[str, Any]:
if issue_fullname not in data_cache:
issue = gitea_json_issue_get(issue_fullname)
data_cache[issue_fullname] = issue
return data_cache[issue_fullname]
pulls_closed: Set[str] = set()
pulls_commented: Set[str] = set()
pulls_created: Set[str] = set()
pulls_closed: set[str] = set()
pulls_commented: set[str] = set()
pulls_created: set[str] = set()
issues_closed: Set[str] = set()
issues_commented: Set[str] = set()
issues_created: Set[str] = set()
issues_closed: set[str] = set()
issues_commented: set[str] = set()
issues_created: set[str] = set()
pulls_reviewed: List[str] = []
pulls_reviewed: list[str] = []
issues_confirmed: List[str] = []
issues_needing_user_info: List[str] = []
issues_needing_developer_info: List[str] = []
issues_fixed: List[str] = []
issues_duplicated: List[str] = []
issues_archived: List[str] = []
issues_confirmed: list[str] = []
issues_needing_user_info: list[str] = []
issues_needing_developer_info: list[str] = []
issues_fixed: list[str] = []
issues_duplicated: list[str] = []
issues_archived: list[str] = []
commits_main: List[str] = []
commits_main: list[str] = []
user_data: Dict[str, Any] = gitea_user_get(username)
user_data: dict[str, Any] = gitea_user_get(username)
for i in range(7):
date_curr = start + datetime.timedelta(days=i)
@@ -173,7 +172,7 @@ def report_personal_weekly_get(
content_json = json.loads(activity["content"])
assert isinstance(content_json, dict)
repo_fullname = activity["repo"]["full_name"]
content_json_commits: List[Dict[str, Any]] = content_json["Commits"]
content_json_commits: list[dict[str, Any]] = content_json["Commits"]
for commits in content_json_commits:
# Skip commits that were not made by this user. Using email doesn't seem to
# be possible unfortunately.

View File

@@ -21,12 +21,6 @@ import sys
import subprocess
import argparse
from typing import (
List,
Tuple,
Optional,
)
# Temporary, until all platforms update to 2.3.1.
VERSION_MIN = (1, 6, 0)
VERSION_MAX_RECOMMENDED = (2, 3, 1)
@@ -55,7 +49,7 @@ ignore_files = {
}
def compute_paths(paths: List[str], use_default_paths: bool) -> List[str]:
def compute_paths(paths: list[str], use_default_paths: bool) -> list[str]:
# Optionally pass in files to operate on.
if use_default_paths:
paths = [
@@ -79,7 +73,7 @@ def compute_paths(paths: List[str], use_default_paths: bool) -> List[str]:
return paths
def source_files_from_git(paths: List[str], changed_only: bool) -> List[str]:
def source_files_from_git(paths: list[str], changed_only: bool) -> list[str]:
if changed_only:
cmd = ("git", "diff", "HEAD", "--name-only", "-z", "--", *paths)
else:
@@ -88,22 +82,22 @@ def source_files_from_git(paths: List[str], changed_only: bool) -> List[str]:
return [f.decode('ascii') for f in files]
def autopep8_parse_version(version: str) -> Tuple[int, int, int]:
def autopep8_parse_version(version: str) -> tuple[int, int, int]:
# Ensure exactly 3 numbers.
major, minor, patch = (tuple(int(n) for n in version.split("-")[0].split(".")) + (0, 0, 0))[0:3]
return major, minor, patch
def version_str_from_tuple(version: Tuple[int, ...]) -> str:
def version_str_from_tuple(version: tuple[int, ...]) -> str:
return ".".join(str(x) for x in version)
def autopep8_ensure_version_from_command(
autopep8_format_cmd_argument: str,
) -> Optional[Tuple[str, Tuple[int, int, int]]]:
) -> tuple[str, tuple[int, int, int]] | None:
# The version to parse.
version_str: Optional[str] = None
version_str: str | None = None
global AUTOPEP8_FORMAT_CMD
autopep8_format_cmd = None
@@ -142,10 +136,10 @@ def autopep8_ensure_version_from_command(
return None
def autopep8_ensure_version_from_module() -> Optional[Tuple[str, Tuple[int, int, int]]]:
def autopep8_ensure_version_from_module() -> tuple[str, tuple[int, int, int]] | None:
# The version to parse.
version_str: Optional[str] = None
version_str: str | None = None
# Extract the version from the module.
try:
@@ -164,7 +158,7 @@ def autopep8_ensure_version_from_module() -> Optional[Tuple[str, Tuple[int, int,
return None
def autopep8_format(files: List[str]) -> bytes:
def autopep8_format(files: list[str]) -> bytes:
cmd = [
AUTOPEP8_FORMAT_CMD,
*AUTOPEP8_FORMAT_DEFAULT_ARGS,
@@ -178,7 +172,7 @@ def autopep8_format(files: List[str]) -> bytes:
return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
def autopep8_format_no_subprocess(files: List[str]) -> None:
def autopep8_format_no_subprocess(files: list[str]) -> None:
cmd = [
*AUTOPEP8_FORMAT_DEFAULT_ARGS,
*files

View File

@@ -11,10 +11,6 @@ sys.path.append(os.path.join(PWD, "modules"))
from batch_edit_text import run
from typing import (
Optional,
)
SOURCE_DIR = os.path.normpath(os.path.abspath(os.path.normpath(os.path.join(PWD, "..", ".."))))
# TODO, move to config file
@@ -31,7 +27,7 @@ SOURCE_EXT = (
)
def sort_struct_lists(fn: str, data_src: str) -> Optional[str]:
def sort_struct_lists(fn: str, data_src: str) -> str | None:
import re
# eg:
@@ -53,7 +49,7 @@ def sort_struct_lists(fn: str, data_src: str) -> Optional[str]:
lines = data_src.splitlines(keepends=True)
def can_sort(l: str) -> Optional[int]:
def can_sort(l: str) -> int | None:
if re_match_struct.match(l):
return 1
if re_match_struct_type.match(l):

View File

@@ -15,11 +15,6 @@ import os
import sys
import re
from typing import (
Dict,
Optional,
)
PWD = os.path.dirname(__file__)
sys.path.append(os.path.join(PWD, "modules"))
@@ -44,11 +39,11 @@ re_words = re.compile("[A-Za-z_][A-Za-z_0-9]*")
re_match_struct = re.compile(r"struct\s+([A-Za-z_][A-Za-z_0-9]*)\s*;")
def clean_structs(fn: str, data_src: str) -> Optional[str]:
def clean_structs(fn: str, data_src: str) -> str | None:
from pygments.token import Token
from pygments import lexers
word_occurance: Dict[str, int] = {}
word_occurance: dict[str, int] = {}
lex = lexers.get_lexer_by_name("c++")
lex.get_tokens(data_src)

View File

@@ -18,12 +18,8 @@ import os
import sys
import subprocess
from typing import (
List,
Optional,
from collections.abc import (
Sequence,
Set,
Tuple,
)
VERSION_MIN = (17, 0, 6)
@@ -49,7 +45,7 @@ extensions_only_retab = (
)
# Add files which are too large/heavy to format.
ignore_files: Set[str] = set([
ignore_files: set[str] = set([
# Currently empty, looks like.
# "intern/cycles/render/sobol.cpp",
])
@@ -66,7 +62,7 @@ ignore_directories = {
}
def compute_paths(paths: List[str], use_default_paths: bool) -> List[str]:
def compute_paths(paths: list[str], use_default_paths: bool) -> list[str]:
# The resulting paths:
# - Use forward slashes on all systems.
# - Are relative to the GIT repository without any `.` or `./` prefix.
@@ -91,7 +87,7 @@ def compute_paths(paths: List[str], use_default_paths: bool) -> List[str]:
return paths
def source_files_from_git(paths: Sequence[str], changed_only: bool) -> List[str]:
def source_files_from_git(paths: Sequence[str], changed_only: bool) -> list[str]:
if changed_only:
cmd = ("git", "diff", "HEAD", "--name-only", "-z", "--", *paths)
else:
@@ -126,7 +122,7 @@ def convert_tabs_to_spaces(files: Sequence[str]) -> None:
fh.write(data)
def clang_format_ensure_version() -> Optional[Tuple[int, int, int]]:
def clang_format_ensure_version() -> tuple[int, int, int] | None:
global CLANG_FORMAT_CMD
clang_format_cmd = None
version_output = ""
@@ -142,18 +138,18 @@ def clang_format_ensure_version() -> Optional[Tuple[int, int, int]]:
continue
CLANG_FORMAT_CMD = clang_format_cmd
break
version: Optional[str] = next(iter(v for v in version_output.split() if v[0].isdigit()), None)
version: str | None = next(iter(v for v in version_output.split() if v[0].isdigit()), None)
if version is None:
return None
version = version.split("-")[0]
# Ensure exactly 3 numbers.
version_num: Tuple[int, int, int] = (tuple(int(n) for n in version.split(".")) + (0, 0, 0))[:3] # type: ignore
version_num: tuple[int, int, int] = (tuple(int(n) for n in version.split(".")) + (0, 0, 0))[:3] # type: ignore
print("Using {:s} ({:d}.{:d}.{:d})...".format(CLANG_FORMAT_CMD, version_num[0], version_num[1], version_num[2]))
return version_num
def clang_format_file(files: List[str]) -> bytes:
def clang_format_file(files: list[str]) -> bytes:
cmd = [
CLANG_FORMAT_CMD,
# Update the files in-place.
@@ -168,7 +164,7 @@ def clang_print_output(output: bytes) -> None:
print(output.decode('utf8', errors='ignore').strip())
def clang_format(files: List[str]) -> None:
def clang_format(files: list[str]) -> None:
pool = multiprocessing.Pool()
# Process in chunks to reduce overhead of starting processes.

View File

@@ -12,10 +12,6 @@ Sorts CMake path lists
import os
import sys
from typing import (
Optional,
)
PWD = os.path.dirname(__file__)
sys.path.append(os.path.join(PWD, "modules"))
@@ -37,7 +33,7 @@ SOURCE_EXT = (
)
def sort_cmake_file_lists(fn: str, data_src: str) -> Optional[str]:
def sort_cmake_file_lists(fn: str, data_src: str) -> str | None:
fn_dir = os.path.dirname(fn)
lines = data_src.splitlines(keepends=True)

View File

@@ -20,18 +20,14 @@ import string
from typing import (
Any,
Dict,
Generator,
List,
Optional,
)
from collections.abc import (
Iterator,
Sequence,
Set,
Tuple,
Type,
)
# List of (source_file, all_arguments)
ProcessedCommands = List[Tuple[str, str]]
ProcessedCommands = list[tuple[str, str]]
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
@@ -110,7 +106,7 @@ def line_from_span(text: str, start: int, end: int) -> str:
return text[start:end]
def files_recursive_with_ext(path: str, ext: Tuple[str, ...]) -> Generator[str, None, None]:
def files_recursive_with_ext(path: str, ext: tuple[str, ...]) -> Iterator[str]:
for dirpath, dirnames, filenames in os.walk(path):
# skip '.git' and other dot-files.
dirnames[:] = [d for d in dirnames if not d.startswith(".")]
@@ -264,7 +260,7 @@ def text_cxx_in_macro_definition(data: str, pos: int) -> bool:
def run(
args: Sequence[str],
*,
cwd: Optional[str],
cwd: str | None,
quiet: bool,
verbose_compile: bool,
) -> int:
@@ -286,7 +282,7 @@ def run(
# -----------------------------------------------------------------------------
# Build System Access
def cmake_cache_var(cmake_dir: str, var: str) -> Optional[str]:
def cmake_cache_var(cmake_dir: str, var: str) -> str | None:
with open(os.path.join(cmake_dir, "CMakeCache.txt"), encoding='utf-8') as cache_file:
lines = [
l_strip for l in cache_file
@@ -300,7 +296,7 @@ def cmake_cache_var(cmake_dir: str, var: str) -> Optional[str]:
return None
def cmake_cache_var_is_true(cmake_var: Optional[str]) -> bool:
def cmake_cache_var_is_true(cmake_var: str | None) -> bool:
if cmake_var is None:
return False
@@ -316,7 +312,7 @@ def cmake_cache_var_is_true(cmake_var: Optional[str]) -> bool:
RE_CFILE_SEARCH = re.compile(r"\s\-c\s([\S]+)")
def process_commands(cmake_dir: str, data: Sequence[str]) -> Optional[ProcessedCommands]:
def process_commands(cmake_dir: str, data: Sequence[str]) -> ProcessedCommands | None:
compiler_c = cmake_cache_var(cmake_dir, "CMAKE_C_COMPILER")
compiler_cxx = cmake_cache_var(cmake_dir, "CMAKE_CXX_COMPILER")
if compiler_c is None:
@@ -354,7 +350,7 @@ def process_commands(cmake_dir: str, data: Sequence[str]) -> Optional[ProcessedC
return file_args
def find_build_args_ninja(build_dir: str) -> Optional[ProcessedCommands]:
def find_build_args_ninja(build_dir: str) -> ProcessedCommands | None:
import time
cmake_dir = build_dir
make_exe = "ninja"
@@ -374,7 +370,7 @@ def find_build_args_ninja(build_dir: str) -> Optional[ProcessedCommands]:
return process_commands(cmake_dir, data)
def find_build_args_make(build_dir: str) -> Optional[ProcessedCommands]:
def find_build_args_make(build_dir: str) -> ProcessedCommands | None:
import time
make_exe = "make"
with subprocess.Popen(
@@ -446,11 +442,11 @@ class EditGenerator:
if getattr(cls, "edit_list_from_file") is EditGenerator.edit_list_from_file:
raise Exception("Class {!r} missing \"edit_list_from_file\" callback!".format(cls))
def __new__(cls, *args: Tuple[Any], **kwargs: Dict[str, Any]) -> Any:
def __new__(cls, *args: tuple[Any], **kwargs: dict[str, Any]) -> Any:
raise RuntimeError("Class {!r} should not be instantiated".format(cls))
@staticmethod
def edit_list_from_file(_source: str, _data: str, _shared_edit_data: Any) -> List[Edit]:
def edit_list_from_file(_source: str, _data: str, _shared_edit_data: Any) -> list[Edit]:
# The `__init_subclass__` function ensures this is always overridden.
raise RuntimeError("This function must be overridden by it's subclass!")
return []
@@ -482,7 +478,7 @@ class edit_generators:
is_default = False
@staticmethod
def edit_list_from_file(_source: str, data: str, _shared_edit_data: Any) -> List[Edit]:
def edit_list_from_file(_source: str, data: str, _shared_edit_data: Any) -> list[Edit]:
edits = []
for match in re.finditer(r"sizeof\(([a-zA-Z_]+)\) \* (\d+) \* (\d+)", data):
@@ -539,7 +535,7 @@ class edit_generators:
is_default = False
@staticmethod
def edit_list_from_file(_source: str, data: str, _shared_edit_data: Any) -> List[Edit]:
def edit_list_from_file(_source: str, data: str, _shared_edit_data: Any) -> list[Edit]:
edits = []
# `float abc[3] = {0, 1, 2};` -> `const float abc[3] = {0, 1, 2};`
@@ -595,7 +591,7 @@ class edit_generators:
is_default = True
@staticmethod
def edit_list_from_file(_source: str, data: str, _shared_edit_data: Any) -> List[Edit]:
def edit_list_from_file(_source: str, data: str, _shared_edit_data: Any) -> list[Edit]:
edits = []
# `1.f` -> `1.0f`
@@ -628,7 +624,7 @@ class edit_generators:
is_default = True
@staticmethod
def edit_list_from_file(_source: str, data: str, _shared_edit_data: Any) -> List[Edit]:
def edit_list_from_file(_source: str, data: str, _shared_edit_data: Any) -> list[Edit]:
edits = []
# Keep `typedef` unsigned as some files have local types, e.g.
@@ -678,8 +674,8 @@ class edit_generators:
is_default = True
@staticmethod
def edit_list_from_file(source: str, data: str, _shared_edit_data: Any) -> List[Edit]:
edits: List[Edit] = []
def edit_list_from_file(source: str, data: str, _shared_edit_data: Any) -> list[Edit]:
edits: list[Edit] = []
# The user might include C & C++, if they forget, it is better not to operate on C.
if source.lower().endswith((".h", ".c")):
@@ -716,8 +712,8 @@ class edit_generators:
is_default = True
@staticmethod
def edit_list_from_file(source: str, data: str, _shared_edit_data: Any) -> List[Edit]:
edits: List[Edit] = []
def edit_list_from_file(source: str, data: str, _shared_edit_data: Any) -> list[Edit]:
edits: list[Edit] = []
# The user might include C & C++, if they forget, it is better not to operate on C.
if source.lower().endswith((".h", ".c")):
@@ -744,8 +740,8 @@ class edit_generators:
is_default = True
@staticmethod
def edit_list_from_file(source: str, data: str, _shared_edit_data: Any) -> List[Edit]:
edits: List[Edit] = []
def edit_list_from_file(source: str, data: str, _shared_edit_data: Any) -> list[Edit]:
edits: list[Edit] = []
# The user might include C & C++, if they forget, it is better not to operate on C.
if source.lower().endswith((".h", ".c")):
@@ -787,7 +783,7 @@ class edit_generators:
is_default = True
@staticmethod
def edit_list_from_file(_source: str, data: str, _shared_edit_data: Any) -> List[Edit]:
def edit_list_from_file(_source: str, data: str, _shared_edit_data: Any) -> list[Edit]:
edits = []
for use_brackets in (True, False):
@@ -862,7 +858,7 @@ class edit_generators:
is_default = True
@staticmethod
def edit_list_from_file(_source: str, data: str, _shared_edit_data: Any) -> List[Edit]:
def edit_list_from_file(_source: str, data: str, _shared_edit_data: Any) -> list[Edit]:
edits = []
for use_brackets in (True, False):
@@ -947,7 +943,7 @@ class edit_generators:
is_default = False
@staticmethod
def edit_list_from_file(_source: str, data: str, _shared_edit_data: Any) -> List[Edit]:
def edit_list_from_file(_source: str, data: str, _shared_edit_data: Any) -> list[Edit]:
edits = []
# for match in re.finditer(r"( [a-zA-Z0-9_]+ [a-zA-Z0-9_]+ = [A-Z][A-Z_0-9_]*;)", data):
@@ -978,7 +974,7 @@ class edit_generators:
is_default = True
@staticmethod
def edit_list_from_file(_source: str, data: str, _shared_edit_data: Any) -> List[Edit]:
def edit_list_from_file(_source: str, data: str, _shared_edit_data: Any) -> list[Edit]:
edits = []
# Keep:
@@ -1016,7 +1012,7 @@ class edit_generators:
is_default = True
@staticmethod
def edit_list_from_file(_source: str, data: str, _shared_edit_data: Any) -> List[Edit]:
def edit_list_from_file(_source: str, data: str, _shared_edit_data: Any) -> list[Edit]:
edits = []
# Remove `return (NULL);`
@@ -1045,7 +1041,7 @@ class edit_generators:
is_default = True
@staticmethod
def edit_list_from_file(_source: str, data: str, _shared_edit_data: Any) -> List[Edit]:
def edit_list_from_file(_source: str, data: str, _shared_edit_data: Any) -> list[Edit]:
edits = []
# `strcmp(a, b) == 0` -> `STREQ(a, b)`
@@ -1090,8 +1086,8 @@ class edit_generators:
is_default = True
@staticmethod
def edit_list_from_file(source: str, data: str, _shared_edit_data: Any) -> List[Edit]:
edits: List[Edit] = []
def edit_list_from_file(source: str, data: str, _shared_edit_data: Any) -> list[Edit]:
edits: list[Edit] = []
# The user might include C & C++, if they forget, it is better not to operate on C.
if source.lower().endswith((".h", ".c")):
@@ -1130,7 +1126,7 @@ class edit_generators:
is_default = True
@staticmethod
def edit_list_from_file(_source: str, data: str, _shared_edit_data: Any) -> List[Edit]:
def edit_list_from_file(_source: str, data: str, _shared_edit_data: Any) -> list[Edit]:
edits = []
# `BLI_strncpy(a, b, sizeof(a))` -> `STRNCPY(a, b)`
@@ -1190,7 +1186,7 @@ class edit_generators:
is_default = True
@staticmethod
def edit_list_from_file(_source: str, data: str, _shared_edit_data: Any) -> List[Edit]:
def edit_list_from_file(_source: str, data: str, _shared_edit_data: Any) -> list[Edit]:
edits = []
# Note that this replacement is only valid in some cases,
# so only apply with validation that binary output matches.
@@ -1217,7 +1213,7 @@ class edit_generators:
is_default = False
@staticmethod
def edit_list_from_file(_source: str, data: str, _shared_edit_data: Any) -> List[Edit]:
def edit_list_from_file(_source: str, data: str, _shared_edit_data: Any) -> list[Edit]:
edits = []
re_cxx_cast = re.compile(r"[a-z_]+<([^\>]+)>\((.*)\)")
@@ -1358,7 +1354,7 @@ class edit_generators:
is_default = False
@staticmethod
def edit_list_from_file(_source: str, data: str, _shared_edit_data: Any) -> List[Edit]:
def edit_list_from_file(_source: str, data: str, _shared_edit_data: Any) -> list[Edit]:
edits = []
# Give up after searching for a bracket this many characters and finding none.
@@ -1483,7 +1479,7 @@ class edit_generators:
def setup(cls) -> Any:
# For each file replace `pragma once` with old-style header guard.
# This is needed so we can remove the header with the knowledge the source file didn't use it indirectly.
files: List[Tuple[str, str, str, str]] = []
files: list[tuple[str, str, str, str]] = []
shared_edit_data = {
'files': files,
}
@@ -1529,7 +1525,7 @@ class edit_generators:
fh.write(data)
@classmethod
def edit_list_from_file(cls, _source: str, data: str, _shared_edit_data: Any) -> List[Edit]:
def edit_list_from_file(cls, _source: str, data: str, _shared_edit_data: Any) -> list[Edit]:
edits = []
# Remove include.
@@ -1569,9 +1565,9 @@ class edit_generators:
is_default = True
@staticmethod
def edit_list_from_file(source: str, data: str, _shared_edit_data: Any) -> List[Edit]:
def edit_list_from_file(source: str, data: str, _shared_edit_data: Any) -> list[Edit]:
edits: List[Edit] = []
edits: list[Edit] = []
# The user might include C & C++, if they forget, it is better not to operate on C.
if source.lower().endswith((".h", ".c")):
@@ -1650,9 +1646,9 @@ class edit_generators:
def test_edit(
source: str,
output: str,
output_bytes: Optional[bytes],
output_bytes: bytes | None,
build_args: Sequence[str],
build_cwd: Optional[str],
build_cwd: str | None,
data: str,
data_test: str,
*,
@@ -1697,7 +1693,7 @@ def test_edit(
# -----------------------------------------------------------------------------
# List Fix Functions
def edit_function_get_all(*, is_default: Optional[bool] = None) -> List[str]:
def edit_function_get_all(*, is_default: bool | None = None) -> list[str]:
fixes = []
for name in dir(edit_generators):
value = getattr(edit_generators, name)
@@ -1710,7 +1706,7 @@ def edit_function_get_all(*, is_default: Optional[bool] = None) -> List[str]:
return fixes
def edit_class_from_id(name: str) -> Type[EditGenerator]:
def edit_class_from_id(name: str) -> type[EditGenerator]:
result = getattr(edit_generators, name)
assert issubclass(result, EditGenerator)
# MYPY 0.812 doesn't recognize the assert above.
@@ -1769,7 +1765,7 @@ def wash_source_with_edit(
source: str,
output: str,
build_args: Sequence[str],
build_cwd: Optional[str],
build_cwd: str | None,
skip_test: bool,
verbose_compile: bool,
verbose_edit_actions: bool,
@@ -1788,7 +1784,7 @@ def wash_source_with_edit(
#
# This is a heavy solution that guarantees edits never oscillate between
# multiple states, so re-visiting a previously visited state will always exit.
data_states: Set[str] = set()
data_states: set[str] = set()
# When overlapping edits are found, keep attempting edits.
edit_again = True
@@ -1888,7 +1884,7 @@ def wash_source_with_edit_list(
source: str,
output: str,
build_args: Sequence[str],
build_cwd: Optional[str],
build_cwd: str | None,
skip_test: bool,
verbose_compile: bool,
verbose_edit_actions: bool,
@@ -1915,7 +1911,7 @@ def wash_source_with_edit_list(
def run_edits_on_directory(
*,
build_dir: str,
regex_list: List[re.Pattern[str]],
regex_list: list[re.Pattern[str]],
edits_to_apply: Sequence[str],
skip_test: bool,
jobs: int,
@@ -1957,7 +1953,7 @@ def run_edits_on_directory(
os.path.join("source"),
)
def split_build_args_with_cwd(build_args_str: str) -> Tuple[Sequence[str], Optional[str]]:
def split_build_args_with_cwd(build_args_str: str) -> tuple[Sequence[str], str | None]:
import shlex
build_args = shlex.split(build_args_str)
@@ -1968,7 +1964,7 @@ def run_edits_on_directory(
del build_args[0:3]
return build_args, cwd
def output_from_build_args(build_args: Sequence[str], cwd: Optional[str]) -> str:
def output_from_build_args(build_args: Sequence[str], cwd: str | None) -> str:
i = build_args.index("-o")
# Assume the output is a relative path if a CWD was set.
if cwd:

View File

@@ -12,9 +12,11 @@ import sys
from pathlib import Path
from typing import (
Iterator,
NamedTuple,
)
from collections.abc import (
Iterator,
)
# -----------------------------------------------------------------------------
# Path Constants

View File

@@ -2,10 +2,9 @@
#
# SPDX-License-Identifier: GPL-2.0-or-later
from typing import (
from collections.abc import (
Callable,
Generator,
Optional,
Iterator,
Sequence,
)
@@ -13,7 +12,7 @@ TextOpFn = Callable[
# file_name, data_src
[str, str],
# data_dst or None when no change is made.
Optional[str]
str | None,
]
@@ -40,7 +39,7 @@ def run(
import os
def source_files(path: str) -> Generator[str, None, None]:
def source_files(path: str) -> Iterator[str]:
for dirpath, dirnames, filenames in os.walk(path):
dirnames[:] = [d for d in dirnames if not d.startswith(".")]
for filename in filenames: