Cleanup: reduce right-shift in Python scripts
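This cleanup moves wrapped calls and signatures away from the style that aligns continuation lines under the opening parenthesis, which pushes code far to the right, to a hanging indent with a trailing comma and the closing bracket on its own line. A minimal before/after sketch of the convention (illustrative only; frobnicate and its arguments are invented, not code from this diff):

    # Before: continuation lines aligned under the opening parenthesis,
    # right-shifting every wrapped argument.
    result = frobnicate(first_argument,
                        second_argument,
                        third_argument)

    # After: hanging indent, trailing comma, closing parenthesis dedented.
    result = frobnicate(
        first_argument,
        second_argument,
        third_argument,
    )

A side benefit: renaming frobnicate later no longer forces re-indenting every continuation line, so follow-up diffs stay small.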
@@ -860,8 +860,11 @@ def cached_commits_store(list_of_commits: list[CommitInfo], path_to_cached_commi
     # on commits that are already sorted (and they're not interested in).
     data_to_cache = {}
     for commit in list_of_commits:
-        if (commit.classification not in (NEEDS_MANUAL_SORTING, IGNORED)) and not (
-                commit.has_been_overwritten) and (commit.module != UNKNOWN):
+        if (
+                (commit.classification not in (NEEDS_MANUAL_SORTING, IGNORED)) and
+                (commit.has_been_overwritten is False) and
+                (commit.module != UNKNOWN)
+        ):
             commit_hash, data = commit.prepare_for_cache()
             data_to_cache[commit_hash] = data
 
@@ -1040,14 +1043,16 @@ def validate_arguments(args: argparse.Namespace) -> bool:
 # -----------------------------------------------------------------------------
 # Main Function
 
-def gather_and_sort_commits(current_release_tag: str,
-                            current_version: str,
-                            previous_release_tag: str,
-                            previous_version: str,
-                            backport_tasks: list[str],
-                            cache: bool = False,
-                            silence: bool = False,
-                            single_thread: bool = False) -> list[CommitInfo]:
+def gather_and_sort_commits(
+        current_release_tag: str,
+        current_version: str,
+        previous_release_tag: str,
+        previous_version: str,
+        backport_tasks: list[str],
+        cache: bool = False,
+        silence: bool = False,
+        single_thread: bool = False,
+) -> list[CommitInfo]:
     set_crawl_delay()
 
     dir_of_sciprt = Path(__file__).parent.resolve()
@@ -1094,7 +1099,8 @@ def main() -> int:
         args.backport_tasks,
         args.cache,
         args.silence,
-        args.single_thread)
+        args.single_thread,
+    )
 
     print_release_notes(list_of_commits)
     return 0

@@ -1346,9 +1346,11 @@ def process(layer_name, lineset_name):
     if len(pattern) > 0:
         sampling = 1.0
         controller = SplitPatternController(pattern, sampling)
-        Operators.sequential_split(SplitPatternStartingUP0D(controller),
-                                   SplitPatternStoppingUP0D(controller),
-                                   sampling)
+        Operators.sequential_split(
+            SplitPatternStartingUP0D(controller),
+            SplitPatternStoppingUP0D(controller),
+            sampling,
+        )
     # sort selected chains
     if linestyle.use_sorting:
         integration = integration_types.get(linestyle.integration_type, IntegrationType.MEAN)

@@ -298,8 +298,10 @@ def dump_rna_messages(msgs, reports, settings, verbose=False):
             if item.name and prop_name_validate(cls, item.name, item.identifier):
                 process_msg(msgs, msgctxt, item.name, msgsrc, reports, check_ctxt_rna, settings)
             if item.description:
-                process_msg(msgs, default_context, item.description, msgsrc, reports, check_ctxt_rna_tip,
-                            settings)
+                process_msg(
+                    msgs, default_context, item.description, msgsrc, reports, check_ctxt_rna_tip,
+                    settings,
+                )
         for item in prop.enum_items_static:
             if item.identifier in done_items:
                 continue
@@ -308,8 +310,10 @@ def dump_rna_messages(msgs, reports, settings, verbose=False):
             if item.name and prop_name_validate(cls, item.name, item.identifier):
                 process_msg(msgs, msgctxt, item.name, msgsrc, reports, check_ctxt_rna, settings)
             if item.description:
-                process_msg(msgs, default_context, item.description, msgsrc, reports, check_ctxt_rna_tip,
-                            settings)
+                process_msg(
+                    msgs, default_context, item.description, msgsrc, reports, check_ctxt_rna_tip,
+                    settings,
+                )
 
     def walk_tools_definitions(cls):
         from bl_ui.space_toolsystem_common import ToolDef
@@ -954,8 +958,10 @@ def dump_template_messages(msgs, reports, settings):
     for workspace_name in sorted(workspace_names):
         for msgsrc in sorted(workspace_names[workspace_name]):
             msgsrc = "Workspace from template " + msgsrc
-            process_msg(msgs, msgctxt, workspace_name, msgsrc,
-                        reports, None, settings)
+            process_msg(
+                msgs, msgctxt, workspace_name, msgsrc,
+                reports, None, settings,
+            )
 
 
 def dump_asset_messages(msgs, reports, settings):
@@ -977,8 +983,10 @@ def dump_asset_messages(msgs, reports, settings):
 
     msgsrc = "Asset catalog from " + settings.ASSET_CATALOG_FILE
     for catalog in sorted(catalogs):
-        process_msg(msgs, settings.DEFAULT_CONTEXT, catalog, msgsrc,
-                    reports, None, settings)
+        process_msg(
+            msgs, settings.DEFAULT_CONTEXT, catalog, msgsrc,
+            reports, None, settings,
+        )
 
     # Parse the asset blend files
     asset_files = {}
@@ -1015,20 +1023,28 @@ def dump_asset_messages(msgs, reports, settings):
         for asset in sorted(asset_files[asset_file], key=lambda a: a["name"]):
             name, description = asset["name"], asset["description"]
             msgsrc = "Asset name from file " + asset_file
-            process_msg(msgs, settings.DEFAULT_CONTEXT, name, msgsrc,
-                        reports, None, settings)
+            process_msg(
+                msgs, settings.DEFAULT_CONTEXT, name, msgsrc,
+                reports, None, settings,
+            )
             msgsrc = "Asset description from file " + asset_file
-            process_msg(msgs, settings.DEFAULT_CONTEXT, description, msgsrc,
-                        reports, None, settings)
+            process_msg(
+                msgs, settings.DEFAULT_CONTEXT, description, msgsrc,
+                reports, None, settings,
+            )
 
             if "sockets" in asset:
                 for socket_name, socket_description in asset["sockets"]:
                     msgsrc = f"Socket name from node group {name}, file {asset_file}"
-                    process_msg(msgs, settings.DEFAULT_CONTEXT, socket_name, msgsrc,
-                                reports, None, settings)
+                    process_msg(
+                        msgs, settings.DEFAULT_CONTEXT, socket_name, msgsrc,
+                        reports, None, settings,
+                    )
                     msgsrc = f"Socket description from node group {name}, file {asset_file}"
-                    process_msg(msgs, settings.DEFAULT_CONTEXT, socket_description, msgsrc,
-                                reports, None, settings)
+                    process_msg(
+                        msgs, settings.DEFAULT_CONTEXT, socket_description, msgsrc,
+                        reports, None, settings,
+                    )
 
 
 def dump_addon_bl_info(msgs, reports, module, settings):
@@ -1137,8 +1153,10 @@ def dump_messages(do_messages, do_checks, settings):
 
     # Get strings specific to translations' menu.
     for lng in settings.LANGUAGES:
-        process_msg(msgs, settings.DEFAULT_CONTEXT, lng[1], "Languages’ labels from bl_i18n_utils/settings.py",
-                    reports, None, settings)
+        process_msg(
+            msgs, settings.DEFAULT_CONTEXT, lng[1], "Languages’ labels from bl_i18n_utils/settings.py",
+            reports, None, settings,
+        )
 
     # Get strings from asset catalogs and blend files.
     # This loads each asset blend file in turn.

@@ -506,8 +506,10 @@ class I18nMessages:
 
         msgs = cls(uid=uid, settings=settings)
         key = settings.PO_HEADER_KEY
-        msgs.msgs[key] = I18nMessage([key[0]], [key[1]], msgstr.split("\n"), comment.split("\n"),
-                                     False, False, settings=settings)
+        msgs.msgs[key] = I18nMessage(
+            [key[0]], [key[1]], msgstr.split("\n"), comment.split("\n"),
+            False, False, settings=settings,
+        )
         msgs.update_info()
 
         return msgs
@@ -935,8 +937,10 @@ class I18nMessages:
            self.parsing_errors.append((line_nr, "{} context/msgid is already in current messages!".format(msgkey)))
            return
 
-        self.msgs[msgkey] = I18nMessage(msgctxt_lines, msgid_lines, msgstr_lines, comment_lines,
-                                        is_commented, is_fuzzy, settings=self.settings)
+        self.msgs[msgkey] = I18nMessage(
+            msgctxt_lines, msgid_lines, msgstr_lines, comment_lines,
+            is_commented, is_fuzzy, settings=self.settings,
+        )
 
         # Let's clean up and get ready for next message!
         reading_msgid = reading_msgstr = reading_msgctxt = reading_comment = False
@@ -1475,8 +1479,10 @@ class I18n:
         """
         root_dir, pot_file = src
         if pot_file and os.path.isfile(pot_file):
-            self.trans[self.settings.PARSER_TEMPLATE_ID] = I18nMessages(self.settings.PARSER_TEMPLATE_ID, 'PO',
-                                                                        pot_file, pot_file, settings=self.settings)
+            self.trans[self.settings.PARSER_TEMPLATE_ID] = I18nMessages(
+                self.settings.PARSER_TEMPLATE_ID, 'PO',
+                pot_file, pot_file, settings=self.settings,
+            )
             self.src_po[self.settings.PARSER_TEMPLATE_ID] = pot_file
 
         for uid, po_file in get_po_files_from_dir(root_dir, langs):
@@ -1496,28 +1502,36 @@ class I18n:
             msgs = ()
         for key, (sources, gen_comments), *translations in msgs:
             if self.settings.PARSER_TEMPLATE_ID not in self.trans:
-                self.trans[self.settings.PARSER_TEMPLATE_ID] = I18nMessages(self.settings.PARSER_TEMPLATE_ID,
-                                                                            settings=self.settings)
+                self.trans[self.settings.PARSER_TEMPLATE_ID] = I18nMessages(
+                    self.settings.PARSER_TEMPLATE_ID,
+                    settings=self.settings,
+                )
                 self.src[self.settings.PARSER_TEMPLATE_ID] = self.py_file
             if key in self.trans[self.settings.PARSER_TEMPLATE_ID].msgs:
                 print("ERROR! key {} is defined more than once! Skipping re-definitions!")
                 continue
             custom_src = [c for c in sources if c.startswith("bpy.")]
             src = [c for c in sources if not c.startswith("bpy.")]
-            common_comment_lines = [self.settings.PO_COMMENT_PREFIX_GENERATED + c for c in gen_comments] + \
-                                   [self.settings.PO_COMMENT_PREFIX_SOURCE_CUSTOM + c for c in custom_src] + \
-                                   [self.settings.PO_COMMENT_PREFIX_SOURCE + c for c in src]
+            common_comment_lines = (
+                [self.settings.PO_COMMENT_PREFIX_GENERATED + c for c in gen_comments] +
+                [self.settings.PO_COMMENT_PREFIX_SOURCE_CUSTOM + c for c in custom_src] +
+                [self.settings.PO_COMMENT_PREFIX_SOURCE + c for c in src]
+            )
             ctxt = [key[0]] if key[0] else [default_context]
-            self.trans[self.settings.PARSER_TEMPLATE_ID].msgs[key] = I18nMessage(ctxt, [key[1]], [""],
-                                                                                 common_comment_lines, False, False,
-                                                                                 settings=self.settings)
+            self.trans[self.settings.PARSER_TEMPLATE_ID].msgs[key] = I18nMessage(
+                ctxt, [key[1]], [""],
+                common_comment_lines, False, False,
+                settings=self.settings,
+            )
             for uid, msgstr, (is_fuzzy, user_comments) in translations:
                 if uid not in self.trans:
                     self.trans[uid] = I18nMessages(uid, settings=self.settings)
                     self.src[uid] = self.py_file
                 comment_lines = [self.settings.PO_COMMENT_PREFIX + c for c in user_comments] + common_comment_lines
-                self.trans[uid].msgs[key] = I18nMessage(ctxt, [key[1]], [msgstr], comment_lines, False, is_fuzzy,
-                                                        settings=self.settings)
+                self.trans[uid].msgs[key] = I18nMessage(
+                    ctxt, [key[1]], [msgstr], comment_lines, False, is_fuzzy,
+                    settings=self.settings,
+                )
         # key = self.settings.PO_HEADER_KEY
         # for uid, trans in self.trans.items():
         #     if key not in trans.msgs:
@@ -1659,9 +1673,11 @@ class I18n:
         if prev is None and nxt is None:
             print("WARNING: Looks like given python file {} has no auto-generated translations yet, will be added "
                   "at the end of the file, you can move that section later if needed...".format(dst))
-            txt = ([txt, "", self.settings.PARSER_PY_MARKER_BEGIN] +
-                   _gen_py(self, langs) +
-                   ["", self.settings.PARSER_PY_MARKER_END])
+            txt = (
+                [txt, "", self.settings.PARSER_PY_MARKER_BEGIN] +
+                _gen_py(self, langs) +
+                ["", self.settings.PARSER_PY_MARKER_END]
+            )
         else:
             # We completely replace the text found between start and end markers...
             txt = _gen_py(self, langs)

@@ -149,9 +149,11 @@ def log2vis(msgs, settings):
 
         # print(*btypes)
 
-        fbd.fribidi_get_par_embedding_levels(btypes, ln,
-                                             ctypes.byref(pbase_dir),
-                                             embed_lvl)
+        fbd.fribidi_get_par_embedding_levels(
+            btypes, ln,
+            ctypes.byref(pbase_dir),
+            embed_lvl,
+        )
 
         # print(*embed_lvl)
 

@@ -311,12 +311,15 @@ def axis_conversion(from_forward='Y', from_up='Z', to_forward='Y', to_up='Z'):
        raise Exception("Invalid axis arguments passed, "
                        "can't use up/forward on the same axis")
 
-    value = reduce(int.__or__, (_axis_convert_num[a] << (i * 3)
-                                for i, a in enumerate((from_forward,
-                                                       from_up,
-                                                       to_forward,
-                                                       to_up,
-                                                       ))))
+    value = reduce(
+        int.__or__,
+        (_axis_convert_num[a] << (i * 3) for i, a in enumerate((
+            from_forward,
+            from_up,
+            to_forward,
+            to_up,
+        )))
+    )
 
     for i, axis_lut in enumerate(_axis_convert_lut):
         if value in axis_lut:

@@ -387,10 +387,12 @@ def generate_to_string(vk_xml, header):
     for struct_to_generate in structs_to_generate:
         struct = root.find(f"types/type[@category='struct'][@name='{struct_to_generate}']")
         assert (struct is not None)
-        vk_to_string += generate_struct_to_string_cpp(struct,
-                                                      flags_to_generate,
-                                                      enums_to_generate,
-                                                      structs_to_generate)
+        vk_to_string += generate_struct_to_string_cpp(
+            struct,
+            flags_to_generate,
+            enums_to_generate,
+            structs_to_generate,
+        )
         vk_to_string += "\n"
 
     print(vk_to_string)

@@ -103,8 +103,10 @@ class TestPropArray(unittest.TestCase):
         return (expected_dtype, wrong_kind_dtype, wrong_size_dtype, expected_length, too_short_length,
                 get_flat_iterable_all_dimensions)
 
-    def do_test_foreach_getset_current_dimension(self, prop_array, expected_dtype, wrong_kind_dtype, wrong_size_dtype,
-                                                 expected_length, too_short_length, get_flat_iterable_all_dimensions):
+    def do_test_foreach_getset_current_dimension(
+            self, prop_array, expected_dtype, wrong_kind_dtype, wrong_size_dtype,
+            expected_length, too_short_length, get_flat_iterable_all_dimensions,
+    ):
         with self.assertRaises(TypeError):
             prop_array.foreach_set(range(too_short_length))
 

@@ -368,11 +368,13 @@ class BlendFileBlock(_blendfile_header.BlockHeader):
         self.file.handle.seek(ofs, os.SEEK_SET)
 
         print(dna_type_id, array_size, dna_size)
-        return DNA_IO.read_data(self.file.handle, self.file.header,
-                                is_pointer,
-                                dna_type_id,
-                                dna_size,
-                                array_size)
+        return DNA_IO.read_data(
+            self.file.handle, self.file.header,
+            is_pointer,
+            dna_type_id,
+            dna_size,
+            array_size,
+        )
 
     def get_recursive_iter(
             self, path, path_root=b"",