Big i18n tools update, I/II.

Notes:
* Everything is still a bit raw and sometimes hackish.
* Not every feature implemented yet.
* A bunch of cleanup is still needed.
* Doc needs to be updated too!
This commit is contained in:
Bastien Montagne
2013-02-24 08:50:55 +00:00
parent c9d1f6fc5b
commit 2c348d003e
9 changed files with 2578 additions and 1706 deletions


@@ -0,0 +1,891 @@
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENSE BLOCK *****
# <pep8 compliant>
# Populate a template file (POT format currently) from Blender RNA/py/C data.
# XXX: This script is meant to be used from inside Blender!
# You should not directly use this script, rather use update_msg.py!
import collections
import copy
import datetime
import os
import re
import sys
# XXX Relative import does not work here when used from Blender...
from bl_i18n_utils import settings as i18n_settings, utils
import bpy
##### Utils #####
# check for strings like "+%f°"
ignore_reg = re.compile(r"^(?:[-*.()/\\+%°0-9]|%d|%f|%s|%r|\s)*$")
filter_message = ignore_reg.match
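# Illustrative examples: filter_message("100%") matches (the string is skipped as
# untranslatable junk), while filter_message("Render") returns None (the string is kept for extraction).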
def init_spell_check(settings, lang="en_US"):
try:
from bl_i18n_utils import spell_check_utils
return spell_check_utils.SpellChecker(settings, lang)
except Exception as e:
print("Failed to import spell_check_utils ({})".format(str(e)))
return None
def _gen_check_ctxt(settings):
return {
"multi_rnatip": set(),
"multi_lines": set(),
"py_in_rna": set(),
"not_capitalized": set(),
"end_point": set(),
"undoc_ops": set(),
"spell_checker": init_spell_check(settings),
"spell_errors": {},
}
def _gen_reports(check_ctxt):
return {
"check_ctxt": check_ctxt,
"rna_structs": [],
"rna_structs_skipped": [],
"rna_props": [],
"rna_props_skipped": [],
"py_messages": [],
"py_messages_skipped": [],
"src_messages": [],
"src_messages_skipped": [],
"messages_skipped": set(),
}
def check(check_ctxt, msgs, key, msgsrc, settings):
"""
Performs a set of checks over the given key (context, message)...
"""
if check_ctxt is None:
return
multi_rnatip = check_ctxt.get("multi_rnatip")
multi_lines = check_ctxt.get("multi_lines")
py_in_rna = check_ctxt.get("py_in_rna")
not_capitalized = check_ctxt.get("not_capitalized")
end_point = check_ctxt.get("end_point")
undoc_ops = check_ctxt.get("undoc_ops")
spell_checker = check_ctxt.get("spell_checker")
spell_errors = check_ctxt.get("spell_errors")
if multi_rnatip is not None:
if key in msgs and key not in multi_rnatip:
multi_rnatip.add(key)
if multi_lines is not None:
if '\n' in key[1]:
multi_lines.add(key)
if py_in_rna is not None:
if key in py_in_rna[1]:
py_in_rna[0].add(key)
if not_capitalized is not None:
if (key[1] not in settings.WARN_MSGID_NOT_CAPITALIZED_ALLOWED and
key[1][0].isalpha() and not key[1][0].isupper()):
not_capitalized.add(key)
if end_point is not None:
if (key[1].strip().endswith('.') and not key[1].strip().endswith('...') and
key[1] not in settings.WARN_MSGID_END_POINT_ALLOWED):
end_point.add(key)
if undoc_ops is not None:
if key[1] == settings.UNDOC_OPS_STR:
undoc_ops.add(key)
if spell_checker is not None and spell_errors is not None:
err = spell_checker.check(key[1])
if err:
spell_errors[key] = err
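# Illustrative msgids and the checks above that would flag them:
#   "new object"                   -> not_capitalized (first letter alphabetic but lowercase)
#   "Delete the selected objects." -> end_point (ends with '.' but not with '...')
#   "Several\nlines"               -> multi_lines (contains a newline)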
def print_info(reports, pot):
def _print(*args, **kwargs):
kwargs["file"] = sys.stderr
print(*args, **kwargs)
pot.update_info()
_print("{} RNA structs were processed (among which {} were skipped), containing {} RNA properties "
"(among which {} were skipped).".format(len(reports["rna_structs"]), len(reports["rna_structs_skipped"]),
len(reports["rna_props"]), len(reports["rna_props_skipped"])))
_print("{} messages were extracted from Python UI code (among which {} were skipped), and {} from C source code "
"(among which {} were skipped).".format(len(reports["py_messages"]), len(reports["py_messages_skipped"]),
len(reports["src_messages"]), len(reports["src_messages_skipped"])))
_print("{} messages were rejected.".format(len(reports["messages_skipped"])))
_print("\n")
_print("Current POT stats:")
pot.print_stats(prefix="\t", output=_print)
_print("\n")
check_ctxt = reports["check_ctxt"]
if check_ctxt is None:
return
multi_rnatip = check_ctxt.get("multi_rnatip")
multi_lines = check_ctxt.get("multi_lines")
py_in_rna = check_ctxt.get("py_in_rna")
not_capitalized = check_ctxt.get("not_capitalized")
end_point = check_ctxt.get("end_point")
undoc_ops = check_ctxt.get("undoc_ops")
spell_errors = check_ctxt.get("spell_errors")
# XXX Temp, no multi_rnatip nor py_in_rna, see below.
keys = multi_lines | not_capitalized | end_point | undoc_ops | spell_errors.keys()
if keys:
_print("WARNINGS:")
for key in keys:
if undoc_ops and key in undoc_ops:
_print("\tThe following operators are undocumented!")
else:
_print("\t{}”|“{}”:".format(*key))
if multi_lines and key in multi_lines:
_print("\t\t-> newline in this message!")
if not_capitalized and key in not_capitalized:
_print("\t\t-> message not capitalized!")
if end_point and key in end_point:
_print("\t\t-> message with endpoint!")
# XXX Hide this one for now, too many false positives.
# if multi_rnatip and key in multi_rnatip:
# _print("\t\t-> tip used in several RNA items")
# if py_in_rna and key in py_in_rna:
# _print("\t\t-> RNA message also used in py UI code!")
if spell_errors and spell_errors.get(key):
lines = ["\t\t-> {}: misspelled, suggestions are ({})".format(w, "'" + "', '".join(errs) + "'")
for w, errs in spell_errors[key]]
_print("\n".join(lines))
_print("\t\t{}".format("\n\t\t".join(pot.msgs[key].sources)))
def enable_addons(addons={}, support={}, disable=False):
"""
Enable (or disable) addons based either on a set of names, or a set of 'support' types.
Returns the list of all affected addons (as fake modules)!
"""
import addon_utils
userpref = bpy.context.user_preferences
used_ext = {ext.module for ext in userpref.addons}
ret = [mod for mod in addon_utils.modules(addon_utils.addons_fake_modules)
if ((addons and mod.__name__ in addons) or
(not addons and addon_utils.module_bl_info(mod)["support"] in support))]
for mod in ret:
module_name = mod.__name__
if disable:
if module_name not in used_ext:
continue
print(" Disabling module ", module_name)
bpy.ops.wm.addon_disable(module=module_name)
else:
if module_name in used_ext:
continue
print(" Enabling module ", module_name)
bpy.ops.wm.addon_enable(module=module_name)
# XXX There are currently some problems with bpy/rna...
# *Very* tricky to solve!
# So this is a hack to make all newly added operators visible to
# bpy.types.OperatorProperties.__subclasses__()
for cat in dir(bpy.ops):
cat = getattr(bpy.ops, cat)
for op in dir(cat):
getattr(cat, op).get_rna()
return ret
def process_msg(msgs, msgctxt, msgid, msgsrc, reports, check_ctxt, settings):
if filter_message(msgid):
reports["messages_skipped"].add((msgid, msgsrc))
return
if not msgctxt:
# We do *not* want any "" context!
msgctxt = settings.DEFAULT_CONTEXT
# Always unescape keys!
msgctxt = utils.I18nMessage.do_unescape(msgctxt)
msgid = utils.I18nMessage.do_unescape(msgid)
key = (msgctxt, msgid)
check(check_ctxt, msgs, key, msgsrc, settings)
msgsrc = settings.PO_COMMENT_PREFIX_SOURCE_CUSTOM + msgsrc
if key not in msgs:
msgs[key] = utils.I18nMessage([msgctxt], [msgid], [], [msgsrc], settings=settings)
else:
msgs[key].comment_lines.append(msgsrc)
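# Illustrative use (the RNA path is made up): process_msg(msgs, "", "Foo Bar",
# "bpy.types.Scene.foo_bar", reports, check_ctxt, settings) stores the message under the key
# (settings.DEFAULT_CONTEXT, "Foo Bar"), with its source recorded as a "#. :src: " comment line.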
##### RNA #####
def dump_messages_rna(msgs, reports, settings):
"""
Dump into messages dict all RNA-defined UI messages (labels and tooltips).
"""
def class_blacklist():
blacklist_rna_class = [
# core classes
"Context", "Event", "Function", "UILayout", "BlendData", "UnknownType",
# registerable classes
"Panel", "Menu", "Header", "RenderEngine", "Operator", "OperatorMacro", "Macro", "KeyingSetInfo",
# window classes
"Window",
]
# Collect internal operators
# extend with all internal operators
# note that this uses internal api introspection functions
# all possible operator names
op_ids = set(cls.bl_rna.identifier for cls in bpy.types.OperatorProperties.__subclasses__()) | \
set(cls.bl_rna.identifier for cls in bpy.types.Operator.__subclasses__()) | \
set(cls.bl_rna.identifier for cls in bpy.types.OperatorMacro.__subclasses__())
get_instance = __import__("_bpy").ops.get_instance
# path_resolve = type(bpy.context).__base__.path_resolve
for idname in op_ids:
op = get_instance(idname)
# XXX Do not skip INTERNAL's anymore, some of those ops show up in UI now!
# if 'INTERNAL' in path_resolve(op, "bl_options"):
# blacklist_rna_class.append(idname)
# Collect builtin classes we don't need to doc
blacklist_rna_class.append("Property")
blacklist_rna_class.extend([cls.__name__ for cls in bpy.types.Property.__subclasses__()])
# Collect classes which are attached to collections, these are api access only.
collection_props = set()
for cls_id in dir(bpy.types):
cls = getattr(bpy.types, cls_id)
for prop in cls.bl_rna.properties:
if prop.type == 'COLLECTION':
prop_cls = prop.srna
if prop_cls is not None:
collection_props.add(prop_cls.identifier)
blacklist_rna_class.extend(sorted(collection_props))
return blacklist_rna_class
check_ctxt_rna = check_ctxt_rna_tip = None
check_ctxt = reports["check_ctxt"]
if check_ctxt:
check_ctxt_rna = {
"multi_lines": check_ctxt.get("multi_lines"),
"not_capitalized": check_ctxt.get("not_capitalized"),
"end_point": check_ctxt.get("end_point"),
"undoc_ops": check_ctxt.get("undoc_ops"),
"spell_checker": check_ctxt.get("spell_checker"),
"spell_errors": check_ctxt.get("spell_errors"),
}
check_ctxt_rna_tip = check_ctxt_rna
check_ctxt_rna_tip["multi_rnatip"] = check_ctxt.get("multi_rnatip")
default_context = settings.DEFAULT_CONTEXT
# Function definitions
def walk_properties(cls):
bl_rna = cls.bl_rna
# Get our parents' properties, to not export them multiple times.
bl_rna_base = bl_rna.base
if bl_rna_base:
bl_rna_base_props = set(bl_rna_base.properties.values())
else:
bl_rna_base_props = set()
for prop in bl_rna.properties:
# Only write this property if our parent hasn't got it.
if prop in bl_rna_base_props:
continue
if prop.identifier == "rna_type":
continue
reports["rna_props"].append((cls, prop))
msgsrc = "bpy.types.{}.{}".format(bl_rna.identifier, prop.identifier)
msgctxt = prop.translation_context or default_context
if prop.name and (prop.name != prop.identifier or msgctxt != default_context):
process_msg(msgs, msgctxt, prop.name, msgsrc, reports, check_ctxt_rna, settings)
if prop.description:
process_msg(msgs, default_context, prop.description, msgsrc, reports, check_ctxt_rna_tip, settings)
if isinstance(prop, bpy.types.EnumProperty):
for item in prop.enum_items:
msgsrc = "bpy.types.{}.{}:'{}'".format(bl_rna.identifier, prop.identifier, item.identifier)
if item.name and item.name != item.identifier:
process_msg(msgs, msgctxt, item.name, msgsrc, reports, check_ctxt_rna, settings)
if item.description:
process_msg(msgs, default_context, item.description, msgsrc, reports, check_ctxt_rna_tip,
settings)
blacklist_rna_class = class_blacklist()
def walk_class(cls):
bl_rna = cls.bl_rna
reports["rna_structs"].append(cls)
if bl_rna.identifier in blacklist_rna_class:
reports["rna_structs_skipped"].append(cls)
return
# XXX The translation_context of Operator sub-classes is not "good"!
# So ignore those Operator sub-classes (anyway, we will get the same messages from OperatorProperties sub-classes!)...
if issubclass(cls, bpy.types.Operator):
reports["rna_structs_skipped"].append(cls)
return
msgsrc = "bpy.types." + bl_rna.identifier
msgctxt = bl_rna.translation_context or default_context
if bl_rna.name and (bl_rna.name != bl_rna.identifier or msgctxt != default_context):
process_msg(msgs, msgctxt, bl_rna.name, msgsrc, reports, check_ctxt_rna, settings)
if bl_rna.description:
process_msg(msgs, default_context, bl_rna.description, msgsrc, reports, check_ctxt_rna_tip, settings)
if hasattr(bl_rna, 'bl_label') and bl_rna.bl_label:
process_msg(msgs, msgctxt, bl_rna.bl_label, msgsrc, reports, check_ctxt_rna, settings)
walk_properties(cls)
def walk_keymap_hierarchy(hier, msgsrc_prev):
for lvl in hier:
msgsrc = msgsrc_prev + "." + lvl[1]
process_msg(msgs, default_context, lvl[0], msgsrc, reports, None, settings)
if lvl[3]:
walk_keymap_hierarchy(lvl[3], msgsrc)
# Dump Messages
def process_cls_list(cls_list):
if not cls_list:
return
def full_class_id(cls):
""" gives us 'ID.Lamp.AreaLamp' which is best for sorting."""
cls_id = ""
bl_rna = cls.bl_rna
while bl_rna:
cls_id = bl_rna.identifier + "." + cls_id
bl_rna = bl_rna.base
return cls_id
cls_list.sort(key=full_class_id)
for cls in cls_list:
walk_class(cls)
# Recursively process subclasses.
process_cls_list(cls.__subclasses__())
# Parse everything (recursively parsing from bpy_struct "class"...).
process_cls_list(bpy.types.ID.__base__.__subclasses__())
# And parse keymaps!
from bpy_extras.keyconfig_utils import KM_HIERARCHY
walk_keymap_hierarchy(KM_HIERARCHY, "KM_HIERARCHY")
##### Python source code #####
def dump_py_messages_from_files(msgs, reports, files, settings):
"""
Dump text inlined in the python files given, e.g. 'My Name' in:
layout.prop("someprop", text="My Name")
"""
import ast
bpy_struct = bpy.types.ID.__base__
# Helper function
def extract_strings_ex(node, is_split=False):
"""
Recursively get strings, needed in case we have "Blah" + "Blah" passed as an argument; in that case it won't
evaluate to a single string. However, break on some kinds of stopper nodes, e.g. Subscript.
"""
if type(node) == ast.Str:
eval_str = ast.literal_eval(node)
if eval_str:
yield (is_split, eval_str, (node,))
else:
is_split = (type(node) in separate_nodes)
for nd in ast.iter_child_nodes(node):
if type(nd) not in stopper_nodes:
yield from extract_strings_ex(nd, is_split=is_split)
def _extract_string_merge(estr_ls, nds_ls):
return "".join(s for s in estr_ls if s is not None), tuple(n for n in nds_ls if n is not None)
def extract_strings(node):
estr_ls = []
nds_ls = []
for is_split, estr, nds in extract_strings_ex(node):
estr_ls.append(estr)
nds_ls.extend(nds)
ret = _extract_string_merge(estr_ls, nds_ls)
return ret
def extract_strings_split(node):
"""
Returns a list of args as returned by 'extract_strings()', but split into groups based on separate_nodes; this way
expressions like ("A" if test else "B") won't be merged, but "A" + "B" will.
"""
estr_ls = []
nds_ls = []
bag = []
for is_split, estr, nds in extract_strings_ex(node):
if is_split:
bag.append((estr_ls, nds_ls))
estr_ls = []
nds_ls = []
estr_ls.append(estr)
nds_ls.extend(nds)
bag.append((estr_ls, nds_ls))
return [_extract_string_merge(estr_ls, nds_ls) for estr_ls, nds_ls in bag]
def _ctxt_to_ctxt(node):
return extract_strings(node)[0]
def _op_to_ctxt(node):
opname, _ = extract_strings(node)
if not opname:
return settings.DEFAULT_CONTEXT
op = bpy.ops
for n in opname.split('.'):
op = getattr(op, n)
try:
return op.get_rna().bl_rna.translation_context
except Exception as e:
default_op_context = bpy.app.translations.contexts.operator_default
print("ERROR: ", str(e))
print(" Assuming default operator context '{}'".format(default_op_context))
return default_op_context
# Gather function names.
# In addition to UI funcs, also parse pgettext ones...
# Tuples of (module name, (short names, ...)).
pgettext_variants = (
("pgettext", ("_",)),
("pgettext_iface", ("iface_",)),
("pgettext_tip", ("tip_",))
)
pgettext_variants_args = {"msgid": (0, {"msgctxt": 1})}
# key: msgid keywords.
# val: tuples of ((keywords,), context_getter_func) to get a context for that msgid.
# Note: order is important, first one wins!
translate_kw = {
"text": ((("text_ctxt",), _ctxt_to_ctxt),
(("operator",), _op_to_ctxt),
),
"msgid": ((("msgctxt",), _ctxt_to_ctxt),
),
}
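# Illustrative: in layout.prop(data, "prop", text="Size", text_ctxt="some_ctxt") the "text"
# msgid takes its context from the "text_ctxt" argument, while in
# layout.operator("mesh.subdivide", text="Subdivide") the context comes from the operator's
# own translation_context via _op_to_ctxt() (first matching getter wins).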
context_kw_set = {}
for k, ctxts in translate_kw.items():
s = set()
for c, _ in ctxts:
s |= set(c)
context_kw_set[k] = s
# {func_id: {msgid: (arg_pos,
# {msgctxt: arg_pos,
# ...
# }
# ),
# ...
# },
# ...
# }
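# Illustrative resulting entry (argument positions are made up):
#   func_translate_args["prop"] = {"text": (2, {"text_ctxt": 6})}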
func_translate_args = {}
# First, functions from UILayout
# First loop is for msgid args, second one is for msgctxt args.
for func_id, func in bpy.types.UILayout.bl_rna.functions.items():
# check it has one or more arguments as defined in translate_kw
for arg_pos, (arg_kw, arg) in enumerate(func.parameters.items()):
if ((arg_kw in translate_kw) and (not arg.is_output) and (arg.type == 'STRING')):
func_translate_args.setdefault(func_id, {})[arg_kw] = (arg_pos, {})
for func_id, func in bpy.types.UILayout.bl_rna.functions.items():
if func_id not in func_translate_args:
continue
for arg_pos, (arg_kw, arg) in enumerate(func.parameters.items()):
if (not arg.is_output) and (arg.type == 'STRING'):
for msgid, msgctxts in context_kw_set.items():
if arg_kw in msgctxts:
func_translate_args[func_id][msgid][1][arg_kw] = arg_pos
# We manually add funcs from bpy.app.translations
for func_id, func_ids in pgettext_variants:
func_translate_args[func_id] = pgettext_variants_args
for func_id in func_ids:
func_translate_args[func_id] = pgettext_variants_args
#print(func_translate_args)
# Break recursive node lookup on some kinds of nodes.
# E.g. we don't want to get strings inside subscripts (blah["foo"])!
stopper_nodes = {ast.Subscript}
# Consider strings separate: ("a" if test else "b")
separate_nodes = {ast.IfExp}
check_ctxt_py = None
if reports["check_ctxt"]:
check_ctxt = reports["check_ctxt"]
check_ctxt_py = {
"py_in_rna": (check_ctxt.get("py_in_rna"), set(msgs.keys())),
"multi_lines": check_ctxt.get("multi_lines"),
"not_capitalized": check_ctxt.get("not_capitalized"),
"end_point": check_ctxt.get("end_point"),
"spell_checker": check_ctxt.get("spell_checker"),
"spell_errors": check_ctxt.get("spell_errors"),
}
for fp in files:
with open(fp, 'r', encoding="utf8") as filedata:
root_node = ast.parse(filedata.read(), fp, 'exec')
fp_rel = os.path.relpath(fp, settings.SOURCE_DIR)
for node in ast.walk(root_node):
if type(node) == ast.Call:
# print("found function at")
# print("%s:%d" % (fp, node.lineno))
# We can't skip such situations! from blah import foo\nfoo("bar") would also be an ast.Name func!
if type(node.func) == ast.Name:
func_id = node.func.id
elif hasattr(node.func, "attr"):
func_id = node.func.attr
# Ugly things like getattr(self, con.type)(context, box, con)
else:
continue
func_args = func_translate_args.get(func_id, {})
# First try to get i18n contexts, for every possible msgid id.
msgctxts = dict.fromkeys(func_args.keys(), "")
for msgid, (_, context_args) in func_args.items():
context_elements = {}
for arg_kw, arg_pos in context_args.items():
if arg_pos < len(node.args):
context_elements[arg_kw] = node.args[arg_pos]
else:
for kw in node.keywords:
if kw.arg == arg_kw:
context_elements[arg_kw] = kw.value
break
#print(context_elements)
for kws, proc in translate_kw[msgid]:
if set(kws) <= context_elements.keys():
args = tuple(context_elements[k] for k in kws)
#print("running ", proc, " with ", args)
ctxt = proc(*args)
if ctxt:
msgctxts[msgid] = ctxt
break
#print(translate_args)
# do nothing if not found
for arg_kw, (arg_pos, _) in func_args.items():
msgctxt = msgctxts[arg_kw]
estr_lst = [(None, ())]
if arg_pos < len(node.args):
estr_lst = extract_strings_split(node.args[arg_pos])
#print(estr, nds)
else:
for kw in node.keywords:
if kw.arg == arg_kw:
estr_lst = extract_strings_split(kw.value)
break
#print(estr, nds)
for estr, nds in estr_lst:
if estr:
if nds:
msgsrc = "{}:{}".format(fp_rel, sorted({nd.lineno for nd in nds})[0])
else:
msgsrc = "{}:???".format(fp_rel)
process_msg(msgs, msgctxt, estr, msgsrc, reports, check_ctxt_py, settings)
reports["py_messages"].append((msgctxt, estr, msgsrc))
def dump_py_messages(msgs, reports, addons, settings):
def _get_files(path):
if os.path.isdir(path):
# XXX use walk instead of listdir?
return [os.path.join(path, fn) for fn in sorted(os.listdir(path))
if not fn.startswith("_") and fn.endswith(".py")]
return [path]
files = []
for path in settings.CUSTOM_PY_UI_FILES:
files += _get_files(path)
# Add all addons we support in main translation file!
for mod in addons:
fn = mod.__file__
if os.path.basename(fn) == "__init__.py":
files += _get_files(os.path.dirname(fn))
else:
files.append(fn)
dump_py_messages_from_files(msgs, reports, files, settings)
##### C source code #####
def dump_src_messages(msgs, reports, settings):
def get_contexts():
"""Return a mapping {C_CTXT_NAME: ctxt_value}."""
return {k: getattr(bpy.app.translations.contexts, n) for k, n in bpy.app.translations.contexts_C_to_py.items()}
contexts = get_contexts()
# Build regexes to extract messages (with optional contexts) from C source.
pygettexts = tuple(re.compile(r).search for r in settings.PYGETTEXT_KEYWORDS)
_clean_str = re.compile(settings.str_clean_re).finditer
clean_str = lambda s: "".join(m.group("clean") for m in _clean_str(s))
def dump_src_file(path, rel_path, msgs, reports, settings):
def process_entry(_msgctxt, _msgid):
# Context.
msgctxt = settings.DEFAULT_CONTEXT
if _msgctxt:
if _msgctxt in contexts:
msgctxt = contexts[_msgctxt]
elif '"' in _msgctxt or "'" in _msgctxt:
msgctxt = clean_str(_msgctxt)
else:
print("WARNING: raw context “{}” couldnt be resolved!".format(_msgctxt))
# Message.
msgid = ""
if _msgid:
if '"' in _msgid or "'" in _msgid:
msgid = clean_str(_msgid)
else:
print("WARNING: raw message “{}” couldnt be resolved!".format(_msgid))
return msgctxt, msgid
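# Illustrative: a match whose groups are _msgctxt == 'BLF_I18NCONTEXT_ID_MESH' (assumed to be
# a name known to contexts_C_to_py) and _msgid == '"Plane"' resolves to the corresponding
# Python context and the cleaned string "Plane"; a quoted raw context is cleaned with clean_str() instead.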
check_ctxt_src = None
if reports["check_ctxt"]:
check_ctxt = reports["check_ctxt"]
check_ctxt_src = {
"multi_lines": check_ctxt.get("multi_lines"),
"not_capitalized": check_ctxt.get("not_capitalized"),
"end_point": check_ctxt.get("end_point"),
"spell_checker": check_ctxt.get("spell_checker"),
"spell_errors": check_ctxt.get("spell_errors"),
}
data = ""
with open(path) as f:
data = f.read()
for srch in pygettexts:
m = srch(data)
line = pos = 0
while m:
d = m.groupdict()
# Line.
line += data[pos:m.start()].count('\n')
msgsrc = rel_path + ":" + str(line)
_msgid = d.get("msg_raw")
# First, try the "multi-contexts" stuff!
_msgctxts = tuple(d.get("ctxt_raw{}".format(i)) for i in range(settings.PYGETTEXT_MAX_MULTI_CTXT))
if _msgctxts[0]:
for _msgctxt in _msgctxts:
if not _msgctxt:
break
msgctxt, msgid = process_entry(_msgctxt, _msgid)
process_msg(msgs, msgctxt, msgid, msgsrc, reports, check_ctxt_src, settings)
reports["src_messages"].append((msgctxt, msgid, msgsrc))
else:
_msgctxt = d.get("ctxt_raw")
msgctxt, msgid = process_entry(_msgctxt, _msgid)
process_msg(msgs, msgctxt, msgid, msgsrc, reports, check_ctxt_src, settings)
reports["src_messages"].append((msgctxt, msgid, msgsrc))
pos = m.end()
line += data[m.start():pos].count('\n')
m = srch(data, pos)
forbidden = set()
forced = set()
if os.path.isfile(settings.SRC_POTFILES):
with open(settings.SRC_POTFILES) as src:
for l in src:
if l[0] == '-':
forbidden.add(l[1:].rstrip('\n'))
elif l[0] != '#':
forced.add(l.rstrip('\n'))
for root, dirs, files in os.walk(settings.POTFILES_SOURCE_DIR):
if "/.svn" in root:
continue
for fname in files:
if os.path.splitext(fname)[1] not in settings.PYGETTEXT_ALLOWED_EXTS:
continue
path = os.path.join(root, fname)
rel_path = os.path.relpath(path, settings.SOURCE_DIR)
if rel_path in forbidden:
continue
elif rel_path not in forced:
forced.add(rel_path)
for rel_path in sorted(forced):
path = os.path.join(settings.SOURCE_DIR, rel_path)
if os.path.exists(path):
dump_src_file(path, rel_path, msgs, reports, settings)
##### Main functions! #####
def dump_messages(do_messages, do_checks, settings):
bl_ver = "Blender " + bpy.app.version_string
bl_rev = bpy.app.build_revision
bl_date = datetime.datetime.strptime(bpy.app.build_date.decode() + "T" + bpy.app.build_time.decode(),
"%Y-%m-%dT%H:%M:%S")
pot = utils.I18nMessages.gen_empty_messages(settings.PARSER_TEMPLATE_ID, bl_ver, bl_rev, bl_date, bl_date.year,
settings=settings)
msgs = pot.msgs
# Enable all wanted addons.
# For now, enable all official addons, before extracting msgids.
addons = enable_addons(support={"OFFICIAL"})
# Note this is not needed if we have been started with factory settings, but just in case...
enable_addons(support={"COMMUNITY", "TESTING"}, disable=True)
reports = _gen_reports(_gen_check_ctxt(settings) if do_checks else None)
# Get strings from RNA.
dump_messages_rna(msgs, reports, settings)
# Get strings from UI layout definitions text="..." args.
dump_py_messages(msgs, reports, addons, settings)
# Get strings from C source code.
dump_src_messages(msgs, reports, settings)
# Get strings specific to translations' menu.
for lng in settings.LANGUAGES:
process_msg(msgs, settings.DEFAULT_CONTEXT, lng[1], "Languages labels from bl_i18n_utils/settings.py",
reports, None, settings)
for cat in settings.LANGUAGES_CATEGORIES:
process_msg(msgs, settings.DEFAULT_CONTEXT, cat[1],
"Language categories labels from bl_i18n_utils/settings.py", reports, None, settings)
#pot.check()
pot.unescape() # Strings gathered in py/C source code may contain escaped chars...
print_info(reports, pot)
#pot.check()
if do_messages:
print("Writing messages…")
pot.write('PO', settings.FILE_NAME_POT)
print("Finished extracting UI messages!")
def dump_addon_messages(module_name, messages_formats, do_checks, settings):
# Enable our addon to get its bl_info.
import addon_utils
addon = enable_addons(addons={module_name})[0]
addon_info = addon_utils.module_bl_info(addon)
ver = addon_info["name"] + " " + ".".join(str(v) for v in addon_info["version"])
rev = "???"
date = datetime.datetime.now()
pot = utils.I18nMessages.gen_empty_messages(settings.PARSER_TEMPLATE_ID, ver, rev, date, date.year,
settings=settings)
msgs = pot.msgs
minus_msgs = copy.deepcopy(msgs)
check_ctxt = _gen_check_ctxt(settings) if do_checks else None
minus_check_ctxt = _gen_check_ctxt(settings) if do_checks else None
# Get current addon state (loaded or not):
was_loaded = addon_utils.check(module_name)[1]
# Enable our addon and get strings from RNA.
addons = enable_addons(addons={module_name})
reports = _gen_reports(check_ctxt)
dump_messages_rna(msgs, reports, settings)
# Now disable our addon, and rescan RNA.
enable_addons(addons={module_name}, disable=True)
reports["check_ctxt"] = minus_check_ctxt
dump_messages_rna(minus_msgs, reports, settings)
# Restore previous state if needed!
if was_loaded:
enable_addons(addons={module_name})
# and make the diff!
for key in minus_msgs:
if key == settings.PO_HEADER_KEY:
continue
del msgs[key]
if check_ctxt:
for key in check_ctxt:
for warning in minus_check_ctxt[key]:
check_ctxt[key].remove(warning)
# and we are done with those!
del minus_msgs
del minus_check_ctxt
# get strings from UI layout definitions text="..." args
reports["check_ctxt"] = check_ctxt
dump_py_messages(msgs, reports, addons, settings)
print_info(reports, pot)
return pot
def main():
try:
import bpy
except ImportError:
print("This script must run from inside blender")
return
import sys
back_argv = sys.argv
# Get rid of Blender args!
sys.argv = sys.argv[sys.argv.index("--") + 1:]
import argparse
parser = argparse.ArgumentParser(description="Process UI messages from inside Blender.")
parser.add_argument('-c', '--no_checks', default=True, action="store_false", help="No checks over UI messages.")
parser.add_argument('-m', '--no_messages', default=True, action="store_false", help="No export of UI messages.")
parser.add_argument('-o', '--output', default=None, help="Output POT file path.")
parser.add_argument('-s', '--settings', default=None,
help="Override (some) default settings. Either a JSON file name, or a JSON string.")
args = parser.parse_args()
settings = i18n_settings.I18nSettings()
settings.from_json(args.settings)
if args.output:
settings.FILE_NAME_POT = args.output
dump_messages(do_messages=args.no_messages, do_checks=args.no_checks, settings=settings)
sys.argv = back_argv
if __name__ == "__main__":
print("\n\n *** Running {} *** \n".format(__file__))
main()
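# Typical invocation (illustrative; update_msg.py is the intended driver):
#   blender --background --python <this script> -- --output /path/to/blender.pot
# Everything after "--" is what main() above hands to argparse.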


@@ -1,762 +0,0 @@
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENSE BLOCK *****
# <pep8 compliant>
# Write out messages.txt from Blender.
# XXX: This script is meant to be used from inside Blender!
# You should not directly use this script, rather use update_msg.py!
import os
import re
import collections
import copy
# XXX Relative import does not work here when used from Blender...
from bl_i18n_utils import settings
import bpy
print(dir(settings))
SOURCE_DIR = settings.SOURCE_DIR
CUSTOM_PY_UI_FILES = [os.path.abspath(os.path.join(SOURCE_DIR, p)) for p in settings.CUSTOM_PY_UI_FILES]
FILE_NAME_MESSAGES = settings.FILE_NAME_MESSAGES
MSG_COMMENT_PREFIX = settings.MSG_COMMENT_PREFIX
MSG_CONTEXT_PREFIX = settings.MSG_CONTEXT_PREFIX
CONTEXT_DEFAULT = settings.CONTEXT_DEFAULT
#CONTEXT_DEFAULT = bpy.app.i18n.contexts.default # XXX Not yet! :)
UNDOC_OPS_STR = settings.UNDOC_OPS_STR
NC_ALLOWED = settings.WARN_MSGID_NOT_CAPITALIZED_ALLOWED
##### Utils #####
# check for strings like ": %d"
ignore_reg = re.compile(r"^(?:[-*.()/\\+:%xWXYZ0-9]|%d|%f|%s|%r|\s)*$")
filter_message = ignore_reg.match
def check(check_ctxt, messages, key, msgsrc):
"""
Performs a set of checks over the given key (context, message)...
"""
if check_ctxt is None:
return
multi_rnatip = check_ctxt.get("multi_rnatip")
multi_lines = check_ctxt.get("multi_lines")
py_in_rna = check_ctxt.get("py_in_rna")
not_capitalized = check_ctxt.get("not_capitalized")
end_point = check_ctxt.get("end_point")
undoc_ops = check_ctxt.get("undoc_ops")
if multi_rnatip is not None:
if key in messages and key not in multi_rnatip:
multi_rnatip.add(key)
if multi_lines is not None:
if '\n' in key[1]:
multi_lines.add(key)
if py_in_rna is not None:
if key in py_in_rna[1]:
py_in_rna[0].add(key)
if not_capitalized is not None:
if(key[1] not in NC_ALLOWED and key[1][0].isalpha() and not key[1][0].isupper()):
not_capitalized.add(key)
if end_point is not None:
if key[1].strip().endswith('.'):
end_point.add(key)
if undoc_ops is not None:
if key[1] == UNDOC_OPS_STR:
undoc_ops.add(key)
def print_warnings(check_ctxt, messages):
if check_ctxt is not None:
print("WARNINGS:")
keys = set()
for c in check_ctxt.values():
keys |= c
# XXX Temp, see below
keys -= check_ctxt["multi_rnatip"]
for key in keys:
if key in check_ctxt["undoc_ops"]:
print("\tThe following operators are undocumented:")
else:
print("\t{}”|“{}”:".format(*key))
if key in check_ctxt["multi_lines"]:
print("\t\t-> newline in this message!")
if key in check_ctxt["not_capitalized"]:
print("\t\t-> message not capitalized!")
if key in check_ctxt["end_point"]:
print("\t\t-> message with endpoint!")
# XXX Hide this one for now, too many false positives.
# if key in check_ctxt["multi_rnatip"]:
# print("\t\t-> tip used in several RNA items")
if key in check_ctxt["py_in_rna"]:
print("\t\t-> RNA message also used in py UI code:")
print("\t\t{}".format("\n\t\t".join(messages[key])))
def enable_addons(addons={}, support={}, disable=False):
"""
Enable (or disable) addons based either on a set of names, or a set of 'support' types.
Returns the list of all affected addons (as fake modules)!
"""
import addon_utils
import bpy
userpref = bpy.context.user_preferences
used_ext = {ext.module for ext in userpref.addons}
ret = [mod for mod in addon_utils.modules(addon_utils.addons_fake_modules)
if ((addons and mod.__name__ in addons) or
(not addons and addon_utils.module_bl_info(mod)["support"] in support))]
for mod in ret:
module_name = mod.__name__
if disable:
if module_name not in used_ext:
continue
print(" Disabling module ", module_name)
bpy.ops.wm.addon_disable(module=module_name)
else:
if module_name in used_ext:
continue
print(" Enabling module ", module_name)
bpy.ops.wm.addon_enable(module=module_name)
# XXX There are currently some problems with bpy/rna...
# *Very* tricky to solve!
# So this is a hack to make all newly added operators visible to
# bpy.types.OperatorProperties.__subclasses__()
for cat in dir(bpy.ops):
cat = getattr(bpy.ops, cat)
for op in dir(cat):
getattr(cat, op).get_rna()
return ret
##### RNA #####
def dump_messages_rna(messages, check_ctxt):
"""
Dump into messages dict all RNA-defined UI messages (labels and tooltips).
"""
import bpy
def classBlackList():
blacklist_rna_class = [
# core classes
"Context", "Event", "Function", "UILayout", "BlendData",
# registerable classes
"Panel", "Menu", "Header", "RenderEngine", "Operator", "OperatorMacro", "Macro",
"KeyingSetInfo", "UnknownType",
# window classes
"Window",
]
# ---------------------------------------------------------------------
# Collect internal operators
# extend with all internal operators
# note that this uses internal api introspection functions
# all possible operator names
op_ids = set(cls.bl_rna.identifier for cls in bpy.types.OperatorProperties.__subclasses__()) | \
set(cls.bl_rna.identifier for cls in bpy.types.Operator.__subclasses__()) | \
set(cls.bl_rna.identifier for cls in bpy.types.OperatorMacro.__subclasses__())
get_instance = __import__("_bpy").ops.get_instance
path_resolve = type(bpy.context).__base__.path_resolve
for idname in op_ids:
op = get_instance(idname)
# XXX Do not skip INTERNAL's anymore, some of those ops show up in UI now!
# if 'INTERNAL' in path_resolve(op, "bl_options"):
# blacklist_rna_class.append(idname)
# ---------------------------------------------------------------------
# Collect builtin classes we don't need to doc
blacklist_rna_class.append("Property")
blacklist_rna_class.extend([cls.__name__ for cls in bpy.types.Property.__subclasses__()])
# ---------------------------------------------------------------------
# Collect classes which are attached to collections, these are api
# access only.
collection_props = set()
for cls_id in dir(bpy.types):
cls = getattr(bpy.types, cls_id)
for prop in cls.bl_rna.properties:
if prop.type == 'COLLECTION':
prop_cls = prop.srna
if prop_cls is not None:
collection_props.add(prop_cls.identifier)
blacklist_rna_class.extend(sorted(collection_props))
return blacklist_rna_class
blacklist_rna_class = classBlackList()
def filterRNA(bl_rna):
rid = bl_rna.identifier
if rid in blacklist_rna_class:
print(" skipping", rid)
return True
return False
check_ctxt_rna = check_ctxt_rna_tip = None
if check_ctxt:
check_ctxt_rna = {"multi_lines": check_ctxt.get("multi_lines"),
"not_capitalized": check_ctxt.get("not_capitalized"),
"end_point": check_ctxt.get("end_point"),
"undoc_ops": check_ctxt.get("undoc_ops")}
check_ctxt_rna_tip = check_ctxt_rna
check_ctxt_rna_tip["multi_rnatip"] = check_ctxt.get("multi_rnatip")
# -------------------------------------------------------------------------
# Function definitions
def walkProperties(bl_rna):
import bpy
# Get our parents' properties, to not export them multiple times.
bl_rna_base = bl_rna.base
if bl_rna_base:
bl_rna_base_props = bl_rna_base.properties.values()
else:
bl_rna_base_props = ()
for prop in bl_rna.properties:
# Only write this property if our parent hasn't got it.
if prop in bl_rna_base_props:
continue
if prop.identifier == "rna_type":
continue
msgsrc = "bpy.types.{}.{}".format(bl_rna.identifier, prop.identifier)
context = getattr(prop, "translation_context", CONTEXT_DEFAULT)
if prop.name and (prop.name != prop.identifier or context):
key = (context, prop.name)
check(check_ctxt_rna, messages, key, msgsrc)
messages.setdefault(key, []).append(msgsrc)
if prop.description:
key = (CONTEXT_DEFAULT, prop.description)
check(check_ctxt_rna_tip, messages, key, msgsrc)
messages.setdefault(key, []).append(msgsrc)
if isinstance(prop, bpy.types.EnumProperty):
for item in prop.enum_items:
msgsrc = "bpy.types.{}.{}:'{}'".format(bl_rna.identifier,
prop.identifier,
item.identifier)
if item.name and item.name != item.identifier:
key = (CONTEXT_DEFAULT, item.name)
check(check_ctxt_rna, messages, key, msgsrc)
messages.setdefault(key, []).append(msgsrc)
if item.description:
key = (CONTEXT_DEFAULT, item.description)
check(check_ctxt_rna_tip, messages, key, msgsrc)
messages.setdefault(key, []).append(msgsrc)
def walkRNA(bl_rna):
if filterRNA(bl_rna):
return
msgsrc = ".".join(("bpy.types", bl_rna.identifier))
context = getattr(bl_rna, "translation_context", CONTEXT_DEFAULT)
if bl_rna.name and (bl_rna.name != bl_rna.identifier or context):
key = (context, bl_rna.name)
check(check_ctxt_rna, messages, key, msgsrc)
messages.setdefault(key, []).append(msgsrc)
if bl_rna.description:
key = (CONTEXT_DEFAULT, bl_rna.description)
check(check_ctxt_rna_tip, messages, key, msgsrc)
messages.setdefault(key, []).append(msgsrc)
if hasattr(bl_rna, 'bl_label') and bl_rna.bl_label:
key = (context, bl_rna.bl_label)
check(check_ctxt_rna, messages, key, msgsrc)
messages.setdefault(key, []).append(msgsrc)
walkProperties(bl_rna)
def walkClass(cls):
walkRNA(cls.bl_rna)
def walk_keymap_hierarchy(hier, msgsrc_prev):
for lvl in hier:
msgsrc = "{}.{}".format(msgsrc_prev, lvl[1])
messages.setdefault((CONTEXT_DEFAULT, lvl[0]), []).append(msgsrc)
if lvl[3]:
walk_keymap_hierarchy(lvl[3], msgsrc)
# -------------------------------------------------------------------------
# Dump Messages
def process_cls_list(cls_list):
if not cls_list:
return 0
def full_class_id(cls):
""" gives us 'ID.Lamp.AreaLamp' which is best for sorting.
"""
cls_id = ""
bl_rna = cls.bl_rna
while bl_rna:
cls_id = "{}.{}".format(bl_rna.identifier, cls_id)
bl_rna = bl_rna.base
return cls_id
cls_list.sort(key=full_class_id)
processed = 0
for cls in cls_list:
# XXX The translation_context of Operator sub-classes is not "good"!
# So ignore those Operator sub-classes (anyway, will get the same from OperatorProperties
# sub-classes!)...
if issubclass(cls, bpy.types.Operator):
continue
walkClass(cls)
# classes.add(cls)
# Recursively process subclasses.
processed += process_cls_list(cls.__subclasses__()) + 1
return processed
# Parse everything (recursively parsing from bpy_struct "class"...).
processed = process_cls_list(type(bpy.context).__base__.__subclasses__())
print("{} classes processed!".format(processed))
from bpy_extras.keyconfig_utils import KM_HIERARCHY
walk_keymap_hierarchy(KM_HIERARCHY, "KM_HIERARCHY")
##### Python source code #####
def dump_py_messages_from_files(messages, check_ctxt, files):
"""
Dump text inlined in the python files given, e.g. 'My Name' in:
layout.prop("someprop", text="My Name")
"""
import ast
bpy_struct = bpy.types.ID.__base__
# Helper function
def extract_strings_ex(node, is_split=False):
"""
Recursively get strings, needed in case we have "Blah" + "Blah" passed as an argument; in that case it won't
evaluate to a single string. However, break on some kinds of stopper nodes, e.g. Subscript.
"""
if type(node) == ast.Str:
eval_str = ast.literal_eval(node)
if eval_str:
yield (is_split, eval_str, (node,))
else:
is_split = (type(node) in separate_nodes)
for nd in ast.iter_child_nodes(node):
if type(nd) not in stopper_nodes:
yield from extract_strings_ex(nd, is_split=is_split)
def _extract_string_merge(estr_ls, nds_ls):
return "".join(s for s in estr_ls if s is not None), tuple(n for n in nds_ls if n is not None)
def extract_strings(node):
estr_ls = []
nds_ls = []
for is_split, estr, nds in extract_strings_ex(node):
estr_ls.append(estr)
nds_ls.extend(nds)
ret = _extract_string_merge(estr_ls, nds_ls)
#print(ret)
return ret
def extract_strings_split(node):
"""
Returns a list of args as returned by 'extract_strings()',
but split into groups based on separate_nodes; this way
expressions like ("A" if test else "B") won't be merged, but
"A" + "B" will.
"""
estr_ls = []
nds_ls = []
bag = []
for is_split, estr, nds in extract_strings_ex(node):
if is_split:
bag.append((estr_ls, nds_ls))
estr_ls = []
nds_ls = []
estr_ls.append(estr)
nds_ls.extend(nds)
bag.append((estr_ls, nds_ls))
return [_extract_string_merge(estr_ls, nds_ls) for estr_ls, nds_ls in bag]
def _ctxt_to_ctxt(node):
return extract_strings(node)[0]
def _op_to_ctxt(node):
opname, _ = extract_strings(node)
if not opname:
return ""
op = bpy.ops
for n in opname.split('.'):
op = getattr(op, n)
try:
return op.get_rna().bl_rna.translation_context
except Exception as e:
default_op_context = bpy.app.translations.contexts.operator_default
print("ERROR: ", str(e))
print(" Assuming default operator context '{}'".format(default_op_context))
return default_op_context
# -------------------------------------------------------------------------
# Gather function names
# In addition to UI funcs, also parse pgettext ones...
# Tuples of (module name, (short names, ...)).
pgettext_variants = (
("pgettext", ("_",)),
("pgettext_iface", ("iface_",)),
("pgettext_tip", ("tip_",))
)
pgettext_variants_args = {"msgid": (0, {"msgctxt": 1})}
# key: msgid keywords.
# val: tuples of ((keywords,), context_getter_func) to get a context for that msgid.
# Note: order is important, first one wins!
translate_kw = {
"text": ((("text_ctxt",), _ctxt_to_ctxt),
(("operator",), _op_to_ctxt),
),
"msgid": ((("msgctxt",), _ctxt_to_ctxt),
),
}
context_kw_set = {}
for k, ctxts in translate_kw.items():
s = set()
for c, _ in ctxts:
s |= set(c)
context_kw_set[k] = s
# {func_id: {msgid: (arg_pos,
# {msgctxt: arg_pos,
# ...
# }
# ),
# ...
# },
# ...
# }
func_translate_args = {}
# First, functions from UILayout
# First loop is for msgid args, second one is for msgctxt args.
for func_id, func in bpy.types.UILayout.bl_rna.functions.items():
# check it has one or more arguments as defined in translate_kw
for arg_pos, (arg_kw, arg) in enumerate(func.parameters.items()):
if ((arg_kw in translate_kw) and (not arg.is_output) and (arg.type == 'STRING')):
func_translate_args.setdefault(func_id, {})[arg_kw] = (arg_pos, {})
for func_id, func in bpy.types.UILayout.bl_rna.functions.items():
if func_id not in func_translate_args:
continue
for arg_pos, (arg_kw, arg) in enumerate(func.parameters.items()):
if (not arg.is_output) and (arg.type == 'STRING'):
for msgid, msgctxts in context_kw_set.items():
if arg_kw in msgctxts:
func_translate_args[func_id][msgid][1][arg_kw] = arg_pos
# We manually add funcs from bpy.app.translations
for func_id, func_ids in pgettext_variants:
func_translate_args[func_id] = pgettext_variants_args
for func_id in func_ids:
func_translate_args[func_id] = pgettext_variants_args
#print(func_translate_args)
# Break recursive node lookup on some kinds of nodes.
# E.g. we don't want to get strings inside subscripts (blah["foo"])!
stopper_nodes = {ast.Subscript}
# Consider strings separate: ("a" if test else "b")
separate_nodes = {ast.IfExp}
check_ctxt_py = None
if check_ctxt:
check_ctxt_py = {"py_in_rna": (check_ctxt["py_in_rna"], messages.copy()),
"multi_lines": check_ctxt["multi_lines"],
"not_capitalized": check_ctxt["not_capitalized"],
"end_point": check_ctxt["end_point"]}
for fp in files:
with open(fp, 'r', encoding="utf8") as filedata:
root_node = ast.parse(filedata.read(), fp, 'exec')
fp_rel = os.path.relpath(fp, SOURCE_DIR)
for node in ast.walk(root_node):
if type(node) == ast.Call:
# print("found function at")
# print("%s:%d" % (fp, node.lineno))
# We can't skip such situations! from blah import foo\nfoo("bar") would also be an ast.Name func!
if type(node.func) == ast.Name:
func_id = node.func.id
elif hasattr(node.func, "attr"):
func_id = node.func.attr
# Ugly things like getattr(self, con.type)(context, box, con)
else:
continue
func_args = func_translate_args.get(func_id, {})
# First try to get i18n contexts, for every possible msgid id.
contexts = dict.fromkeys(func_args.keys(), "")
for msgid, (_, context_args) in func_args.items():
context_elements = {}
for arg_kw, arg_pos in context_args.items():
if arg_pos < len(node.args):
context_elements[arg_kw] = node.args[arg_pos]
else:
for kw in node.keywords:
if kw.arg == arg_kw:
context_elements[arg_kw] = kw.value
break
#print(context_elements)
for kws, proc in translate_kw[msgid]:
if set(kws) <= context_elements.keys():
args = tuple(context_elements[k] for k in kws)
#print("running ", proc, " with ", args)
ctxt = proc(*args)
if ctxt:
contexts[msgid] = ctxt
break
#print(translate_args)
# do nothing if not found
for arg_kw, (arg_pos, _) in func_args.items():
estr_lst = [(None, ())]
if arg_pos < len(node.args):
estr_lst = extract_strings_split(node.args[arg_pos])
#print(estr, nds)
else:
for kw in node.keywords:
if kw.arg == arg_kw:
estr_lst = extract_strings_split(kw.value)
break
#print(estr, nds)
for estr, nds in estr_lst:
if estr:
key = (contexts[arg_kw], estr)
if nds:
msgsrc = ["{}:{}".format(fp_rel, sorted({nd.lineno for nd in nds})[0])]
else:
msgsrc = ["{}:???".format(fp_rel)]
check(check_ctxt_py, messages, key, msgsrc)
messages.setdefault(key, []).extend(msgsrc)
def dump_py_messages(messages, check_ctxt, addons):
mod_dir = os.path.join(SOURCE_DIR, "release", "scripts", "startup", "bl_ui")
files = [os.path.join(mod_dir, fn) for fn in sorted(os.listdir(mod_dir))
if not fn.startswith("_") if fn.endswith("py")]
# Dummy Cycles has its py addon in its own dir!
files += CUSTOM_PY_UI_FILES
# Add all addons we support in main translation file!
for mod in addons:
fn = mod.__file__
if os.path.basename(fn) == "__init__.py":
mod_dir = os.path.dirname(fn)
files += [fn for fn in sorted(os.listdir(mod_dir))
if os.path.isfile(fn) and os.path.splitext(fn)[1] == ".py"]
else:
files.append(fn)
dump_py_messages_from_files(messages, check_ctxt, files)
##### Main functions! #####
def dump_messages(do_messages, do_checks):
messages = getattr(collections, 'OrderedDict', dict)()
messages[(CONTEXT_DEFAULT, "")] = []
# Enable all wanted addons.
# For now, enable all official addons, before extracting msgids.
addons = enable_addons(support={"OFFICIAL"})
check_ctxt = None
if do_checks:
check_ctxt = {"multi_rnatip": set(),
"multi_lines": set(),
"py_in_rna": set(),
"not_capitalized": set(),
"end_point": set(),
"undoc_ops": set()}
# get strings from RNA
dump_messages_rna(messages, check_ctxt)
# get strings from UI layout definitions text="..." args
dump_py_messages(messages, check_ctxt, addons)
del messages[(CONTEXT_DEFAULT, "")]
print_warnings(check_ctxt, messages)
if do_messages:
print("Writing messages…")
num_written = 0
num_filtered = 0
with open(FILE_NAME_MESSAGES, 'w', encoding="utf8") as message_file:
for (ctx, key), value in messages.items():
# filter out junk values
if filter_message(key):
num_filtered += 1
continue
# Remove newlines in key and values!
message_file.write("\n".join(MSG_COMMENT_PREFIX + msgsrc.replace("\n", "") for msgsrc in value))
message_file.write("\n")
if ctx:
message_file.write(MSG_CONTEXT_PREFIX + ctx.replace("\n", "") + "\n")
message_file.write(key.replace("\n", "") + "\n")
num_written += 1
print("Written {} messages to: {} ({} were filtered out)."
"".format(num_written, FILE_NAME_MESSAGES, num_filtered))
def dump_addon_messages(module_name, messages_formats, do_checks):
messages = getattr(collections, 'OrderedDict', dict)()
messages[(CONTEXT_DEFAULT, "")] = []
minus_messages = copy.deepcopy(messages)
check_ctxt = None
minus_check_ctxt = None
if do_checks:
check_ctxt = {"multi_rnatip": set(),
"multi_lines": set(),
"py_in_rna": set(),
"not_capitalized": set(),
"end_point": set(),
"undoc_ops": set()}
minus_check_ctxt = copy.deepcopy(check_ctxt)
# Get current addon state (loaded or not):
was_loaded = addon_utils.check(module_name)[1]
# Enable our addon and get strings from RNA.
enable_addons(addons={module_name})
dump_messages_rna(messages, check_ctxt)
# Now disable our addon, and rescan RNA.
enable_addons(addons={module_name}, disable=True)
dump_messages_rna(minus_messages, minus_check_ctxt)
# Restore previous state if needed!
if was_loaded:
enable_addons(addons={module_name})
# and make the diff!
for key in minus_messages:
if key == (CONTEXT_DEFAULT, ""):
continue
del messages[key]
if check_ctxt:
for key in check_ctxt:
for warning in minus_check_ctxt[key]:
check_ctxt[key].remove(warning)
# and we are done with those!
del minus_messages
del minus_check_ctxt
# get strings from UI layout definitions text="..." args
dump_messages_pytext(messages, check_ctxt)
del messages[(CONTEXT_DEFAULT, "")]
print_warnings(check_ctxt, messages)
if do_messages:
print("Writing messages…")
num_written = 0
num_filtered = 0
with open(FILE_NAME_MESSAGES, 'w', encoding="utf8") as message_file:
for (ctx, key), value in messages.items():
# filter out junk values
if filter_message(key):
num_filtered += 1
continue
# Remove newlines in key and values!
message_file.write("\n".join(COMMENT_PREFIX + msgsrc.replace("\n", "") for msgsrc in value))
message_file.write("\n")
if ctx:
message_file.write(MSG_CONTEXT_PREFIX + ctx.replace("\n", "") + "\n")
message_file.write(key.replace("\n", "") + "\n")
num_written += 1
print("Written {} messages to: {} ({} were filtered out)."
"".format(num_written, FILE_NAME_MESSAGES, num_filtered))
def main():
try:
import bpy
except ImportError:
print("This script must run from inside blender")
return
import sys
back_argv = sys.argv
# Get rid of Blender args!
sys.argv = sys.argv[sys.argv.index("--") + 1:]
import argparse
parser = argparse.ArgumentParser(description="Process UI messages from inside Blender.")
parser.add_argument('-c', '--no_checks', default=True, action="store_false", help="No checks over UI messages.")
parser.add_argument('-m', '--no_messages', default=True, action="store_false", help="No export of UI messages.")
parser.add_argument('-o', '--output', help="Output messages file path.")
args = parser.parse_args()
if args.output:
global FILE_NAME_MESSAGES
FILE_NAME_MESSAGES = args.output
dump_messages(do_messages=args.no_messages, do_checks=args.no_checks)
sys.argv = back_argv
if __name__ == "__main__":
print("\n\n *** Running {} *** \n".format(__file__))
main()


@@ -0,0 +1,96 @@
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENSE BLOCK *****
# <pep8 compliant>
# Update "languages" text file used by Blender at runtime to build translations menu.
import os
OK = 0
MISSING = 1
TOOLOW = 2
FORBIDDEN = 3
FLAG_MESSAGES = {
OK: "",
MISSING: "No translation yet!",
TOOLOW: "Not enough advanced to be included...",
FORBIDDEN: "Explicitly forbidden!",
}
def gen_menu_file(stats, settings):
# Generate languages file used by Blender's i18n system.
# First, match all entries in LANGUAGES to a lang in stats, if possible!
tmp = []
for uid_num, label, uid, in settings.LANGUAGES:
if uid in stats:
if uid in settings.IMPORT_LANGUAGES_SKIP:
tmp.append((stats[uid], uid_num, label, uid, FORBIDDEN))
else:
tmp.append((stats[uid], uid_num, label, uid, OK))
else:
tmp.append((0.0, uid_num, label, uid, MISSING))
stats = tmp
limits = sorted(settings.LANGUAGES_CATEGORIES, key=lambda it: it[0], reverse=True)
idx = 0
stats = sorted(stats, key=lambda it: it[0], reverse=True)
langs_cats = [[] for i in range(len(limits))]
highest_uid = 0
for lvl, uid_num, label, uid, flag in stats:
if lvl < limits[idx][0]:
# Sub-sort languages by iso-codes.
langs_cats[idx].sort(key=lambda it: it[2])
idx += 1
if lvl < settings.IMPORT_MIN_LEVEL and flag == OK:
flag = TOOLOW
langs_cats[idx].append((uid_num, label, uid, flag))
if abs(uid_num) > highest_uid:
highest_uid = abs(uid_num)
# Sub-sort last group of languages by iso-codes!
langs_cats[idx].sort(key=lambda it: it[2])
data_lines = [
"# File used by Blender to know which languages (translations) are available, ",
"# and to generate translation menu.",
"#",
"# File format:",
"# ID:MENULABEL:ISOCODE",
"# ID must be unique, except for 0 value (marks categories for menu).",
"# Line starting with a # are comments!",
"#",
"# Automatically generated by bl_i18n_utils/update_languages_menu.py script.",
"# Highest ID currently in use: {}".format(highest_uid),
]
for cat, langs_cat in zip(limits, langs_cats):
data_lines.append("#")
# Write "category menu label"...
if langs_cat:
data_lines.append("0:{}:".format(cat[1]))
else:
# Do not write the category if it has no language!
data_lines.append("# Void category! #0:{}:".format(cat[1]))
# ...and all matching language entries!
for uid_num, label, uid, flag in langs_cat:
if flag == OK:
data_lines.append("{}:{}:{}".format(uid_num, label, uid))
else:
# Non-existing, commented entry!
data_lines.append("# {} #{}:{}:{}".format(FLAG_MESSAGES[flag], uid_num, label, uid))
with open(os.path.join(settings.TRUNK_MO_DIR, settings.LANGUAGES_FILE), 'w') as f:
f.write("\n".join(data_lines))


@@ -36,18 +36,6 @@ import sys
import ctypes
import re
try:
import settings
import utils
except:
from . import (settings, utils)
FRIBIDI_LIB = settings.FRIBIDI_LIB
###### Import C library and recreate "defines". #####
fbd = ctypes.CDLL(FRIBIDI_LIB)
#define FRIBIDI_MASK_NEUTRAL 0x00000040L /* Is neutral */
FRIBIDI_PAR_ON = 0x00000040
@@ -80,12 +68,9 @@ FRIBIDI_FLAG_REMOVE_SPECIALS = 0x00040000
FRIBIDI_FLAG_SHAPE_ARAB_PRES = 0x00000100
FRIBIDI_FLAG_SHAPE_ARAB_LIGA = 0x00000200
FRIBIDI_FLAGS_DEFAULT = FRIBIDI_FLAG_SHAPE_MIRRORING | \
FRIBIDI_FLAG_REORDER_NSM | \
FRIBIDI_FLAG_REMOVE_SPECIALS
FRIBIDI_FLAGS_DEFAULT = FRIBIDI_FLAG_SHAPE_MIRRORING | FRIBIDI_FLAG_REORDER_NSM | FRIBIDI_FLAG_REMOVE_SPECIALS
FRIBIDI_FLAGS_ARABIC = FRIBIDI_FLAG_SHAPE_ARAB_PRES | \
FRIBIDI_FLAG_SHAPE_ARAB_LIGA
FRIBIDI_FLAGS_ARABIC = FRIBIDI_FLAG_SHAPE_ARAB_PRES | FRIBIDI_FLAG_SHAPE_ARAB_LIGA
MENU_DETECT_REGEX = re.compile("%x\\d+\\|")
@@ -158,11 +143,13 @@ def protect_format_seq(msg):
return "".join(ret)
def log2vis(msgs):
def log2vis(msgs, settings):
"""
Globally mimics deprecated fribidi_log2vis.
msgs should be an iterable of messages to rtl-process.
"""
fbd = ctypes.CDLL(settings.FRIBIDI_LIB)
for msg in msgs:
msg = protect_format_seq(msg)
@@ -206,52 +193,3 @@ def log2vis(msgs):
# print(*(ord(c) for c in fbc_str))
yield fbc_str.value
##### Command line stuff. #####
def main():
import argparse
parser = argparse.ArgumentParser(description="" \
"Preprocesses right-to-left languages.\n" \
"You can use it either standalone, or through " \
"import_po_from_branches or update_trunk.\n\n" \
"Note: This has been tested on Linux, not 100% it will " \
"work nicely on Windows or OsX.\n" \
"Note: This uses ctypes, as there is no py3 binding for " \
"fribidi currently. This implies you only need the " \
"compiled C library to run it.\n" \
"Note: It handles some formating/escape codes (like " \
"\\\", %s, %x12, %.4f, etc.), protecting them from ugly " \
"(evil) fribidi, which seems completely unaware of such " \
"things (as unicode is...).")
parser.add_argument('dst', metavar='dst.po',
help="The dest po into which write the " \
"pre-processed messages.")
parser.add_argument('src', metavar='src.po',
help="The po's to pre-process messages.")
args = parser.parse_args()
msgs, state, u1 = utils.parse_messages(args.src)
if state["is_broken"]:
print("Source po is BROKEN, aborting.")
return 1
keys = []
trans = []
for key, val in msgs.items():
keys.append(key)
trans.append("".join(val["msgstr_lines"]))
trans = log2vis(trans)
for key, trn in zip(keys, trans):
# Mono-line for now...
msgs[key]["msgstr_lines"] = [trn]
utils.write_messages(args.dst, msgs, state["comm_msg"], state["fuzzy_msg"])
print("RTL pre-process completed.")
return 0
if __name__ == "__main__":
print("\n\n *** Running {} *** \n".format(__file__))
sys.exit(main())


@@ -24,8 +24,12 @@
# XXX This is a template, most values should be OK, but some you'll have to
# edit (most probably, BLENDER_EXEC and SOURCE_DIR).
import os.path
import json
import os
import sys
import bpy
###############################################################################
# MISC
@@ -86,15 +90,24 @@ LANGUAGES = (
(40, "Hindi (मानक हिन्दी)", "hi_IN"),
)
# Default context, in py!
DEFAULT_CONTEXT = bpy.app.translations.contexts.default
# Name of language file used by Blender to generate translations' menu.
LANGUAGES_FILE = "languages"
# The min level of completeness for a po file to be imported from /branches
# into /trunk, as a percentage. -1 means "import everything".
IMPORT_MIN_LEVEL = -1
# The min level of completeness for a po file to be imported from /branches into /trunk, as a percentage.
IMPORT_MIN_LEVEL = 0.0
# Languages in /branches we do not want to import in /trunk currently...
IMPORT_LANGUAGES_SKIP = {'am', 'bg', 'fi', 'el', 'et', 'ne', 'pl', 'ro', 'uz', 'uz@cyrillic'}
IMPORT_LANGUAGES_SKIP = {
'am_ET', 'bg_BG', 'fi_FI', 'el_GR', 'et_EE', 'ne_NP', 'pl_PL', 'ro_RO', 'uz_UZ', 'uz_UZ@cyrillic',
}
# Languages that need RTL pre-processing.
IMPORT_LANGUAGES_RTL = {
'ar_EG', 'fa_IR', 'he_IL',
}
# The comment prefix used in generated messages.txt file.
MSG_COMMENT_PREFIX = "#~ "
@@ -111,6 +124,9 @@ PO_COMMENT_PREFIX_SOURCE = "#: "
# The comment prefix used to mark sources of msgids, in po's.
PO_COMMENT_PREFIX_SOURCE_CUSTOM = "#. :src: "
# The general "generated" comment prefix, in po's.
PO_COMMENT_PREFIX_GENERATED = "#. "
# The comment prefix used to comment entries in po's.
PO_COMMENT_PREFIX_MSG = "#~ "
@@ -127,16 +143,16 @@ PO_MSGID = "msgid "
PO_MSGSTR = "msgstr "
# The 'header' key of po files.
PO_HEADER_KEY = ("", "")
PO_HEADER_KEY = (DEFAULT_CONTEXT, "")
PO_HEADER_MSGSTR = (
"Project-Id-Version: Blender {blender_ver} (r{blender_rev})\\n\n"
"Project-Id-Version: {blender_ver} (r{blender_rev})\\n\n"
"Report-Msgid-Bugs-To: \\n\n"
"POT-Creation-Date: {time}\\n\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\\n\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\\n\n"
"Language-Team: LANGUAGE <LL@li.org>\\n\n"
"Language: {iso}\\n\n"
"Language: {uid}\\n\n"
"MIME-Version: 1.0\\n\n"
"Content-Type: text/plain; charset=UTF-8\\n\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -154,8 +170,8 @@ PO_HEADER_COMMENT = (
TEMPLATE_ISO_ID = "__TEMPLATE__"
# Default context.
CONTEXT_DEFAULT = ""
# Num buttons report their label with a trailing ': '...
NUM_BUTTON_SUFFIX = ": "
# Undocumented operator placeholder string.
UNDOC_OPS_STR = "(undocumented operator)"
@@ -241,11 +257,6 @@ PYGETTEXT_KEYWORDS = (() +
for it in ("BLF_I18N_MSGID_MULTI_CTXT",))
)
ESCAPE_RE = (
(r'((?<!\\)"|(?<!\\)\\(?!\\|"))', r"\\\1"),
('\t', r"\\t"),
)
# Should po parser warn when finding a first letter not capitalized?
WARN_MSGID_NOT_CAPITALIZED = True
@@ -291,40 +302,42 @@ WARN_MSGID_NOT_CAPITALIZED_ALLOWED = {
}
WARN_MSGID_NOT_CAPITALIZED_ALLOWED |= set(lng[2] for lng in LANGUAGES)
WARN_MSGID_END_POINT_ALLOWED = {
"Numpad .",
"Circle|Alt .",
"Temp. Diff.",
"Float Neg. Exp.",
}
PARSER_CACHE_HASH = 'sha1'
PARSER_TEMPLATE_ID = "__POT__"
PARSER_PY_ID = "__PY__"
PARSER_PY_MARKER_BEGIN = "\n# ##### BEGIN AUTOGENERATED I18N SECTION #####\n"
PARSER_PY_MARKER_END = "\n# ##### END AUTOGENERATED I18N SECTION #####\n"
PARSER_MAX_FILE_SIZE = 2**24 # in bytes, i.e. 16 Mb.
###############################################################################
# PATHS
###############################################################################
# The tools path, should be OK.
TOOLS_DIR = os.path.join(os.path.dirname(__file__))
# The Python3 executable. You'll likely have to edit it in your user_settings.py
# if you're under Windows.
PYTHON3_EXEC = "python3"
# The Blender executable!
# This is just an example, you'll most likely have to edit it in your user_settings.py!
BLENDER_EXEC = os.path.abspath(os.path.join(TOOLS_DIR, "..", "..", "..", "..", "blender"))
# This is just an example, you'll have to edit it in your user_settings.py!
BLENDER_EXEC = os.path.abspath(os.path.join("foo", "bar", "blender"))
# check for blender.bin
if not os.path.exists(BLENDER_EXEC):
if os.path.exists(BLENDER_EXEC + ".bin"):
BLENDER_EXEC = BLENDER_EXEC + ".bin"
# The xgettext tool. You'll likely have to edit it in your user_settings.py if you're under Windows.
GETTEXT_XGETTEXT_EXECUTABLE = "xgettext"
# The gettext msgmerge tool. You'll likely have to edit it in your user_settings.py if you're under Windows.
GETTEXT_MSGMERGE_EXECUTABLE = "msgmerge"
# The gettext msgfmt "compiler". Youll likely have to edit it in your user_settings.py if youre under Windows.
GETTEXT_MSGFMT_EXECUTABLE = "msgfmt"
# The svn binary... You'll likely have to edit it in your user_settings.py if you're under Windows.
SVN_EXECUTABLE = "svn"
# The FriBidi C compiled library (.so under Linux, .dll under windows...).
# You'll likely have to edit it in your user_settings.py if you're under Windows, e.g. using the included one:
# FRIBIDI_LIB = os.path.join(TOOLS_DIR, "libfribidi.dll")
@@ -334,53 +347,63 @@ FRIBIDI_LIB = "libfribidi.so.0"
RTL_PREPROCESS_FILE = "is_rtl"
# The Blender source root path.
# This is just an example, you'll most likely have to override it in your user_settings.py!
SOURCE_DIR = os.path.abspath(os.path.join(TOOLS_DIR, "..", "..", "..", "..", "..", "..", "blender_msgs"))
# This is just an example, you'll have to override it in your user_settings.py!
SOURCE_DIR = os.path.abspath(os.path.join("blender"))
# The bf-translation repository (you'll likely have to override this in your user_settings.py).
I18N_DIR = os.path.abspath(os.path.join(TOOLS_DIR, "..", "..", "..", "..", "..", "..", "i18n"))
# The bf-translation repository (you'll have to override this in your user_settings.py).
I18N_DIR = os.path.abspath(os.path.join("i18n"))
# The /branches path (overridden in bf-translation's i18n_override_settings.py).
BRANCHES_DIR = os.path.join(I18N_DIR, "branches")
# The /branches path (relative to I18N_DIR).
REL_BRANCHES_DIR = os.path.join("branches")
# The /trunk path (overridden in bf-translation's i18n_override_settings.py).
TRUNK_DIR = os.path.join(I18N_DIR, "trunk")
# The /trunk path (relative to I18N_DIR).
REL_TRUNK_DIR = os.path.join("trunk")
# The /trunk/po path (overridden in bf-translation's i18n_override_settings.py).
TRUNK_PO_DIR = os.path.join(TRUNK_DIR, "po")
# The /trunk/po path (relative to I18N_DIR).
REL_TRUNK_PO_DIR = os.path.join(REL_TRUNK_DIR, "po")
# The /trunk/mo path (overridden in bf-translation's i18n_override_settings.py).
TRUNK_MO_DIR = os.path.join(TRUNK_DIR, "locale")
# The /trunk/mo path (relative to I18N_DIR).
REL_TRUNK_MO_DIR = os.path.join(REL_TRUNK_DIR, "locale")
# The file storing Blender-generated messages.
FILE_NAME_MESSAGES = os.path.join(TRUNK_PO_DIR, "messages.txt")
# The Blender source path to check for i18n macros (relative to SOURCE_DIR).
REL_POTFILES_SOURCE_DIR = os.path.join("source")
# The Blender source path to check for i18n macros.
POTFILES_SOURCE_DIR = os.path.join(SOURCE_DIR, "source")
# The template messages file (relative to I18N_DIR).
REL_FILE_NAME_POT = os.path.join(REL_BRANCHES_DIR, DOMAIN + ".pot")
# The "source" file storing which files should be processed by xgettext, used to create FILE_NAME_POTFILES
FILE_NAME_SRC_POTFILES = os.path.join(TRUNK_PO_DIR, "_POTFILES.in")
# Mo root datapath.
REL_MO_PATH_ROOT = os.path.join(REL_TRUNK_DIR, "locale")
# The final (generated) file storing which files should be processed by xgettext.
FILE_NAME_POTFILES = os.path.join(TRUNK_PO_DIR, "POTFILES.in")
# Mo path generator for a given language.
REL_MO_PATH_TEMPLATE = os.path.join(REL_MO_PATH_ROOT, "{}", "LC_MESSAGES")
# The template messages file.
FILE_NAME_POT = os.path.join(TRUNK_PO_DIR, ".".join((DOMAIN, "pot")))
# Mo path generator for a given language (relative to any "locale" dir).
MO_PATH_ROOT_RELATIVE = os.path.join("locale")
MO_PATH_TEMPLATE_RELATIVE = os.path.join(MO_PATH_ROOT_RELATIVE, "{}", "LC_MESSAGES")
# Other py files that should be searched for ui strings, relative to SOURCE_DIR.
# Needed for Cycles, currently...
CUSTOM_PY_UI_FILES = [
# Mo file name.
MO_FILE_NAME = DOMAIN + ".mo"
# Where to search for py files that may contain ui strings (relative to SOURCE_DIR).
REL_CUSTOM_PY_UI_FILES = [
os.path.join("release", "scripts", "startup", "bl_ui"),
os.path.join("intern", "cycles", "blender", "addon", "ui.py"),
os.path.join("release", "scripts", "modules", "rna_prop_ui.py"),
]
# An optional text file listing files to force include/exclude from py_xgettext process.
SRC_POTFILES = ""
# A cache storing validated msgids, to avoid re-spellchecking them.
SPELL_CACHE = os.path.join("/tmp", ".spell_cache")
# Threshold defining whether a new msgid is similar enough to an old one to reuse its translation...
SIMILAR_MSGID_THRESHOLD = 0.75
# Additional import paths to add to sys.path (';' separated)...
INTERN_PY_SYS_PATHS = ""
# Custom override settings must be one dir above i18n tools itself!
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
try:
from bl_i18n_override_settings import *
@@ -392,3 +415,105 @@ try:
from user_settings import *
except ImportError: # If no user_settings available, it's no error!
pass
for p in set(INTERN_PY_SYS_PATHS.split(";")):
if p:
sys.path.append(p)
# The settings class itself!
def _do_get(ref, path):
return os.path.normpath(os.path.join(ref, path))
def _do_set(ref, path):
path = os.path.normpath(path)
# If the given path is absolute, make it relative to the current ref one (else we assume it already is!)
if os.path.isabs(path):
return os.path.relpath(path, ref)
else:
return path
def _gen_get_set_path(ref, name):
def _get(self):
return _do_get(getattr(self, ref), getattr(self, name))
def _set(self, value):
setattr(self, name, _do_set(getattr(self, ref), value))
return _get, _set
def _gen_get_set_paths(ref, name):
def _get(self):
return [_do_get(getattr(self, ref), p) for p in getattr(self, name)]
def _set(self, value):
setattr(self, name, [_do_set(getattr(self, ref), p) for p in value])
return _get, _set
class I18nSettings:
"""
Class allowing persistence of our settings!
Saved in JSON format, so settings should be JSON-serializable objects!
"""
_settings = None
def __new__(cls, *args, **kwargs):
# Addon preferences are a singleton by definition, and so is this class!
if not I18nSettings._settings:
cls._settings = super(I18nSettings, cls).__new__(cls)
cls._settings.__dict__ = {uid: data for uid, data in globals().items() if not uid.startswith("_")}
return I18nSettings._settings
def from_json(self, string):
data = dict(json.loads(string))
# Special case... :/
if "INTERN_PY_SYS_PATHS" in data:
self.PY_SYS_PATHS = data["INTERN_PY_SYS_PATHS"]
self.__dict__.update(data)
def to_json(self):
# Only save the diff from default i18n_settings!
glob = globals()
export_dict = {uid: val for uid, val in self.__dict__.items() if glob.get(uid) != val}
return json.dumps(export_dict)
def load(self, fname, reset=False):
if reset:
self.__dict__ = {uid: data for uid, data in globals().items() if not uid.startswith("_")}
if isinstance(fname, str):
if not os.path.isfile(fname):
return
with open(fname) as f:
self.from_json(f.read())
# Else assume fname is already a file(like) object!
else:
self.from_json(fname.read())
def save(self, fname):
if isinstance(fname, str):
with open(fname, 'w') as f:
f.write(self.to_json())
# Else assume fname is already a file(like) object!
else:
fname.write(self.to_json())
BRANCHES_DIR = property(*(_gen_get_set_path("I18N_DIR", "REL_BRANCHES_DIR")))
TRUNK_DIR = property(*(_gen_get_set_path("I18N_DIR", "REL_TRUNK_DIR")))
TRUNK_PO_DIR = property(*(_gen_get_set_path("I18N_DIR", "REL_TRUNK_PO_DIR")))
TRUNK_MO_DIR = property(*(_gen_get_set_path("I18N_DIR", "REL_TRUNK_MO_DIR")))
POTFILES_SOURCE_DIR = property(*(_gen_get_set_path("SOURCE_DIR", "REL_POTFILES_SOURCE_DIR")))
FILE_NAME_POT = property(*(_gen_get_set_path("I18N_DIR", "REL_FILE_NAME_POT")))
MO_PATH_ROOT = property(*(_gen_get_set_path("I18N_DIR", "REL_MO_PATH_ROOT")))
MO_PATH_TEMPLATE = property(*(_gen_get_set_path("I18N_DIR", "REL_MO_PATH_TEMPLATE")))
CUSTOM_PY_UI_FILES = property(*(_gen_get_set_paths("SOURCE_DIR", "REL_CUSTOM_PY_UI_FILES")))
def _get_py_sys_paths(self):
return self.INTERN_PY_SYS_PATHS
def _set_py_sys_paths(self, val):
old_paths = set(self.INTERN_PY_SYS_PATHS.split(";")) - {""}
new_paths = set(val.split(";")) - {""}
for p in old_paths - new_paths:
if p in sys.path:
sys.path.remove(p)
for p in new_paths - old_paths:
sys.path.append(p)
self.INTERN_PY_SYS_PATHS = val
PY_SYS_PATHS = property(_get_py_sys_paths, _set_py_sys_paths)
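A minimal usage sketch of the settings class above, assuming the default relative sub-paths and a Blender session (the settings module relies on bpy): assigning the absolute I18N_DIR is enough for the derived paths to resolve, and assigning an absolute value to a derived path stores it back as a relative one (the /data/i18n path is just a placeholder):

from bl_i18n_utils import settings as settings_i18n

s = settings_i18n.I18nSettings()
s.I18N_DIR = "/data/i18n"
print(s.TRUNK_PO_DIR)       # /data/i18n/trunk/po
s.TRUNK_PO_DIR = "/data/i18n/other_po"
print(s.REL_TRUNK_PO_DIR)   # other_po

# Settings round-trip through JSON; only the diff from the module defaults is saved.
s.from_json(s.to_json())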

View File

@@ -18,518 +18,561 @@
# <pep8 compliant>
import enchant
import os
import pickle
import re
_valid_before = "(?<=[\\s*'\"`])|(?<=[a-zA-Z][/-])|(?<=^)"
_valid_after = "(?=[\\s'\"`.!?,;:])|(?=[/-]\\s*[a-zA-Z])|(?=$)"
_valid_words = "(?:{})(?:(?:[A-Z]+[a-z]*)|[A-Z]*|[a-z]*)(?:{})".format(_valid_before, _valid_after)
_reg = re.compile(_valid_words)
class SpellChecker():
"""
A basic spell checker.
"""
# These must be all lower case for comparisons
uimsgs = {
# OK words
"aren", # aren't
"betweens", # yuck! in-betweens!
"boolean", "booleans",
"couldn", # couldn't
"decrement",
"derivate",
"doesn", # doesn't
"fader",
"hasn", # hasn't
"hoc", # ad-hoc
"indices",
"iridas",
"isn", # isn't
"iterable",
"kyrgyz",
"latin",
"merchantability",
"mplayer",
"vertices",
def split_words(text):
return [w for w in _reg.findall(text) if w]
# Merged words
"addon", "addons",
"antialiasing",
"arcsine", "arccosine", "arctangent",
"autoclip",
"autocomplete",
"autoname",
"autosave",
"autoscale",
"autosmooth",
"autosplit",
"backface", "backfacing",
"backimage",
"backscattered",
"bandnoise",
"bindcode",
"bitrate",
"blendin",
"bonesize",
"boundbox",
"boxpack",
"buffersize",
"builtin", "builtins",
"bytecode",
"chunksize",
"de",
"defocus",
"denoise",
"despill", "despilling",
"filebrowser",
"filelist",
"filename", "filenames",
"filepath", "filepaths",
"forcefield", "forcefields",
"fulldome", "fulldomes",
"fullscreen",
"gridline",
"hemi",
"inbetween",
"inscatter", "inscattering",
"libdata",
"lightless",
"lookup", "lookups",
"mathutils",
"midlevel",
"midground",
"mixdown",
"multi",
"multifractal",
"multires", "multiresolution",
"multisampling",
"multitexture",
"multiuser",
"namespace",
"keyconfig",
"playhead",
"polyline",
"popup", "popups",
"pre",
"precalculate",
"prefetch",
"premultiply", "premultiplied",
"prepass",
"prepend",
"preprocess", "preprocessing",
"preseek",
"readonly",
"realtime",
"rekey",
"remesh",
"reprojection",
"resize",
"restpose",
"retarget", "retargets", "retargeting", "retargeted",
"ringnoise",
"rolloff",
"screencast", "screenshot", "screenshots",
"selfcollision",
"singletexture",
"startup",
"stateful",
"starfield",
"subflare", "subflares",
"subframe", "subframes",
"subclass", "subclasses", "subclassing",
"subdirectory", "subdirectories", "subdir", "subdirs",
"submodule", "submodules",
"subpath",
"subsize",
"substep", "substeps",
"targetless",
"textbox", "textboxes",
"tilemode",
"timestamp", "timestamps",
"timestep", "timesteps",
"todo",
"un",
"unbake",
"uncomment",
"undeformed",
"undistort", "undistortion",
"ungroup",
"unhide",
"unindent",
"unkeyed",
"unpremultiply",
"unprojected",
"unreacted",
"unregister",
"unselected",
"unsubdivided", "unsubdivide",
"unshadowed",
"unspill",
"unstitchable",
"vectorscope",
"whitespace", "whitespaces",
"worldspace",
"workflow",
# Neologisms, slangs
"affectable",
"automagic", "automagically",
"blobby",
"blockiness", "blocky",
"collider", "colliders",
"deformer", "deformers",
"determinator",
"editability",
"keyer",
"lacunarity",
"numerics",
"occluder",
"passepartout",
"perspectively",
"pixelate",
"polygonization",
"selectability",
"slurph",
"stitchable",
"symmetrize",
"trackability",
"transmissivity",
"rasterized", "rasterization", "rasterizer",
"renderer", "renderable", "renderability",
# These must be all lower case for comparisons
dict_uimsgs = {
# OK words
"aren", # aren't
"betweens", # yuck! in-betweens!
"boolean", "booleans",
"couldn", # couldn't
"decrement",
"derivate",
"doesn", # doesn't
"fader",
"hasn", # hasn't
"hoc", # ad-hoc
"indices",
"iridas",
"isn", # isn't
"iterable",
"kyrgyz",
"latin",
"merchantability",
"mplayer",
"vertices",
# Abbreviations
"aero",
"amb",
"anim",
"bool",
"calc",
"config", "configs",
"const",
"coord", "coords",
"degr",
"dof",
"dupli", "duplis",
"eg",
"esc",
"expr",
"fac",
"fra",
"frs",
"grless",
"http",
"init",
"kbit", "kb",
"lensdist",
"loc", "rot", "pos",
"lorem",
"luma",
"mem",
"multicam",
"num",
"ok",
"orco",
"ortho",
"persp",
"pref", "prefs",
"prev",
"param",
"premul",
"quad", "quads",
"quat", "quats",
"recalc", "recalcs",
"refl",
"sel",
"spec",
"struct", "structs",
"tex",
"tri", "tris",
"uv", "uvs", "uvw", "uw", "uvmap",
"vec",
"vel", # velocity!
"vert", "verts",
"vis",
"xyz", "xzy", "yxz", "yzx", "zxy", "zyx",
"xy", "xz", "yx", "yz", "zx", "zy",
# Merged words
"addon", "addons",
"antialiasing",
"arcsine", "arccosine", "arctangent",
"autoclip",
"autocomplete",
"autoname",
"autosave",
"autoscale",
"autosmooth",
"autosplit",
"backface", "backfacing",
"backimage",
"backscattered",
"bandnoise",
"bindcode",
"bitrate",
"blendin",
"bonesize",
"boundbox",
"boxpack",
"buffersize",
"builtin", "builtins",
"bytecode",
"chunksize",
"de",
"defocus",
"denoise",
"despill", "despilling",
"filebrowser",
"filelist",
"filename", "filenames",
"filepath", "filepaths",
"forcefield", "forcefields",
"fulldome", "fulldomes",
"fullscreen",
"gridline",
"hemi",
"inbetween",
"inscatter", "inscattering",
"libdata",
"lightless",
"lookup", "lookups",
"mathutils",
"midlevel",
"midground",
"mixdown",
"multi",
"multifractal",
"multires", "multiresolution",
"multisampling",
"multitexture",
"multiuser",
"namespace",
"keyconfig",
"playhead",
"polyline",
"popup", "popups",
"pre",
"precalculate",
"prefetch",
"premultiply", "premultiplied",
"prepass",
"prepend",
"preprocess", "preprocessing",
"preseek",
"readonly",
"realtime",
"rekey",
"remesh",
"reprojection",
"resize",
"restpose",
"retarget", "retargets", "retargeting", "retargeted",
"ringnoise",
"rolloff",
"screencast", "screenshot", "screenshots",
"selfcollision",
"singletexture",
"startup",
"stateful",
"starfield",
"subflare", "subflares",
"subframe", "subframes",
"subclass", "subclasses", "subclassing",
"subdirectory", "subdirectories", "subdir", "subdirs",
"submodule", "submodules",
"subpath",
"subsize",
"substep", "substeps",
"targetless",
"textbox", "textboxes",
"tilemode",
"timestamp", "timestamps",
"timestep", "timesteps",
"todo",
"un",
"unbake",
"uncomment",
"undeformed",
"undistort", "undistortion",
"ungroup",
"unhide",
"unindent",
"unkeyed",
"unpremultiply",
"unprojected",
"unreacted",
"unregister",
"unselected",
"unsubdivided", "unsubdivide",
"unshadowed",
"unspill",
"unstitchable",
"vectorscope",
"whitespace", "whitespaces",
"worldspace",
"workflow",
# General computer/science terms
"boid", "boids",
"equisolid",
"euler", "eulers",
"hashable",
"intrinsics",
"isosurface",
"jitter", "jittering", "jittered",
"keymap", "keymaps",
"lambertian",
"laplacian",
"metadata",
"nand", "xnor",
"normals",
"numpad",
"octree",
"opengl",
"pulldown", "pulldowns",
"quantized",
"samplerate",
"scrollback",
"scrollbar",
"scroller",
"searchable",
"spacebar",
"tooltip", "tooltips",
"trackpad",
"unicode",
"viewport", "viewports",
"viscoelastic",
"wildcard", "wildcards",
# Neologisms, slangs
"affectable",
"automagic", "automagically",
"blobby",
"blockiness", "blocky",
"collider", "colliders",
"deformer", "deformers",
"determinator",
"editability",
"keyer",
"lacunarity",
"numerics",
"occluder",
"passepartout",
"perspectively",
"pixelate",
"polygonization",
"selectability",
"slurph",
"stitchable",
"symmetrize",
"trackability",
"transmissivity",
"rasterized", "rasterization", "rasterizer",
"renderer", "renderable", "renderability",
# General computer graphics terms
"anaglyph",
"bezier", "beziers",
"bicubic",
"bilinear",
"blackpoint", "whitepoint",
"blinn",
"bokeh",
"catadioptric",
"centroid",
"chrominance",
"codec", "codecs",
"collada",
"compositing",
"crossfade",
"deinterlace",
"dropoff",
"dv",
"eigenvectors",
"equirectangular",
"fisheye",
"framerate",
"gimbal",
"grayscale",
"icosphere",
"inpaint",
"lightmap",
"lossless", "lossy",
"matcap",
"midtones",
"mipmap", "mipmaps", "mip",
"ngon", "ngons",
"ntsc",
"nurb", "nurbs",
"perlin",
"phong",
"radiosity",
"raytrace", "raytracing", "raytraced",
"renderfarm",
"shader", "shaders",
"specular", "specularity",
"spillmap",
"sobel",
"tonemap",
"toon",
"timecode",
"voronoi",
"voxel", "voxels",
"wireframe",
"zmask",
"ztransp",
# Abbreviations
"aero",
"amb",
"anim",
"bool",
"calc",
"config", "configs",
"const",
"coord", "coords",
"degr",
"dof",
"dupli", "duplis",
"eg",
"esc",
"expr",
"fac",
"fra",
"frs",
"grless",
"http",
"init",
"kbit", "kb",
"lensdist",
"loc", "rot", "pos",
"lorem",
"luma",
"mem",
"multicam",
"num",
"ok",
"orco",
"ortho",
"persp",
"pref", "prefs",
"prev",
"param",
"premul",
"quad", "quads",
"quat", "quats",
"recalc", "recalcs",
"refl",
"sel",
"spec",
"struct", "structs",
"tex",
"tri", "tris",
"uv", "uvs", "uvw", "uw", "uvmap",
"vec",
"vel", # velocity!
"vert", "verts",
"vis",
"xyz", "xzy", "yxz", "yzx", "zxy", "zyx",
"xy", "xz", "yx", "yz", "zx", "zy",
# Blender terms
"audaspace",
"bbone",
"breakdowner",
"bspline",
"bweight",
"colorband",
"datablock", "datablocks",
"despeckle",
"dopesheet",
"dupliface", "duplifaces",
"dupliframe", "dupliframes",
"dupliobject", "dupliob",
"dupligroup",
"duplivert",
"editbone",
"editmode",
"fcurve", "fcurves",
"fluidsim",
"frameserver",
"enum",
"keyframe", "keyframes", "keyframing", "keyframed",
"metaball", "metaballs",
"metaelement", "metaelements",
"metastrip", "metastrips",
"movieclip",
"mpoly",
"mtex",
"nabla",
"navmesh",
"outliner",
"paintmap", "paintmaps",
"polygroup", "polygroups",
"poselib",
"pushpull",
"pyconstraint", "pyconstraints",
"shapekey", "shapekeys",
"shrinkfatten",
"shrinkwrap",
"softbody",
"stucci",
"sunsky",
"subsurf",
"tessface", "tessfaces",
"texface",
"timeline", "timelines",
"tosphere",
"uilist",
"vcol", "vcols",
"vgroup", "vgroups",
"vinterlace",
"wetmap", "wetmaps",
"wpaint",
"uvwarp",
# General computer/science terms
"boid", "boids",
"equisolid",
"euler", "eulers",
"hashable",
"intrinsics",
"isosurface",
"jitter", "jittering", "jittered",
"keymap", "keymaps",
"lambertian",
"laplacian",
"metadata",
"nand", "xnor",
"normals",
"numpad",
"octree",
"opengl",
"pulldown", "pulldowns",
"quantized",
"samplerate",
"scrollback",
"scrollbar",
"scroller",
"searchable",
"spacebar",
"tooltip", "tooltips",
"trackpad",
"unicode",
"viewport", "viewports",
"viscoelastic",
"wildcard", "wildcards",
# Algorithm names
"beckmann",
"catmull",
"catrom",
"chebychev",
"courant",
"kutta",
"lennard",
"minkowski",
"minnaert",
"musgrave",
"nayar",
"netravali",
"oren",
"prewitt",
"runge",
"verlet",
"worley",
# General computer graphics terms
"anaglyph",
"bezier", "beziers",
"bicubic",
"bilinear",
"blackpoint", "whitepoint",
"blinn",
"bokeh",
"catadioptric",
"centroid",
"chrominance",
"codec", "codecs",
"collada",
"compositing",
"crossfade",
"deinterlace",
"dropoff",
"dv",
"eigenvectors",
"equirectangular",
"fisheye",
"framerate",
"gimbal",
"grayscale",
"icosphere",
"inpaint",
"lightmap",
"lossless", "lossy",
"matcap",
"midtones",
"mipmap", "mipmaps", "mip",
"ngon", "ngons",
"ntsc",
"nurb", "nurbs",
"perlin",
"phong",
"radiosity",
"raytrace", "raytracing", "raytraced",
"renderfarm",
"shader", "shaders",
"specular", "specularity",
"spillmap",
"sobel",
"tonemap",
"toon",
"timecode",
"voronoi",
"voxel", "voxels",
"wireframe",
"zmask",
"ztransp",
# Acronyms
"aa", "msaa",
"api",
"asc", "cdl",
"ascii",
"atrac",
"bw",
"ccd",
"cmd",
"cpus",
"ctrl",
"cw", "ccw",
"dev",
"djv",
"dpi",
"dvar",
"dx",
"eo",
"fh",
"fov",
"fft",
"futura",
"gfx",
"gl",
"glsl",
"gpl",
"gpu", "gpus",
"hc",
"hdc",
"hdr",
"hh", "mm", "ss", "ff", # hh:mm:ss:ff timecode
"hsv", "hsva",
"id",
"itu",
"lhs",
"lmb", "mmb", "rmb",
"mux",
"ndof",
"ppc",
"precisa",
"px",
"qmc",
"rgb", "rgba",
"rhs",
"rv",
"sdl",
"sl",
"smpte",
"svn",
"ui",
"unix",
"vbo", "vbos",
"ycc", "ycca",
"yuv", "yuva",
# Blender terms
"audaspace",
"bbone",
"breakdowner",
"bspline",
"bweight",
"colorband",
"datablock", "datablocks",
"despeckle",
"dopesheet",
"dupliface", "duplifaces",
"dupliframe", "dupliframes",
"dupliobject", "dupliob",
"dupligroup",
"duplivert",
"editbone",
"editmode",
"fcurve", "fcurves",
"fluidsim",
"frameserver",
"enum",
"keyframe", "keyframes", "keyframing", "keyframed",
"metaball", "metaballs",
"metaelement", "metaelements",
"metastrip", "metastrips",
"movieclip",
"mpoly",
"mtex",
"nabla",
"navmesh",
"outliner",
"paintmap", "paintmaps",
"polygroup", "polygroups",
"poselib",
"pushpull",
"pyconstraint", "pyconstraints",
"shapekey", "shapekeys",
"shrinkfatten",
"shrinkwrap",
"softbody",
"stucci",
"sunsky",
"subsurf",
"tessface", "tessfaces",
"texface",
"timeline", "timelines",
"tosphere",
"uilist",
"vcol", "vcols",
"vgroup", "vgroups",
"vinterlace",
"wetmap", "wetmaps",
"wpaint",
"uvwarp",
# Blender acronyms
"bge",
"bli",
"bvh",
"dbvt",
"dop", # BLI K-Dop BVH
"ik",
"nla",
"py",
"qbvh",
"rna",
"rvo",
"simd",
"sph",
"svbvh",
# Algorithm names
"beckmann",
"catmull",
"catrom",
"chebychev",
"courant",
"kutta",
"lennard",
"minkowski",
"minnaert",
"musgrave",
"nayar",
"netravali",
"oren",
"prewitt",
"runge",
"verlet",
"worley",
# CG acronyms
"ao",
"bsdf",
"ior",
"mocap",
# Acronyms
"aa", "msaa",
"api",
"asc", "cdl",
"ascii",
"atrac",
"bw",
"ccd",
"cmd",
"cpus",
"ctrl",
"cw", "ccw",
"dev",
"djv",
"dpi",
"dvar",
"dx",
"eo",
"fh",
"fov",
"fft",
"futura",
"gfx",
"gl",
"glsl",
"gpl",
"gpu", "gpus",
"hc",
"hdc",
"hdr",
"hh", "mm", "ss", "ff", # hh:mm:ss:ff timecode
"hsv", "hsva",
"id",
"itu",
"lhs",
"lmb", "mmb", "rmb",
"mux",
"ndof",
"ppc",
"precisa",
"px",
"qmc",
"rgb", "rgba",
"rhs",
"rv",
"sdl",
"sl",
"smpte",
"svn",
"ui",
"unix",
"vbo", "vbos",
"ycc", "ycca",
"yuv", "yuva",
# Files types/formats
"avi",
"attrac",
"autocad",
"autodesk",
"bmp",
"btx",
"cineon",
"dpx",
"dxf",
"eps",
"exr",
"fbx",
"ffmpeg",
"flac",
"gzip",
"ico",
"jpg", "jpeg",
"matroska",
"mdd",
"mkv",
"mpeg", "mjpeg",
"mtl",
"ogg",
"openjpeg",
"osl",
"oso",
"piz",
"png",
"po",
"quicktime",
"rle",
"sgi",
"stl",
"svg",
"targa", "tga",
"tiff",
"theora",
"vorbis",
"wav",
"xiph",
"xml",
"xna",
"xvid",
}
# Blender acronyms
"bge",
"bli",
"bvh",
"dbvt",
"dop", # BLI K-Dop BVH
"ik",
"nla",
"py",
"qbvh",
"rna",
"rvo",
"simd",
"sph",
"svbvh",
_valid_before = "(?<=[\\s*'\"`])|(?<=[a-zA-Z][/-])|(?<=^)"
_valid_after = "(?=[\\s'\"`.!?,;:])|(?=[/-]\\s*[a-zA-Z])|(?=$)"
_valid_words = "(?:{})(?:(?:[A-Z]+[a-z]*)|[A-Z]*|[a-z]*)(?:{})".format(_valid_before, _valid_after)
_split_words = re.compile(_valid_words).findall
# CG acronyms
"ao",
"bsdf",
"ior",
"mocap",
@classmethod
def split_words(cls, text):
return [w for w in cls._split_words(text) if w]
# Files types/formats
"avi",
"attrac",
"autocad",
"autodesk",
"bmp",
"btx",
"cineon",
"dpx",
"dxf",
"eps",
"exr",
"fbx",
"ffmpeg",
"flac",
"gzip",
"ico",
"jpg", "jpeg",
"matroska",
"mdd",
"mkv",
"mpeg", "mjpeg",
"mtl",
"ogg",
"openjpeg",
"osl",
"oso",
"piz",
"png",
"po",
"quicktime",
"rle",
"sgi",
"stl",
"svg",
"targa", "tga",
"tiff",
"theora",
"vorbis",
"wav",
"xiph",
"xml",
"xna",
"xvid",
}
def __init__(self, settings, lang="en_US"):
self.settings = settings
self.dict_spelling = enchant.Dict(lang)
self.cache = set(self.uimsgs)
cache = self.settings.SPELL_CACHE
if cache and os.path.exists(cache):
with open(cache, 'rb') as f:
self.cache |= set(pickle.load(f))
def __del__(self):
cache = self.settings.SPELL_CACHE
if cache and os.path.exists(cache):
with open(cache, 'wb') as f:
pickle.dump(self.cache, f)
def check(self, txt):
ret = []
if txt in self.cache:
return ret
for w in self.split_words(txt):
w_lower = w.lower()
if w_lower in self.cache:
continue
if not self.dict_spelling.check(w):
ret.append((w, self.dict_spelling.suggest(w)))
else:
self.cache.add(w_lower)
if not ret:
self.cache.add(txt)
return ret
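A minimal usage sketch of the refactored spell checker above, assuming the enchant module and an en_US dictionary are installed, run from within Blender (the settings module relies on bpy), and using the settings object for SPELL_CACHE (an empty value disables the on-disk cache):

from bl_i18n_utils import settings as settings_i18n, spell_check_utils

checker = spell_check_utils.SpellChecker(settings_i18n.I18nSettings(), lang="en_US")
# check() returns a list of (word, suggestions) pairs; an empty list means nothing suspicious.
for word, suggestions in checker.check("Viewport shaading"):
    print(word, suggestions)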

View File

@@ -1,148 +0,0 @@
#!/usr/bin/python3
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENSE BLOCK *****
# <pep8 compliant>
# Update "languages" text file used by Blender at runtime to build translations menu.
import os
import sys
import shutil
try:
import settings
import utils
except:
from . import (settings, utils)
TRUNK_PO_DIR = settings.TRUNK_PO_DIR
TRUNK_MO_DIR = settings.TRUNK_MO_DIR
LANGUAGES_CATEGORIES = settings.LANGUAGES_CATEGORIES
LANGUAGES = settings.LANGUAGES
LANGUAGES_FILE = settings.LANGUAGES_FILE
OK = 0
MISSING = 1
TOOLOW = 2
FORBIDDEN = 3
FLAG_MESSAGES = {
OK: "",
MISSING: "No translation yet!",
TOOLOW: "Not enough advanced to be included...",
FORBIDDEN: "Explicitly forbidden!",
}
def find_matching_po(languages, stats, forbidden):
"""Match languages defined in LANGUAGES setting to relevant po, if possible!"""
ret = []
for uid, label, org_key, in languages:
key = org_key
if key not in stats:
# Try to simplify the key (eg from es_ES to es).
if '_' in org_key:
key = org_key[0:org_key.index('_')]
# For stuff like sr_SR@latin -> sr@latin...
if '@' in org_key:
key = key + org_key[org_key.index('@'):]
if key in stats:
if key in forbidden:
ret.append((stats[key], uid, label, org_key, FORBIDDEN))
else:
ret.append((stats[key], uid, label, org_key, OK))
else:
ret.append((0.0, uid, label, org_key, MISSING))
return ret
def main():
import argparse
parser = argparse.ArgumentParser(description="Update 'languages' text file used by Blender at runtime to build "
"translations menu.")
parser.add_argument('-m', '--min_translation', type=int, default=-100,
help="Minimum level of translation, as a percentage (translations below this are commented out "
"in menu).")
parser.add_argument('langs', metavar='ISO_code', nargs='*',
help="Unconditionally exclude those languages from the menu.")
args = parser.parse_args()
ret = 0
min_trans = args.min_translation / 100.0
forbidden = set(args.langs)
# 'DEFAULT' and en_US are always valid, fully-translated "languages"!
stats = {"DEFAULT": 1.0, "en_US": 1.0}
# Get the "done level" of each po in trunk...
for po in os.listdir(TRUNK_PO_DIR):
if po.endswith(".po") and not po.endswith("_raw.po"):
lang = os.path.basename(po)[:-3]
msgs = utils.I18nMessages(kind='PO', src=os.path.join(TRUNK_PO_DIR, po))
stats[lang] = msgs.nbr_trans_msgs / msgs.nbr_msgs
# Generate languages file used by Blender's i18n system.
# First, match all entries in LANGUAGES to a lang in stats, if possible!
stats = find_matching_po(LANGUAGES, stats, forbidden)
limits = sorted(LANGUAGES_CATEGORIES, key=lambda it: it[0], reverse=True)
idx = 0
stats = sorted(stats, key=lambda it: it[0], reverse=True)
langs_cats = [[] for i in range(len(limits))]
highest_uid = 0
for prop, uid, label, key, flag in stats:
if prop < limits[idx][0]:
# Sub-sort languages by iso-codes.
langs_cats[idx].sort(key=lambda it: it[2])
idx += 1
if prop < min_trans and flag == OK:
flag = TOOLOW
langs_cats[idx].append((uid, label, key, flag))
if abs(uid) > highest_uid:
highest_uid = abs(uid)
# Sub-sort last group of languages by iso-codes!
langs_cats[idx].sort(key=lambda it: it[2])
with open(os.path.join(TRUNK_MO_DIR, LANGUAGES_FILE), 'w', encoding="utf-8") as f:
f.write("# File used by Blender to know which languages (translations) are available, \n")
f.write("# and to generate translation menu.\n")
f.write("#\n")
f.write("# File format:\n")
f.write("# ID:MENULABEL:ISOCODE\n")
f.write("# ID must be unique, except for 0 value (marks categories for menu).\n")
f.write("# Line starting with a # are comments!\n")
f.write("#\n")
f.write("# Automatically generated by bl_i18n_utils/update_languages_menu.py script.\n")
f.write("# Highest ID currently in use: {}\n".format(highest_uid))
for cat, langs_cat in zip(limits, langs_cats):
f.write("#\n")
# Write "category menu label"...
if langs_cat:
f.write("0:{}::\n".format(cat[1]))
else:
# Do not write the category if it has no language!
f.write("# Void category! #0:{}:\n".format(cat[1]))
# ...and all matching language entries!
for uid, label, key, flag in langs_cat:
if flag == OK:
f.write("{}:{}:{}\n".format(uid, label, key))
else:
# Non-existing, commented entry!
f.write("# {} #{}:{}:{}\n".format(FLAG_MESSAGES[flag], uid, label, key))
if __name__ == "__main__":
print("\n\n *** Running {} *** \n".format(__file__))
sys.exit(main())

View File

@@ -23,38 +23,20 @@
import collections
import concurrent.futures
import copy
import hashlib
import os
import re
import struct
import sys
import tempfile
from bl_i18n_utils import settings
from bl_i18n_utils import settings, rtl_utils
PO_COMMENT_PREFIX = settings.PO_COMMENT_PREFIX
PO_COMMENT_PREFIX_MSG = settings.PO_COMMENT_PREFIX_MSG
PO_COMMENT_PREFIX_SOURCE = settings.PO_COMMENT_PREFIX_SOURCE
PO_COMMENT_PREFIX_SOURCE_CUSTOM = settings.PO_COMMENT_PREFIX_SOURCE_CUSTOM
PO_COMMENT_FUZZY = settings.PO_COMMENT_FUZZY
PO_MSGCTXT = settings.PO_MSGCTXT
PO_MSGID = settings.PO_MSGID
PO_MSGSTR = settings.PO_MSGSTR
PO_HEADER_KEY = settings.PO_HEADER_KEY
PO_HEADER_COMMENT = settings.PO_HEADER_COMMENT
PO_HEADER_COMMENT_COPYRIGHT = settings.PO_HEADER_COMMENT_COPYRIGHT
PO_HEADER_MSGSTR = settings.PO_HEADER_MSGSTR
PARSER_CACHE_HASH = settings.PARSER_CACHE_HASH
WARN_NC = settings.WARN_MSGID_NOT_CAPITALIZED
NC_ALLOWED = settings.WARN_MSGID_NOT_CAPITALIZED_ALLOWED
PARSER_CACHE_HASH = settings.PARSER_CACHE_HASH
import bpy
##### Misc Utils #####
def stripeol(s):
return s.rstrip("\n\r")
from bpy.app.translations import locale_explode
_valid_po_path_re = re.compile(r"^\S+:[0-9]+$")
@@ -91,14 +73,64 @@ def get_best_similar(data):
return key, tmp
def locale_match(loc1, loc2):
"""
Return:
-n if loc1 is a subtype of loc2 (e.g. 'fr_FR' is a subtype of 'fr').
+n if loc2 is a subtype of loc1.
n becomes smaller when both locales are more similar (e.g. (sr, sr_SR) are more similar than (sr, sr_SR@latin)).
0 if they are exactly the same.
... (Ellipsis) if they cannot match!
Note: We consider that 'sr_SR@latin' is a subtype of 'sr@latin', 'sr_SR' and 'sr', but 'sr_SR' and 'sr@latin' won't
match (will return ...)!
Note: About similarity, differences in variants matter more than differences in countries; the current cases are:
(sr, sr_SR) -> 1
(sr@latin, sr_SR@latin) -> 1
(sr, sr@latin) -> 2
(sr_SR, sr_SR@latin) -> 2
(sr, sr_SR@latin) -> 3
"""
if loc1 == loc2:
return 0
l1, c1, v1, *_1 = locale_explode(loc1)
l2, c2, v2, *_2 = locale_explode(loc2)
if l1 == l2:
if c1 == c2:
if v1 == v2:
return 0
elif v2 is None:
return -2
elif v1 is None:
return 2
return ...
elif c2 is None:
if v1 == v2:
return -1
elif v2 is None:
return -3
return ...
elif c1 is None:
if v1 == v2:
return 1
elif v1 is None:
return 3
return ...
return ...
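# A few concrete values, following the rules documented above:
#     locale_match("fr_FR", "fr_FR")    -> 0
#     locale_match("sr_SR", "sr")       -> -1  (sr_SR is a subtype of sr)
#     locale_match("sr", "sr_SR@latin") -> 3
#     locale_match("sr_SR", "sr@latin") -> ...  (no match)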
##### Main Classes #####
class I18nMessage:
"""
Internal representation of a message.
"""
__slots__ = ("msgctxt_lines", "msgid_lines", "msgstr_lines", "comment_lines", "is_fuzzy", "is_commented")
__slots__ = ("msgctxt_lines", "msgid_lines", "msgstr_lines", "comment_lines", "is_fuzzy", "is_commented",
"settings")
def __init__(self, msgctxt_lines=[], msgid_lines=[], msgstr_lines=[], comment_lines=[],
is_commented=False, is_fuzzy=False):
is_commented=False, is_fuzzy=False, settings=settings):
self.settings = settings
self.msgctxt_lines = msgctxt_lines
self.msgid_lines = msgid_lines
self.msgstr_lines = msgstr_lines
@@ -107,42 +139,42 @@ class I18nMessage:
self.is_commented = is_commented
def _get_msgctxt(self):
return ("".join(self.msgctxt_lines)).replace("\\n", "\n")
return "".join(self.msgctxt_lines)
def _set_msgctxt(self, ctxt):
self.msgctxt_lines = [ctxt]
msgctxt = property(_get_msgctxt, _set_msgctxt)
def _get_msgid(self):
return ("".join(self.msgid_lines)).replace("\\n", "\n")
return "".join(self.msgid_lines)
def _set_msgid(self, msgid):
self.msgid_lines = [msgid]
msgid = property(_get_msgid, _set_msgid)
def _get_msgstr(self):
return ("".join(self.msgstr_lines)).replace("\\n", "\n")
return "".join(self.msgstr_lines)
def _set_msgstr(self, msgstr):
self.msgstr_lines = [msgstr]
msgstr = property(_get_msgstr, _set_msgstr)
def _get_sources(self):
lstrip1 = len(PO_COMMENT_PREFIX_SOURCE)
lstrip2 = len(PO_COMMENT_PREFIX_SOURCE_CUSTOM)
return ([l[lstrip1:] for l in self.comment_lines if l.startswith(PO_COMMENT_PREFIX_SOURCE)] +
[l[lstrip2:] for l in self.comment_lines if l.startswith(PO_COMMENT_PREFIX_SOURCE_CUSTOM)])
lstrip1 = len(self.settings.PO_COMMENT_PREFIX_SOURCE)
lstrip2 = len(self.settings.PO_COMMENT_PREFIX_SOURCE_CUSTOM)
return ([l[lstrip1:] for l in self.comment_lines if l.startswith(self.settings.PO_COMMENT_PREFIX_SOURCE)] +
[l[lstrip2:] for l in self.comment_lines
if l.startswith(self.settings.PO_COMMENT_PREFIX_SOURCE_CUSTOM)])
def _set_sources(self, sources):
# list.copy() is not available in py3.2 ...
cmmlines = []
cmmlines[:] = self.comment_lines
cmmlines = self.comment_lines.copy()
for l in cmmlines:
if l.startswith(PO_COMMENT_PREFIX_SOURCE) or l.startswith(PO_COMMENT_PREFIX_SOURCE_CUSTOM):
if (l.startswith(self.settings.PO_COMMENT_PREFIX_SOURCE) or
l.startswith(self.settings.PO_COMMENT_PREFIX_SOURCE_CUSTOM)):
self.comment_lines.remove(l)
lines_src = []
lines_src_custom = []
for src in sources:
if is_valid_po_path(src):
lines_src.append(PO_COMMENT_PREFIX_SOURCE + src)
lines_src.append(self.settings.PO_COMMENT_PREFIX_SOURCE + src)
else:
lines_src_custom.append(PO_COMMENT_PREFIX_SOURCE_CUSTOM + src)
lines_src_custom.append(self.settings.PO_COMMENT_PREFIX_SOURCE_CUSTOM + src)
self.comment_lines += lines_src_custom + lines_src
sources = property(_get_sources, _set_sources)
@@ -151,18 +183,29 @@ class I18nMessage:
return len(self.msgid) > 30
is_tooltip = property(_get_is_tooltip)
def copy(self):
# Deepcopy everything but the settings!
return self.__class__(msgctxt_lines=self.msgctxt_lines[:], msgid_lines=self.msgid_lines[:],
msgstr_lines=self.msgstr_lines[:], comment_lines=self.comment_lines[:],
is_commented=self.is_commented, is_fuzzy=self.is_fuzzy, settings=self.settings)
def normalize(self, max_len=80):
"""
Normalize this message, call this before exporting it...
Currently normalize msgctxt, msgid and msgstr lines to given max_len (if below 1, make them single line).
"""
max_len -= 2 # The two quotes!
def _splitlines(text):
lns = text.splitlines()
return [l + "\n" for l in lns[:-1]] + lns[-1:]
# We do not need the full power of textwrap... We just split first at escaped new lines, then into each line
# if needed... No word splitting, nor fancy spaces handling!
def _wrap(text, max_len, init_len):
if len(text) + init_len < max_len:
return [text]
lines = text.splitlines()
lines = _splitlines(text)
ret = []
for l in lines:
tmp = []
@@ -178,25 +221,64 @@ class I18nMessage:
if tmp:
ret.append(" ".join(tmp))
return ret
if max_len < 1:
self.msgctxt_lines = self.msgctxt.replace("\n", "\\n\n").splitlines()
self.msgid_lines = self.msgid.replace("\n", "\\n\n").splitlines()
self.msgstr_lines = self.msgstr.replace("\n", "\\n\n").splitlines()
self.msgctxt_lines = _splitlines(self.msgctxt)
self.msgid_lines = _splitlines(self.msgid)
self.msgstr_lines = _splitlines(self.msgstr)
else:
init_len = len(PO_MSGCTXT) + 1
init_len = len(self.settings.PO_MSGCTXT) + 1
if self.is_commented:
init_len += len(PO_COMMENT_PREFIX_MSG)
self.msgctxt_lines = _wrap(self.msgctxt.replace("\n", "\\n\n"), max_len, init_len)
init_len += len(self.settings.PO_COMMENT_PREFIX_MSG)
self.msgctxt_lines = _wrap(self.msgctxt, max_len, init_len)
init_len = len(PO_MSGID) + 1
init_len = len(self.settings.PO_MSGID) + 1
if self.is_commented:
init_len += len(PO_COMMENT_PREFIX_MSG)
self.msgid_lines = _wrap(self.msgid.replace("\n", "\\n\n"), max_len, init_len)
init_len += len(self.settings.PO_COMMENT_PREFIX_MSG)
self.msgid_lines = _wrap(self.msgid, max_len, init_len)
init_len = len(PO_MSGSTR) + 1
init_len = len(self.settings.PO_MSGSTR) + 1
if self.is_commented:
init_len += len(PO_COMMENT_PREFIX_MSG)
self.msgstr_lines = _wrap(self.msgstr.replace("\n", "\\n\n"), max_len, init_len)
init_len += len(self.settings.PO_COMMENT_PREFIX_MSG)
self.msgstr_lines = _wrap(self.msgstr, max_len, init_len)
# Be sure comment lines are not duplicated (can happen with sources...).
tmp = []
for l in self.comment_lines:
if l not in tmp:
tmp.append(l)
self.comment_lines = tmp
_esc_quotes = re.compile(r'(?!<\\)((?:\\\\)*)"')
_unesc_quotes = re.compile(r'(?!<\\)((?:\\\\)*)\\"')
_esc_names = ("msgctxt_lines", "msgid_lines", "msgstr_lines")
_esc_names_all = _esc_names + ("comment_lines",)
@classmethod
def do_escape(cls, txt):
"""Replace some chars by their escaped versions!"""
txt = txt.replace("\n", "\\n").replace("\t", "\\t")
txt = cls._esc_quotes.sub(r'\1\"', txt)
return txt
@classmethod
def do_unescape(cls, txt):
"""Replace escaped chars by real ones!"""
txt = txt.replace("\\n", "\n").replace("\\t", "\t")
txt = cls._unesc_quotes.sub(r'\1"', txt)
return txt
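# Escaping sketch: real newlines, tabs and double quotes in message lines are turned
# into their po escape sequences on write, and turned back while parsing, e.g.
#     do_escape('He said "hi"\n') == 'He said \\"hi\\"\\n'
# so typical message lines round-trip through escape()/unescape() unchanged.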
def escape(self, do_all=False):
names = self._esc_names_all if do_all else self._esc_names
for name in names:
setattr(self, name, [self.do_escape(l) for l in getattr(self, name)])
def unescape(self, do_all=True):
names = self._esc_names_all if do_all else self._esc_names
for name in names:
setattr(self, name, [self.do_unescape(l) for l in getattr(self, name)])
if None in getattr(self, name):
print(getattr(self, name))
class I18nMessages:
@@ -207,10 +289,11 @@ class I18nMessages:
# Avoid parsing again!
# Keys should be (pseudo) file-names, values are tuples (hash, I18nMessages)
# Note: only used by po parser currently!
_parser_cache = {}
#_parser_cache = {}
def __init__(self, iso="__POT__", kind=None, key=None, src=None):
self.iso = iso
def __init__(self, uid=None, kind=None, key=None, src=None, settings=settings):
self.settings = settings
self.uid = uid if uid is not None else settings.PARSER_TEMPLATE_ID
self.msgs = self._new_messages()
self.trans_msgs = set()
self.fuzzy_msgs = set()
@@ -229,22 +312,26 @@ class I18nMessages:
self.parse(kind, key, src)
self.update_info()
self._reverse_cache = None
@staticmethod
def _new_messages():
return getattr(collections, 'OrderedDict', dict)()
@classmethod
def gen_empty_messages(cls, iso, blender_ver, blender_rev, time, year, default_copyright=True):
def gen_empty_messages(cls, uid, blender_ver, blender_rev, time, year, default_copyright=True, settings=settings):
"""Generate an empty I18nMessages object (only header is present!)."""
msgstr = PO_HEADER_MSGSTR.format(blender_ver=str(blender_ver), blender_rev=int(blender_rev),
time=str(time), iso=str(iso))
fmt = settings.PO_HEADER_MSGSTR
msgstr = fmt.format(blender_ver=str(blender_ver), blender_rev=int(blender_rev), time=str(time), uid=str(uid))
comment = ""
if default_copyright:
comment = PO_HEADER_COMMENT_COPYRIGHT.format(year=str(year))
comment = comment + PO_HEADER_COMMENT
comment = settings.PO_HEADER_COMMENT_COPYRIGHT.format(year=str(year))
comment = comment + settings.PO_HEADER_COMMENT
msgs = cls(iso=iso)
msgs.msgs[PO_HEADER_KEY] = I18nMessage([], [""], [msgstr], [comment], False, True)
msgs = cls(uid=uid, settings=settings)
key = settings.PO_HEADER_KEY
msgs.msgs[key] = I18nMessage([key[0]], [key[1]], msgstr.split("\n"), comment.split("\n"),
False, False, settings=settings)
msgs.update_info()
return msgs
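# Typical use sketch (version, revision and date below are placeholders):
#     msgs = I18nMessages.gen_empty_messages("fr_FR", "2.66", 54697, "2013-02-24", 2013)
# This returns an I18nMessages holding only the PO_HEADER_KEY entry, with the header
# msgstr and the copyright comment filled in from the given values.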
@@ -253,16 +340,79 @@ class I18nMessages:
for msg in self.msgs.values():
msg.normalize(max_len)
def escape(self, do_all=False):
for msg in self.msgs.values():
msg.escape(do_all)
def unescape(self, do_all=True):
for msg in self.msgs.values():
msg.unescape(do_all)
def check(self, fix=False):
"""
Check consistency between messages and their keys!
Check that messages using format specifiers are consistent between msgid and msgstr!
If fix is True, tries to fix the issues.
Return a list of found errors (empty if everything went OK!).
"""
ret = []
default_context = self.settings.DEFAULT_CONTEXT
_format = re.compile("%[.0-9]*[tslfd]").findall
done_keys = set()
tmp = {}
rem = set()
for key, msg in self.msgs.items():
msgctxt, msgid, msgstr = msg.msgctxt, msg.msgid, msg.msgstr
real_key = (msgctxt or default_context, msgid)
if key != real_key:
ret.append("Error! msg's context/message do not match its key ({} / {})".format(real_key, key))
if real_key in self.msgs:
ret.append("Error! msg's real_key already used!")
if fix:
rem.add(real_key)
elif fix:
tmp[real_key] = msg
done_keys.add(key)
if '%' in msgid and msgstr and len(_format(msgid)) != len(_format(msgstr)):
ret.append("Error! msg's format entities are not matched in msgid and msgstr ({})".format(real_key))
if fix:
msg.msgstr = ""
for k in rem:
del self.msgs[k]
self.msgs.update(tmp)
return ret
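# For example, a message whose msgid contains "%f" but whose msgstr lost it is reported
# (and its msgstr cleared when fix=True), and a message stored under a key that does not
# match its own (msgctxt, msgid) pair is reported and, when fix=True, re-added under the
# correct key.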
def clean_commented(self):
self.update_info()
nbr = len(self.comm_msgs)
for k in self.comm_msgs:
del self.msgs[k]
return nbr
def rtl_process(self):
keys = []
trans = []
for k, m in self.msgs.items():
keys.append(k)
trans.append(m.msgstr)
trans = rtl_utils.log2vis(trans, self.settings)
for k, t in zip(keys, trans):
self.msgs[k].msgstr = t
def merge(self, replace=False, *args):
# TODO
pass
def update(self, ref, use_similar=0.75, keep_old_commented=True):
def update(self, ref, use_similar=None, keep_old_commented=True):
"""
Update this I18nMessages object with the ref one. Translations from ref are never used. Source comments from ref
completely replace current ones. If use_similar is not 0.0, it will try to match new messages in ref with an
existing one. Messages no longer found in ref will be marked as commented if keep_old_commented is True,
or removed.
"""
if use_similar is None:
use_similar = self.settings.SIMILAR_MSGID_THRESHOLD
similar_pool = {}
if use_similar > 0.0:
for key, msg in self.msgs.items():
@@ -288,13 +438,15 @@ class I18nMessages:
with concurrent.futures.ProcessPoolExecutor() as exctr:
for key, msgid in exctr.map(get_best_similar,
tuple((nk, use_similar, tuple(similar_pool.keys())) for nk in new_keys)):
#for key, msgid in map(get_best_similar,
#tuple((nk, use_similar, tuple(similar_pool.keys())) for nk in new_keys)):
if msgid:
# Try to get the same context, else just get one...
skey = (key[0], msgid)
if skey not in similar_pool[msgid]:
skey = tuple(similar_pool[msgid])[0]
# We keep org translation and comments, and mark message as fuzzy.
msg, refmsg = copy.deepcopy(self.msgs[skey]), ref.msgs[key]
msg, refmsg = self.msgs[skey].copy(), ref.msgs[key]
msg.msgctxt = refmsg.msgctxt
msg.msgid = refmsg.msgid
msg.sources = refmsg.sources
@@ -316,7 +468,7 @@ class I18nMessages:
msgs[key].sources = []
# Special 'meta' message, change project ID version and pot creation date...
key = ("", "")
key = self.settings.PO_HEADER_KEY
rep = []
markers = ("Project-Id-Version:", "POT-Creation-Date:")
for mrk in markers:
@@ -340,7 +492,7 @@ class I18nMessages:
self.nbr_signs = 0
self.nbr_trans_signs = 0
for key, msg in self.msgs.items():
if key == PO_HEADER_KEY:
if key == self.settings.PO_HEADER_KEY:
continue
if msg.is_commented:
self.comm_msgs.add(key)
@@ -360,7 +512,7 @@ class I18nMessages:
self.nbr_trans_ttips = len(self.ttip_msgs & self.trans_msgs)
self.nbr_comm_msgs = len(self.comm_msgs)
def print_stats(self, prefix=""):
def print_stats(self, prefix="", output=print):
"""
Print out some stats about an I18nMessages object.
"""
@@ -390,7 +542,149 @@ class I18nMessages:
"{:>6.1%} of messages are commented ({} over {}).\n"
"".format(lvl_comm, self.nbr_comm_msgs, self.nbr_comm_msgs + self.nbr_msgs),
"This translation is currently made of {} signs.\n".format(self.nbr_trans_signs))
print(prefix.join(lines))
output(prefix.join(lines))
def invalidate_reverse_cache(self, rebuild_now=False):
"""
Invalidate the reverse cache used by find_best_messages_matches.
"""
self._reverse_cache = None
if rebuild_now:
src_to_msg, ctxt_to_msg, msgid_to_msg, msgstr_to_msg = {}, {}, {}, {}
for key, msg in self.msgs.items():
if msg.is_commented:
continue
ctxt, msgid = key
ctxt_to_msg.setdefault(ctxt, set()).add(key)
msgid_to_msg.setdefault(msgid, set()).add(key)
msgstr_to_msg.setdefault(msg.msgstr, set()).add(key)
for src in msg.sources:
src_to_msg.setdefault(src, set()).add(key)
self._reverse_cache = (src_to_msg, ctxt_to_msg, msgid_to_msg, msgstr_to_msg)
def find_best_messages_matches(self, msgs, msgmap, rna_ctxt, rna_struct_name, rna_prop_name, rna_enum_name):
"""
Try to find the best I18nMessages (i.e. context/msgid pairs) for the given UI messages:
msgs: an object containing properties listed in msgmap's values.
msgmap: a dict of various messages to use for search:
{"but_label": subdict, "rna_label": subdict, "enum_label": subdict,
"but_tip": subdict, "rna_tip": subdict, "enum_tip": subdict}
each subdict being like that:
{"msgstr": id, "msgid": id, "msg_flags": id, "key": set()}
where msgstr and msgid are identifiers of string props in msgs (resp. translated and org message),
msg_flags is not used here, and key is a set of matching (msgctxt, msgid) keys for the item.
The other parameters describe the RNA element the strings come from, if it could be determined:
rna_ctxt: the labels' i18n context.
rna_struct_name, rna_prop_name, rna_enum_name: should be self-explanatory!
"""
# Build helper mappings.
# Note it's the user's responsibility to know when to invalidate (and hence force a rebuild of) this cache!
if self._reverse_cache is None:
self.invalidate_reverse_cache(True)
src_to_msg, ctxt_to_msg, msgid_to_msg, msgstr_to_msg = self._reverse_cache
# print(len(src_to_msg), len(ctxt_to_msg), len(msgid_to_msg), len(msgstr_to_msg))
# Build RNA key.
src, src_rna, src_enum = bpy.utils.make_rna_paths(rna_struct_name, rna_prop_name, rna_enum_name)
print("src: ", src_rna, src_enum)
# Labels.
elbl = getattr(msgs, msgmap["enum_label"]["msgstr"])
if elbl:
# Enum items' labels have no i18n context...
k = ctxt_to_msg[self.settings.DEFAULT_CONTEXT].copy()
if elbl in msgid_to_msg:
k &= msgid_to_msg[elbl]
elif elbl in msgstr_to_msg:
k &= msgstr_to_msg[elbl]
else:
k = set()
# We assume that if we already have only one key, it's the right one!
if len(k) > 1 and src_enum in src_to_msg:
k &= src_to_msg[src_enum]
msgmap["enum_label"]["key"] = k
rlbl = getattr(msgs, msgmap["rna_label"]["msgstr"])
#print("rna label: " + rlbl, rlbl in msgid_to_msg, rlbl in msgstr_to_msg)
if rlbl:
k = ctxt_to_msg[rna_ctxt].copy()
if k and rlbl in msgid_to_msg:
k &= msgid_to_msg[rlbl]
elif k and rlbl in msgstr_to_msg:
k &= msgstr_to_msg[rlbl]
else:
k = set()
# We assume that if we already have only one key, it's the right one!
if len(k) > 1 and src_rna in src_to_msg:
k &= src_to_msg[src_rna]
msgmap["rna_label"]["key"] = k
blbl = getattr(msgs, msgmap["but_label"]["msgstr"])
blbls = [blbl]
if blbl.endswith(self.settings.NUM_BUTTON_SUFFIX):
# Num buttons report their label with a trailing ': '...
blbls.append(blbl[:-len(self.settings.NUM_BUTTON_SUFFIX)])
print("button label: " + blbl)
if blbl and elbl not in blbls and (rlbl not in blbls or rna_ctxt != self.settings.DEFAULT_CONTEXT):
# Always Default context for button label :/
k = ctxt_to_msg[self.settings.DEFAULT_CONTEXT].copy()
found = False
for bl in blbls:
if bl in msgid_to_msg:
k &= msgid_to_msg[bl]
found = True
break
elif bl in msgstr_to_msg:
k &= msgstr_to_msg[bl]
found = True
break
if not found:
k = set()
# XXX No need to check against RNA path here; if blbl is different
# from rlbl, it should not match anyway!
msgmap["but_label"]["key"] = k
# Tips (they never have a specific context).
etip = getattr(msgs, msgmap["enum_tip"]["msgstr"])
#print("enum tip: " + etip)
if etip:
k = ctxt_to_msg[self.settings.DEFAULT_CONTEXT].copy()
if etip in msgid_to_msg:
k &= msgid_to_msg[etip]
elif etip in msgstr_to_msg:
k &= msgstr_to_msg[etip]
else:
k = set()
# We assume that if we already have only one key, it's the right one!
if len(k) > 1 and src_enum in src_to_msg:
k &= src_to_msg[src_enum]
msgmap["enum_tip"]["key"] = k
rtip = getattr(msgs, msgmap["rna_tip"]["msgstr"])
#print("rna tip: " + rtip)
if rtip:
k = ctxt_to_msg[self.settings.DEFAULT_CONTEXT].copy()
if k and rtip in msgid_to_msg:
k &= msgid_to_msg[rtip]
elif k and rtip in msgstr_to_msg:
k &= msgstr_to_msg[rtip]
else:
k = set()
# We assume that if we already have only one key, it's the right one!
if len(k) > 1 and src_rna in src_to_msg:
k &= src_to_msg[src_rna]
msgmap["rna_tip"]["key"] = k
#print(k)
btip = getattr(msgs, msgmap["but_tip"]["msgstr"])
#print("button tip: " + btip)
if btip and btip not in {rtip, etip}:
k = ctxt_to_msg[self.settings.DEFAULT_CONTEXT].copy()
if btip in msgid_to_msg:
k &= msgid_to_msg[btip]
elif btip in msgstr_to_msg:
k &= msgstr_to_msg[btip]
else:
k = set()
# XXX No need to check against RNA path here; if btip is different from rtip, it should not match anyway!
msgmap["but_tip"]["key"] = k
def parse(self, kind, key, src):
del self.parsing_errors[:]
@@ -419,14 +713,16 @@ class I18nMessages:
msgstr_lines = []
comment_lines = []
default_context = self.settings.DEFAULT_CONTEXT
# Helper function
def finalize_message(self, line_nr):
nonlocal reading_msgid, reading_msgstr, reading_msgctxt, reading_comment
nonlocal is_commented, is_fuzzy, msgid_lines, msgstr_lines, msgctxt_lines, comment_lines
msgid = "".join(msgid_lines)
msgctxt = "".join(msgctxt_lines)
msgkey = (msgctxt, msgid)
msgid = I18nMessage.do_unescape("".join(msgid_lines))
msgctxt = I18nMessage.do_unescape("".join(msgctxt_lines))
msgkey = (msgctxt or default_context, msgid)
# Never allow overriding existing msgid/msgctxt pairs!
if msgkey in self.msgs:
@@ -434,7 +730,7 @@ class I18nMessages:
return
self.msgs[msgkey] = I18nMessage(msgctxt_lines, msgid_lines, msgstr_lines, comment_lines,
is_commented, is_fuzzy)
is_commented, is_fuzzy, settings=self.settings)
# Let's clean up and get ready for next message!
reading_msgid = reading_msgstr = reading_msgctxt = reading_comment = False
@@ -445,32 +741,31 @@ class I18nMessages:
comment_lines = []
# try to use src as file name...
if os.path.exists(src):
if os.path.isfile(src):
if os.stat(src).st_size > self.settings.PARSER_MAX_FILE_SIZE:
# Security, else we could read arbitrarily huge files!
print("WARNING: skipping file {}, too huge!".format(src))
return
if not key:
key = src
with open(src, 'r', encoding="utf-8") as f:
src = f.read()
# Try to use values from cache!
curr_hash = None
if key and key in self._parser_cache:
old_hash, msgs = self._parser_cache[key]
import hashlib
curr_hash = hashlib.new(PARSER_CACHE_HASH, src.encode()).digest()
if curr_hash == old_hash:
self.msgs = copy.deepcopy(msgs) # we might edit self.msgs!
return
_comm_msgctxt = PO_COMMENT_PREFIX_MSG + PO_MSGCTXT
_len_msgctxt = len(PO_MSGCTXT + '"')
_msgctxt = self.settings.PO_MSGCTXT
_comm_msgctxt = self.settings.PO_COMMENT_PREFIX_MSG + _msgctxt
_len_msgctxt = len(_msgctxt + '"')
_len_comm_msgctxt = len(_comm_msgctxt + '"')
_comm_msgid = PO_COMMENT_PREFIX_MSG + PO_MSGID
_len_msgid = len(PO_MSGID + '"')
_msgid = self.settings.PO_MSGID
_comm_msgid = self.settings.PO_COMMENT_PREFIX_MSG + _msgid
_len_msgid = len(_msgid + '"')
_len_comm_msgid = len(_comm_msgid + '"')
_comm_msgstr = PO_COMMENT_PREFIX_MSG + PO_MSGSTR
_len_msgstr = len(PO_MSGSTR + '"')
_msgstr = self.settings.PO_MSGSTR
_comm_msgstr = self.settings.PO_COMMENT_PREFIX_MSG + _msgstr
_len_msgstr = len(_msgstr + '"')
_len_comm_msgstr = len(_comm_msgstr + '"')
_len_comm_str = len(PO_COMMENT_PREFIX_MSG + '"')
_comm_str = self.settings.PO_COMMENT_PREFIX_MSG
_comm_fuzzy = self.settings.PO_COMMENT_FUZZY
_len_comm_str = len(_comm_str + '"')
# Main loop over all lines in src...
for line_nr, line in enumerate(src.splitlines()):
@@ -479,20 +774,20 @@ class I18nMessages:
finalize_message(self, line_nr)
continue
elif line.startswith(PO_MSGCTXT) or line.startswith(_comm_msgctxt):
elif line.startswith(_msgctxt) or line.startswith(_comm_msgctxt):
reading_comment = False
reading_ctxt = True
if line.startswith(PO_COMMENT_PREFIX_MSG):
if line.startswith(_comm_str):
is_commented = True
line = line[_len_comm_msgctxt:-1]
else:
line = line[_len_msgctxt:-1]
msgctxt_lines.append(line)
elif line.startswith(PO_MSGID) or line.startswith(_comm_msgid):
elif line.startswith(_msgid) or line.startswith(_comm_msgid):
reading_comment = False
reading_msgid = True
if line.startswith(PO_COMMENT_PREFIX_MSG):
if line.startswith(_comm_str):
if not is_commented and reading_ctxt:
self.parsing_errors.append((line_nr, "commented msgid following regular msgctxt"))
is_commented = True
@@ -502,13 +797,13 @@ class I18nMessages:
reading_ctxt = False
msgid_lines.append(line)
elif line.startswith(PO_MSGSTR) or line.startswith(_comm_msgstr):
elif line.startswith(_msgstr) or line.startswith(_comm_msgstr):
if not reading_msgid:
self.parsing_errors.append((line_nr, "msgstr without a prior msgid"))
else:
reading_msgid = False
reading_msgstr = True
if line.startswith(PO_COMMENT_PREFIX_MSG):
if line.startswith(_comm_str):
line = line[_len_comm_msgstr:-1]
if not is_commented:
self.parsing_errors.append((line_nr, "commented msgstr following regular msgid"))
@@ -518,8 +813,8 @@ class I18nMessages:
self.parsing_errors.append((line_nr, "regular msgstr following commented msgid"))
msgstr_lines.append(line)
elif line.startswith(PO_COMMENT_PREFIX[0]):
if line.startswith(PO_COMMENT_PREFIX_MSG):
elif line.startswith(_comm_str[0]):
if line.startswith(_comm_str):
if reading_msgctxt:
if is_commented:
msgctxt_lines.append(line[_len_comm_str:-1])
@@ -542,7 +837,7 @@ class I18nMessages:
if reading_msgctxt or reading_msgid or reading_msgstr:
self.parsing_errors.append((line_nr,
"commented string within msgctxt, msgid or msgstr scope, ignored"))
elif line.startswith(PO_COMMENT_FUZZY):
elif line.startswith(_comm_fuzzy):
is_fuzzy = True
else:
comment_lines.append(line)
@@ -563,12 +858,7 @@ class I18nMessages:
# If no final empty line, last message is not finalized!
if reading_msgstr:
finalize_message(self, line_nr)
if key:
if not curr_hash:
import hashlib
curr_hash = hashlib.new(PARSER_CACHE_HASH, src.encode()).digest()
self._parser_cache[key] = (curr_hash, self.msgs)
self.unescape()
def write(self, kind, dest):
self.writers[kind](self, dest)
@@ -577,54 +867,139 @@ class I18nMessages:
"""
Write messages in fname po file.
"""
self.normalize(max_len=0) # No wrapping for now...
with open(fname, 'w', encoding="utf-8") as f:
for msg in self.msgs.values():
default_context = self.settings.DEFAULT_CONTEXT
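# Each message is emitted in standard PO form, roughly like this (illustrative example, not actual output):
#   msgctxt "Operator"
#   msgid "New"
#   msgstr "Nouveau"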
def _write(self, f):
_msgctxt = self.settings.PO_MSGCTXT
_msgid = self.settings.PO_MSGID
_msgstr = self.settings.PO_MSGSTR
_comm = self.settings.PO_COMMENT_PREFIX_MSG
self.escape()
for num, msg in enumerate(self.msgs.values()):
f.write("\n".join(msg.comment_lines))
# Only mark as fuzzy if msgstr is not empty!
if msg.is_fuzzy and msg.msgstr:
f.write("\n" + PO_COMMENT_FUZZY)
_p = PO_COMMENT_PREFIX_MSG if msg.is_commented else ""
_pmsgctxt = _p + PO_MSGCTXT
_pmsgid = _p + PO_MSGID
_pmsgstr = _p + PO_MSGSTR
f.write("\n" + self.settings.PO_COMMENT_FUZZY)
_p = _comm if msg.is_commented else ""
chunks = []
if msg.msgctxt:
if msg.msgctxt and msg.msgctxt != default_context:
if len(msg.msgctxt_lines) > 1:
chunks += [
"\n" + _pmsgctxt + "\"\"\n" + _p + "\"",
"\n" + _p + _msgctxt + "\"\"\n" + _p + "\"",
("\"\n" + _p + "\"").join(msg.msgctxt_lines),
"\"",
]
else:
chunks += ["\n" + _pmsgctxt + "\"" + msg.msgctxt + "\""]
chunks += ["\n" + _p + _msgctxt + "\"" + msg.msgctxt + "\""]
if len(msg.msgid_lines) > 1:
chunks += [
"\n" + _pmsgid + "\"\"\n" + _p + "\"",
"\n" + _p + _msgid + "\"\"\n" + _p + "\"",
("\"\n" + _p + "\"").join(msg.msgid_lines),
"\"",
]
else:
chunks += ["\n" + _pmsgid + "\"" + msg.msgid + "\""]
chunks += ["\n" + _p + _msgid + "\"" + msg.msgid + "\""]
if len(msg.msgstr_lines) > 1:
chunks += [
"\n" + _pmsgstr + "\"\"\n" + _p + "\"",
"\n" + _p + _msgstr + "\"\"\n" + _p + "\"",
("\"\n" + _p + "\"").join(msg.msgstr_lines),
"\"",
]
else:
chunks += ["\n" + _pmsgstr + "\"" + msg.msgstr + "\""]
chunks += ["\n" + _p + _msgstr + "\"" + msg.msgstr + "\""]
chunks += ["\n\n"]
f.write("".join(chunks))
self.unescape()
self.normalize(max_len=0) # No wrapping for now...
if isinstance(fname, str):
with open(fname, 'w', encoding="utf-8") as f:
_write(self, f)
# Else assume fname is already a file(like) object!
else:
_write(self, fname)
def write_messages_to_mo(self, fname):
"""
Write messages in fname mo file.
"""
# XXX Temp solution, until I can get my own mo generator working...
import subprocess
import tempfile
with tempfile.NamedTemporaryFile(mode='w+', encoding="utf-8") as tmp_po_f:
self.write_messages_to_po(tmp_po_f)
tmp_po_f.flush()  # Make sure msgfmt reads the fully written temporary po file.
cmd = (self.settings.GETTEXT_MSGFMT_EXECUTABLE,
"--statistics", # show stats
tmp_po_f.name,
"-o",
fname,
)
print("Running ", " ".join(cmd))
ret = subprocess.call(cmd)
print("Finished.")
return
# XXX Code below is currently broken (generates corrupted mo files it seems :( )!
# Using http://www.gnu.org/software/gettext/manual/html_node/MO-Files.html notation.
# Not generating hash table!
# Only translated, unfuzzy messages are taken into account!
default_context = self.settings.DEFAULT_CONTEXT
msgs = tuple(v for v in self.msgs.values() if not (v.is_fuzzy or v.is_commented) and v.msgstr and v.msgid)
msgs = sorted(msgs[:2],
key=lambda e: (e.msgctxt + e.msgid) if (e.msgctxt and e.msgctxt != default_context) else e.msgid)
magic_nbr = 0x950412de
format_rev = 0
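# MO header fields, following the gettext manual:
#   N = number of strings, O = offset of the original-strings table,
#   T = offset of the translated-strings table, S = size of the hash table (none generated here),
#   H = offset of the (unused) hash table.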
N = len(msgs)
O = 32
T = O + N * 8
S = 0
H = T + N * 8
# Prepare our data! We need the key (optional context and msgid), the translation, and the offset and length of both.
# Offsets are relative to the start of their own list.
EOT = b"\x04"  # The single EOT byte used to concatenate context and msgid.
_msgid_offset = 0
_msgstr_offset = 0
def _gen(v):
nonlocal _msgid_offset, _msgstr_offset
msgid = v.msgid.encode("utf-8")
msgstr = v.msgstr.encode("utf-8")
if v.msgctxt and v.msgctxt != default_context:
msgctxt = v.msgctxt.encode("utf-8")
msgid = msgctxt + EOT + msgid
# Don't forget the final NULL char!
_msgid_len = len(msgid) + 1
_msgstr_len = len(msgstr) + 1
ret = ((msgid, _msgid_len, _msgid_offset), (msgstr, _msgstr_len, _msgstr_offset))
_msgid_offset += _msgid_len
_msgstr_offset += _msgstr_len
return ret
msgs = tuple(_gen(v) for v in msgs)
msgid_start = H
msgstr_start = msgid_start + _msgid_offset
print(N, msgstr_start + _msgstr_offset)
print(msgs)
with open(fname, 'wb') as f:
# Header...
f.write(struct.pack("=8I", magic_nbr, format_rev, N, O, T, S, H, 0))
# Msgid's length and offset.
f.write(b"".join(struct.pack("=2I", length, msgid_start + offset) for (_1, length, offset), _2 in msgs))
# Msgstr's length and offset.
f.write(b"".join(struct.pack("=2I", length, msgstr_start + offset) for _1, (_2, length, offset) in msgs))
# No hash table!
# Msgid's.
f.write(b"\0".join(msgid for (msgid, _1, _2), _3 in msgs) + b"\0")
# Msgstr's.
f.write(b"\0".join(msgstr for _1, (msgstr, _2, _3) in msgs) + b"\0")
parsers = {
"PO": parse_messages_from_po,
# "PYTUPLE": parse_messages_from_pytuple,
}
writers = {
"PO": write_messages_to_po,
#"PYDICT": write_messages_to_pydict,
"MO": write_messages_to_mo,
}
@@ -633,10 +1008,67 @@ class I18n:
Internal representation of a whole translation set.
"""
def __init__(self, src):
@staticmethod
def _parser_check_file(path, maxsize=settings.PARSER_MAX_FILE_SIZE, _begin_marker=None, _end_marker=None):
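# Splits the file content into (head, body, tail) around the optional begin/end markers
# (the markers themselves stay in head/tail). If a requested marker is missing, returns
# (whole_text, None, None); files bigger than maxsize yield (None, None, None).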
if os.stat(path).st_size > maxsize:
# Security: otherwise we could read arbitrarily huge files!
print("WARNING: skipping file {}, too huge!".format(path))
return None, None, None
txt = ""
with open(path) as f:
txt = f.read()
_in = 0
_out = len(txt)
if _begin_marker:
_in = None
if _begin_marker in txt:
_in = txt.index(_begin_marker) + len(_begin_marker)
if _end_marker:
_out = None
if _end_marker in txt:
_out = txt.index(_end_marker)
if _in is not None and _out is not None:
return txt[:_in], txt[_in:_out], txt[_out:]
return txt, None, None
@staticmethod
def _dst(self, path, uid, kind):
if kind == 'PO':
if uid == self.settings.PARSER_TEMPLATE_ID:
if not path.endswith(".pot"):
return os.path.join(os.path.dirname(path), "blender.pot")
if not path.endswith(".po"):
return os.path.join(os.path.dirname(path), uid + ".po")
elif kind == 'PY':
if not path.endswith(".py"):
if self.src.get(self.settings.PARSER_PY_ID):
return self.src[self.settings.PARSER_PY_ID]
return os.path.join(os.path.dirname(path), "translations.py")
return path
def __init__(self, kind=None, src=None, langs=set(), settings=settings):
self.settings = settings
self.trans = {}
self.src = {} # Should have the same keys as self.trans (plus PARSER_PY_ID for py file)!
self.dst = self._dst # A callable that transforms src_path into dst_path!
if kind and src:
self.parse(kind, src, langs)
self.update_info()
def _py_file_get(self):
return self.src.get(self.settings.PARSER_PY_ID)
def _py_file_set(self, value):
self.src[self.settings.PARSER_PY_ID] = value
py_file = property(_py_file_get, _py_file_set)
def escape(self, do_all=False):
for trans in self.trans.values():
trans.escape(do_all)
def unescape(self, do_all=True):
for trans in self.trans.values():
trans.unescape(do_all)
def update_info(self):
self.nbr_trans = 0
self.lvl = 0.0
@@ -648,12 +1080,12 @@ class I18n:
self.nbr_trans_signs = 0
self.contexts = set()
if TEMPLATE_ISO_ID in self.trans:
if self.settings.PARSER_TEMPLATE_ID in self.trans:
self.nbr_trans = len(self.trans) - 1
self.nbr_signs = self.trans[TEMPLATE_ISO_ID].nbr_signs
self.nbr_signs = self.trans[self.settings.PARSER_TEMPLATE_ID].nbr_signs
else:
self.nbr_trans = len(self.trans)
for iso, msgs in self.trans.items():
for msgs in self.trans.values():
msgs.update_info()
if msgs.nbr_msgs > 0:
self.lvl += float(msgs.nbr_trans_msgs) / float(msgs.nbr_msgs)
@@ -675,69 +1107,299 @@ class I18n:
"""
if print_msgs:
msgs_prefix = prefix + " "
for key, msgs in self.trans:
if key == TEMPLATE_ISO_ID:
for key, msgs in self.trans.items():
if key == self.settings.PARSER_TEMPLATE_ID:
continue
print(prefix + key + ":")
msgs.print_stats(prefix=msgs_prefix)
print(prefix)
nbr_contexts = len(self.contexts - {CONTEXT_DEFAULT})
nbr_contexts = len(self.contexts - {bpy.app.translations.contexts.default})
if nbr_contexts != 1:
if nbr_contexts == 0:
nbr_contexts = "No"
_ctx_txt = "s are"
else:
_ctx_txt = " is"
lines = ("",
"Average stats for all {} translations:\n".format(self.nbr_trans),
" {:>6.1%} done!\n".format(self.lvl / self.nbr_trans),
" {:>6.1%} of messages are tooltips.\n".format(self.lvl_ttips / self.nbr_trans),
" {:>6.1%} of tooltips are translated.\n".format(self.lvl_trans_ttips / self.nbr_trans),
" {:>6.1%} of translated messages are tooltips.\n".format(self.lvl_ttips_in_trans / self.nbr_trans),
" {:>6.1%} of messages are commented.\n".format(self.lvl_comm / self.nbr_trans),
" The org msgids are currently made of {} signs.\n".format(self.nbr_signs),
" All processed translations are currently made of {} signs.\n".format(self.nbr_trans_signs),
" {} specific context{} present:\n {}\n"
"".format(self.nbr_contexts, _ctx_txt, "\n ".join(self.contexts - {CONTEXT_DEFAULT})),
"\n")
lines = (("",
"Average stats for all {} translations:\n".format(self.nbr_trans),
" {:>6.1%} done!\n".format(self.lvl / self.nbr_trans),
" {:>6.1%} of messages are tooltips.\n".format(self.lvl_ttips / self.nbr_trans),
" {:>6.1%} of tooltips are translated.\n".format(self.lvl_trans_ttips / self.nbr_trans),
" {:>6.1%} of translated messages are tooltips.\n".format(self.lvl_ttips_in_trans / self.nbr_trans),
" {:>6.1%} of messages are commented.\n".format(self.lvl_comm / self.nbr_trans),
" The org msgids are currently made of {} signs.\n".format(self.nbr_signs),
" All processed translations are currently made of {} signs.\n".format(self.nbr_trans_signs),
" {} specific context{} present:\n".format(self.nbr_contexts, _ctx_txt)) +
tuple(" " + c + "\n" for c in self.contexts - {bpy.app.translations.contexts.default}) +
("\n",)
)
print(prefix.join(lines))
def parse(self, kind, src, langs=set()):
self.parsers[kind](self, src, langs)
##### Parsers #####
def parse_from_po(self, src, langs=set()):
"""
src must be a tuple (dir_of_pos, pot_file), where:
* dir_of_pos may contain iso_CODE.po files and/or iso_CODE/iso_CODE.po files.
* pot_file may be None (in which case there will be no ref messages).
If the langs set is empty, all languages found are loaded.
"""
root_dir, pot_file = src
if pot_file and os.path.isfile(pot_file):
self.trans[self.settings.PARSER_TEMPLATE_ID] = I18nMessages(self.settings.PARSER_TEMPLATE_ID, 'PO',
pot_file, pot_file, settings=self.settings)
self.src[self.settings.PARSER_TEMPLATE_ID] = pot_file
#def parse_messages_from_pytuple(self, src, key=None):
#"""
#Returns a dict of tuples similar to the one returned by parse_messages_from_po (one per language, plus a 'pot'
#one keyed as '__POT__').
#"""
## src may be either a string to be interpreted as py code, or a real tuple!
#if isinstance(src, str):
#src = eval(src)
#
#curr_hash = None
#if key and key in _parser_cache:
#old_hash, ret = _parser_cache[key]
#import hashlib
#curr_hash = hashlib.new(PARSER_CACHE_HASH, str(src).encode()).digest()
#if curr_hash == old_hash:
#return ret
#
#pot = new_messages()
#states = gen_states()
#stats = gen_stats()
#ret = {"__POT__": (pot, states, stats)}
#for msg in src:
#key = msg[0]
#messages[msgkey] = gen_message(msgid_lines, msgstr_lines, comment_lines, msgctxt_lines)
#pot[key] = gen_message(msgid_lines=[key[1]], msgstr_lines=[
#for lang, trans, (is_fuzzy, comments) in msg[2:]:
#if trans and not is_fuzzy:
#i18n_dict.setdefault(lang, dict())[key] = trans
#
#if key:
#if not curr_hash:
#import hashlib
#curr_hash = hashlib.new(PARSER_CACHE_HASH, str(src).encode()).digest()
#_parser_cache[key] = (curr_hash, val)
#return ret
for p in os.listdir(root_dir):
uid = po_file = None
if p.endswith(".po") and os.path.isfile(p):
uid = p[:-3]
if langs and uid not in langs:
continue
po_file = os.path.join(root_dir, p)
elif os.path.isdir(os.path.join(root_dir, p)):
uid = p
if langs and uid not in langs:
continue
po_file = os.path.join(root_dir, p, p + ".po")
if not os.path.isfile(po_file):
continue
else:
continue
if uid in self.trans:
printf("WARNING! {} id has been found more than once! only first one has been loaded!".format(uid))
continue
self.trans[uid] = I18nMessages(uid, 'PO', po_file, po_file, settings=self.settings)
self.src[uid] = po_file
def parse_from_py(self, src, langs=set()):
"""
src must be a valid path, either a py file or a module directory (in which case all py files inside it
will be checked, the first file matching will win!).
If the langs set is empty, all languages found are loaded.
"""
default_context = self.settings.DEFAULT_CONTEXT
txt = None
if os.path.isdir(src):
for root, dnames, fnames in os.walk(src):
for fname in fnames:
path = os.path.join(root, fname)
_1, txt, _2 = self._parser_check_file(path, _begin_marker=self.settings.PARSER_PY_MARKER_BEGIN, _end_marker=self.settings.PARSER_PY_MARKER_END)
if txt is not None:
self.src[self.settings.PARSER_PY_ID] = path
break
if txt is not None:
break
elif src.endswith(".py") and os.path.isfile(src):
_1, txt, _2 = self._parser_check_file(src, _begin_marker=self.settings.PARSER_PY_MARKER_BEGIN, _end_marker=self.settings.PARSER_PY_MARKER_END)
if txt is not None:
self.src[self.settings.PARSER_PY_ID] = src
if txt is None:
return
env = globals()
exec(txt, env)
if "translations_tuple" not in env:
return # No data...
msgs = env["translations_tuple"]
for key, (sources, gen_comments), *translations in msgs:
if self.settings.PARSER_TEMPLATE_ID not in self.trans:
self.trans[self.settings.PARSER_TEMPLATE_ID] = I18nMessages(self.settings.PARSER_TEMPLATE_ID,
settings=self.settings)
self.src[self.settings.PARSER_TEMPLATE_ID] = self.src[self.settings.PARSER_PY_ID]
if key in self.trans[self.settings.PARSER_TEMPLATE_ID].msgs:
print("ERROR! key {} is defined more than once! Skipping re-definitions!")
continue
custom_src = [c for c in sources if c.startswith("bpy.")]
src = [c for c in sources if not c.startswith("bpy.")]
common_comment_lines = [self.settings.PO_COMMENT_PREFIX_GENERATED + c for c in gen_comments] + \
[self.settings.PO_COMMENT_PREFIX_SOURCE_CUSTOM + c for c in custom_src] + \
[self.settings.PO_COMMENT_PREFIX_SOURCE + c for c in src]
ctxt = [key[0]] if key[0] else [default_context]
self.trans[self.settings.PARSER_TEMPLATE_ID].msgs[key] = I18nMessage(ctxt, [key[1]], [""],
common_comment_lines, False, False,
settings=self.settings)
for uid, msgstr, (is_fuzzy, user_comments) in translations:
if uid not in self.trans:
self.trans[uid] = I18nMessages(uid, settings=self.settings)
self.src[uid] = self.src[self.settings.PARSER_PY_ID]
comment_lines = [self.settings.PO_COMMENT_PREFIX + c for c in user_comments] + common_comment_lines
self.trans[uid].msgs[key] = I18nMessage(ctxt, [key[1]], [msgstr], comment_lines, False, is_fuzzy,
settings=self.settings)
self.unescape()
def write(self, kind, langs=set()):
self.writers[kind](self, langs)
def write_to_po(self, langs=set()):
"""
Write all translations into po files. By default, write in the same files (or dir) as the source, specify
a custom self.dst function to write somewhere else!
Note: If langs is set and you want to export the pot template as well, langs must contain PARSER_TEMPLATE_ID
({} currently).
""".format(self.settings.PARSER_TEMPLATE_ID)
keys = self.trans.keys()
if langs:
keys &= langs
for uid in keys:
dst = self.dst(self, self.src.get(uid, ""), uid, 'PO')
self.trans[uid].write('PO', dst)
def write_to_py(self, langs=set()):
"""
Write all translations as python code, either in a "translations.py" file under the same dir as the source(s), or in
the file specified by self.py_file if it is set (as usual, this can be customized with the self.dst callable!).
Note: If langs is set and you want to export the pot template as well, langs must contain PARSER_TEMPLATE_ID
({} currently).
""".format(self.settings.PARSER_TEMPLATE_ID)
default_context = self.settings.DEFAULT_CONTEXT
def _gen_py(self, langs, tab=" "):
_lencomm = len(self.settings.PO_COMMENT_PREFIX)
_lengen = len(self.settings.PO_COMMENT_PREFIX_GENERATED)
_lensrc = len(self.settings.PO_COMMENT_PREFIX_SOURCE)
_lencsrc = len(self.settings.PO_COMMENT_PREFIX_SOURCE_CUSTOM)
ret = [
"# NOTE: You can safely move around this auto-generated block (with the begin/end markers!), and "
"edit the translations by hand.",
"# Just carefully respect the format of the tuple!",
"",
"# Tuple of tuples "
"((msgctxt, msgid), (sources, gen_comments), (lang, translation, (is_fuzzy, comments)), ...)",
"translations_tuple = (",
]
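# A single generated entry might look like this (purely illustrative values):
#   (("Operator", "Copy"),
#    (("bpy.types.SOME_OT_operator",), ()),
#    ("fr", "Copier", (False, ())),
#   ),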
# First gather all keys (msgctxt, msgid) - theoretically, all translations should share the same, but...
keys = set()
for trans in self.trans.values():
keys |= trans.msgs.keys()
# Get the ref translation (ideally the PARSER_TEMPLATE_ID one, else the first one that pops up).
# The ref translation will be used to generate the sources "comments".
ref = self.trans.get(self.settings.PARSER_TEMPLATE_ID) or self.trans[list(self.trans.keys())[0]]
# Get all languages (uids) and sort them (PARSER_TEMPLATE_ID excluded!)
translations = self.trans.keys() - {self.settings.PARSER_TEMPLATE_ID}
if langs:
translations &= langs
translations = [('"' + lng + '"', " " * len(lng) + 4, self.trans[lng]) for lng in sorted(translations)]
for key in keys:
if key not in ref.msgs or ref.msgs[key].is_commented:
continue
# Key (context + msgid).
msgctxt, msgid = key
if not msgctxt:
msgctxt = default_context
ret.append(tab + "(({}, \"{}\"),".format('"' + msgctxt + '"' if msgctxt else "None", msgid))
# Common comments (mostly sources!).
sources = []
gen_comments = []
for comment in ref.msgs[key].comment_lines:
if comment.startswith(self.settings.PO_COMMENT_PREFIX_SOURCE_CUSTOM):
sources.append(comment[_lencsrc:])
elif comment.startswith(self.settings.PO_COMMENT_PREFIX_SOURCE):
sources.append(comment[_lensrc:])
elif comment.startswith(self.settings.PO_COMMENT_PREFIX_GENERATED):
gen_comments.append(comment[_lengen:])
if not (sources or gen_comments):
ret.append(tab + " ((), ()),")
else:
if len(sources) > 1:
ret.append(tab + " ((\"" + sources[0] + "\",")
ret += [tab + " \"" + s + "\"," for s in sources[1:-1]]
ret.append(tab + " \"" + sources[-1] + "\"),")
else:
ret.append(tab + " ((" + ('"' + sources[0] + '",' if sources else "") + "),")
if len(gen_comments) > 1:
ret.append(tab + " (\"" + gen_comments[0] + "\",")
ret += [tab + " \"" + s + "\"," for s in gen_comments[1:-1]]
ret.append(tab + " \"" + gen_comments[-1] + "\")),")
else:
ret.append(tab + " (" + ('"' + gen_comments[0] + '",' if gen_comments else "") + ")),")
# All languages
for lngstr, lngsp, trans in translations:
if key not in trans.msgs or trans.msgs[key].is_commented:
continue
# Language code and translation.
ret.append(tab + " (" + lngstr + ", \"" + trans.msgs[key].msgstr + "\",")
# User comments and fuzzy.
comments = []
for comment in trans.msgs[key].comment_lines:
if comment.startswith(self.settings.PO_COMMENT_PREFIX):
comments.append(comment[_lencomm:])
ret.append(tab + lngsp + "(" + ("True" if trans.msgs[key].is_fuzzy else "False") + ",")
if len(comments) > 1:
ret.append(tab + lngsp + " (\"" + comments[0] + "\",")
ret += [tab + lngsp + " \"" + s + "\"," for s in comments[1:-1]]
ret.append(tab + lngsp + " \"" + comments[-1] + "\"))),")
else:
ret[-1] = ret[-1] + " " + ('"' + comments[0] + '",' if comments else "") + "))),"
ret.append(tab + "),")
ret += [
")",
"",
"translations_dict = {}",
"for msg in translations_tuple:",
tab + "key = msg[0]",
tab + "for lang, trans, (is_fuzzy, comments) in msg[2:]:",
tab * 2 + "if trans and not is_fuzzy:",
tab * 3 + "translations_dict.setdefault(lang, {})[key] = trans",
"",
]
return ret
self.escape(True)
dst = self.dst(self, self.src.get(self.settings.PARSER_PY_ID, ""), self.settings.PARSER_PY_ID, 'PY')
prev = txt = next = ""
if os.path.exists(dst):
if not os.path.isfile(dst):
print("WARNING: trying to write as python code into {}, which is not a file! Aborting.".format(dst))
return
prev, txt, next = self._parser_check_file(dst, self.settings.PARSER_MAX_FILE_SIZE,
self.settings.PARSER_PY_MARKER_BEGIN,
self.settings.PARSER_PY_MARKER_END)
if prev is None:
return
if txt is None:
print("WARNING: given python file {} has no auto-generated translations yet, will be added at "
"the end of the file, you can move that section later if needed...".format(dst))
txt = _gen_py(self, langs)
else:
printf("Creating python file {} containing translations.".format(dst))
txt = [
"# ***** BEGIN GPL LICENSE BLOCK *****",
"#",
"# This program is free software; you can redistribute it and/or",
"# modify it under the terms of the GNU General Public License",
"# as published by the Free Software Foundation; either version 2",
"# of the License, or (at your option) any later version.",
"#",
"# This program is distributed in the hope that it will be useful,",
"# but WITHOUT ANY WARRANTY; without even the implied warranty of",
"# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the",
"# GNU General Public License for more details.",
"#",
"# You should have received a copy of the GNU General Public License",
"# along with this program; if not, write to the Free Software Foundation,",
"# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.",
"#",
"# ***** END GPL LICENSE BLOCK *****",
"",
self.settings.PARSER_PY_MARKER_BEGIN,
"",
]
txt += _gen_py(self, langs)
txt += [
"",
self.settings.PARSER_PY_MARKER_END,
]
with open(dst, 'w') as f:
f.write(prev + "\n".join(txt) + (next or ""))
self.unescape()
parsers = {
"PO": parse_from_po,
"PY": parse_from_py,
}
writers = {
"PO": write_to_po,
"PY": write_to_py,
}

View File

@@ -35,6 +35,7 @@ __all__ = (
"register_module",
"register_manual_map",
"unregister_manual_map",
"make_rna_paths",
"manual_map",
"resource_path",
"script_path_user",
@@ -640,3 +641,29 @@ def manual_map():
continue
yield prefix, url_manual_mapping
# Build an RNA path from struct/property/enum names.
def make_rna_paths(struct_name, prop_name, enum_name):
"""
Create RNA "paths" from given names.
:arg struct_name: Name of an RNA struct (e.g. "Scene").
:type struct_name: string
:arg prop_name: Name of an RNA struct's property.
:type prop_name: string
:arg enum_name: Name of an RNA enum identifier.
:type enum_name: string
:return: A triple of "RNA paths" (most_complete_path, "struct.prop", "struct.prop:'enum'").
If no enum_name is given, the third element will always be empty.
:rtype: tuple of strings
"""
src = src_rna = src_enum = ""
if struct_name:
if prop_name:
src = src_rna = ".".join((struct_name, prop_name))
if enum_name:
src = src_enum = "{}:'{}'".format(src_rna, enum_name)
else:
src = src_rna = struct_name
return src, src_rna, src_enum
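# Usage sketch (hypothetical values):
#   make_rna_paths("Scene", "frame_start", "")  -> ("Scene.frame_start", "Scene.frame_start", "")
#   make_rna_paths("Object", "type", "MESH")    -> ("Object.type:'MESH'", "Object.type", "Object.type:'MESH'")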