svn merge ^/trunk/blender -r48095:48592

This commit is contained in:
Ove Murberg Henriksen
2012-07-04 15:56:42 +00:00
1126 changed files with 21322 additions and 11234 deletions

View File

@@ -139,6 +139,8 @@ def modules(module_cache):
return mod
else:
print("fake_module: addon missing 'bl_info' "
"gives bad performance!: %r" % mod_path)
return None
modules_stale = set(module_cache.keys())
@@ -183,8 +185,8 @@ def modules(module_cache):
del modules_stale
mod_list = list(module_cache.values())
mod_list.sort(key=lambda mod: (mod.bl_info['category'],
mod.bl_info['name'],
mod_list.sort(key=lambda mod: (mod.bl_info["category"],
mod.bl_info["name"],
))
return mod_list

View File

@@ -157,8 +157,8 @@ def find_path_new(id_data, data_path, rna_update_dict, rna_update_from_map):
def update_data_paths(rna_update):
''' rna_update triple [(class_name, from, to), ...]
'''
""" rna_update triple [(class_name, from, to), ...]
"""
# make a faster lookup dict
rna_update_dict = {}
@@ -887,9 +887,9 @@ if __name__ == "__main__":
# Example, should be called externally
# (class, from, to)
replace_ls = [
('AnimVizMotionPaths', 'frame_after', 'frame_after'),
('AnimVizMotionPaths', 'frame_before', 'frame_before'),
('AnimVizOnionSkinning', 'frame_after', 'frame_after'),
("AnimVizMotionPaths", "frame_after", "frame_after"),
("AnimVizMotionPaths", "frame_before", "frame_before"),
("AnimVizOnionSkinning", "frame_after", "frame_after"),
]
update_data_paths(replace_ls)

View File

@@ -0,0 +1,21 @@
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
"""Package for translation (i18n) tools."""

View File

@@ -0,0 +1,547 @@
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENSE BLOCK *****
# <pep8-80 compliant>
# Write out messages.txt from Blender.
# XXX: This script is meant to be used from inside Blender!
# You should not directly use this script, rather use update_msg.py!
import os
# XXX Relative import does not work here when used from Blender...
#from . import settings
import bl_i18n_utils.settings as settings
#classes = set()
SOURCE_DIR = settings.SOURCE_DIR
CUSTOM_PY_UI_FILES = [os.path.abspath(os.path.join(SOURCE_DIR, p))
for p in settings.CUSTOM_PY_UI_FILES]
FILE_NAME_MESSAGES = settings.FILE_NAME_MESSAGES
COMMENT_PREFIX = settings.COMMENT_PREFIX
CONTEXT_PREFIX = settings.CONTEXT_PREFIX
CONTEXT_DEFAULT = settings.CONTEXT_DEFAULT
UNDOC_OPS_STR = settings.UNDOC_OPS_STR
NC_ALLOWED = settings.WARN_MSGID_NOT_CAPITALIZED_ALLOWED
def check(check_ctxt, messages, key, msgsrc):
    """Record potential issues about *key* into the sets held by *check_ctxt*.

    :param check_ctxt: dict mapping check names to result sets; may be None,
                       in which case no check is performed at all.
    :param messages: dict of messages collected so far (used by multi_rnatip).
    :param key: (context, message) tuple to inspect.
    :param msgsrc: source identifier of the message (not used by the checks).
    """
    if check_ctxt is None:
        return

    # Same message used as tip for several RNA items?
    multi_rnatip = check_ctxt.get("multi_rnatip")
    if multi_rnatip is not None and key in messages and key not in multi_rnatip:
        multi_rnatip.add(key)

    # Message spanning several lines?
    multi_lines = check_ctxt.get("multi_lines")
    if multi_lines is not None and '\n' in key[1]:
        multi_lines.add(key)

    # RNA message also present in the py UI snapshot?
    py_in_rna = check_ctxt.get("py_in_rna")
    if py_in_rna is not None and key in py_in_rna[1]:
        py_in_rna[0].add(key)

    # Message starting with a lowercase letter (and not whitelisted)?
    not_capitalized = check_ctxt.get("not_capitalized")
    if (not_capitalized is not None and
            key[1] not in NC_ALLOWED and key[1][0].isalpha() and
            not key[1][0].isupper()):
        not_capitalized.add(key)

    # Message ending with a period?
    end_point = check_ctxt.get("end_point")
    if end_point is not None and key[1].strip().endswith('.'):
        end_point.add(key)

    # Placeholder tip of an undocumented operator?
    undoc_ops = check_ctxt.get("undoc_ops")
    if undoc_ops is not None and key[1] == UNDOC_OPS_STR:
        undoc_ops.add(key)
def dump_messages_rna(messages, check_ctxt):
    """Dump into *messages* all translatable strings exposed through RNA
    (class names/descriptions/labels, property names/descriptions, enum item
    names/descriptions), plus the keymap hierarchy names.

    :param messages: dict {(context, msgid): [msgsrc, ...]} updated in place.
    :param check_ctxt: dict of check sets (see `check`), or None/empty to
                       disable checks.
    """
    import bpy

    def classBlackList():
        # Build and return the list of RNA class identifiers to skip.
        blacklist_rna_class = [# core classes
                               "Context", "Event", "Function", "UILayout",
                               "BlendData",
                               # registerable classes
                               "Panel", "Menu", "Header", "RenderEngine",
                               "Operator", "OperatorMacro", "Macro",
                               "KeyingSetInfo", "UnknownType",
                               # window classes
                               "Window",
                               ]

        # ---------------------------------------------------------------------
        # Collect internal operators

        # extend with all internal operators
        # note that this uses internal api introspection functions
        # all possible operator names
        op_ids = set(cls.bl_rna.identifier for cls in
                     bpy.types.OperatorProperties.__subclasses__()) | \
                 set(cls.bl_rna.identifier for cls in
                     bpy.types.Operator.__subclasses__()) | \
                 set(cls.bl_rna.identifier for cls in
                     bpy.types.OperatorMacro.__subclasses__())

        get_instance = __import__("_bpy").ops.get_instance
        path_resolve = type(bpy.context).__base__.path_resolve
        for idname in op_ids:
            op = get_instance(idname)
            # XXX Do not skip INTERNAL's anymore, some of those ops
            #     show up in UI now!
            # if 'INTERNAL' in path_resolve(op, "bl_options"):
            #     blacklist_rna_class.append(idname)

        # ---------------------------------------------------------------------
        # Collect builtin classes we don't need to doc
        blacklist_rna_class.append("Property")
        blacklist_rna_class.extend(
                [cls.__name__ for cls in
                 bpy.types.Property.__subclasses__()])

        # ---------------------------------------------------------------------
        # Collect classes which are attached to collections, these are api
        # access only.
        collection_props = set()
        for cls_id in dir(bpy.types):
            cls = getattr(bpy.types, cls_id)
            for prop in cls.bl_rna.properties:
                if prop.type == 'COLLECTION':
                    prop_cls = prop.srna
                    if prop_cls is not None:
                        collection_props.add(prop_cls.identifier)
        blacklist_rna_class.extend(sorted(collection_props))

        return blacklist_rna_class

    blacklist_rna_class = classBlackList()

    def filterRNA(bl_rna):
        # True when this RNA class is blacklisted and must be skipped.
        rid = bl_rna.identifier
        if rid in blacklist_rna_class:
            print(" skipping", rid)
            return True
        return False

    check_ctxt_rna = check_ctxt_rna_tip = None
    if check_ctxt:
        check_ctxt_rna = {"multi_lines": check_ctxt.get("multi_lines"),
                          "not_capitalized": check_ctxt.get("not_capitalized"),
                          "end_point": check_ctxt.get("end_point"),
                          "undoc_ops": check_ctxt.get("undoc_ops")}
        # NOTE: check_ctxt_rna_tip aliases check_ctxt_rna (same dict object),
        # so adding "multi_rnatip" below enables that check for both names.
        check_ctxt_rna_tip = check_ctxt_rna
        check_ctxt_rna_tip["multi_rnatip"] = check_ctxt.get("multi_rnatip")

    # -------------------------------------------------------------------------
    # Function definitions

    def walkProperties(bl_rna):
        # Dump translatable strings of all own (non-inherited) properties.
        import bpy

        # Get our parents' properties, to not export them multiple times.
        bl_rna_base = bl_rna.base
        if bl_rna_base:
            bl_rna_base_props = bl_rna_base.properties.values()
        else:
            bl_rna_base_props = ()

        for prop in bl_rna.properties:
            # Only write this property if our parent hasn't got it.
            if prop in bl_rna_base_props:
                continue
            if prop.identifier == "rna_type":
                continue

            msgsrc = "bpy.types.{}.{}".format(bl_rna.identifier, prop.identifier)
            context = getattr(prop, "translation_context", CONTEXT_DEFAULT)
            if prop.name and (prop.name != prop.identifier or context):
                key = (context, prop.name)
                check(check_ctxt_rna, messages, key, msgsrc)
                messages.setdefault(key, []).append(msgsrc)
            if prop.description:
                key = (CONTEXT_DEFAULT, prop.description)
                check(check_ctxt_rna_tip, messages, key, msgsrc)
                messages.setdefault(key, []).append(msgsrc)
            if isinstance(prop, bpy.types.EnumProperty):
                for item in prop.enum_items:
                    msgsrc = "bpy.types.{}.{}:'{}'".format(bl_rna.identifier,
                                                           prop.identifier,
                                                           item.identifier)
                    if item.name and item.name != item.identifier:
                        key = (CONTEXT_DEFAULT, item.name)
                        check(check_ctxt_rna, messages, key, msgsrc)
                        messages.setdefault(key, []).append(msgsrc)
                    if item.description:
                        key = (CONTEXT_DEFAULT, item.description)
                        check(check_ctxt_rna_tip, messages, key, msgsrc)
                        messages.setdefault(key, []).append(msgsrc)

    def walkRNA(bl_rna):
        # Dump name/description/label of one RNA class, then its properties.
        if filterRNA(bl_rna):
            return

        msgsrc = ".".join(("bpy.types", bl_rna.identifier))
        context = getattr(bl_rna, "translation_context", CONTEXT_DEFAULT)

        if bl_rna.name and (bl_rna.name != bl_rna.identifier or context):
            key = (context, bl_rna.name)
            check(check_ctxt_rna, messages, key, msgsrc)
            messages.setdefault(key, []).append(msgsrc)

        if bl_rna.description:
            key = (CONTEXT_DEFAULT, bl_rna.description)
            check(check_ctxt_rna_tip, messages, key, msgsrc)
            messages.setdefault(key, []).append(msgsrc)

        if hasattr(bl_rna, 'bl_label') and bl_rna.bl_label:
            key = (context, bl_rna.bl_label)
            check(check_ctxt_rna, messages, key, msgsrc)
            messages.setdefault(key, []).append(msgsrc)

        walkProperties(bl_rna)

    def walkClass(cls):
        walkRNA(cls.bl_rna)

    def walk_keymap_hierarchy(hier, msgsrc_prev):
        # hier items look like (name, ident, ?, children) — lvl[0] is the
        # displayed name, lvl[1] the identifier, lvl[3] the sub-hierarchy.
        for lvl in hier:
            msgsrc = "{}.{}".format(msgsrc_prev, lvl[1])
            messages.setdefault((CONTEXT_DEFAULT, lvl[0]), []).append(msgsrc)

            if lvl[3]:
                walk_keymap_hierarchy(lvl[3], msgsrc)

    # -------------------------------------------------------------------------
    # Dump Messages

    def process_cls_list(cls_list):
        # Walk classes sorted by their full hierarchy id; returns the number
        # of classes processed (recursively including subclasses).
        if not cls_list:
            return 0

        def full_class_id(cls):
            """ gives us 'ID.Lamp.AreaLamp' which is best for sorting.
            """
            cls_id = ""
            bl_rna = cls.bl_rna
            while bl_rna:
                cls_id = "{}.{}".format(bl_rna.identifier, cls_id)
                bl_rna = bl_rna.base
            return cls_id

        cls_list.sort(key=full_class_id)
        processed = 0
        for cls in cls_list:
            walkClass(cls)
            # classes.add(cls)
            # Recursively process subclasses.
            processed += process_cls_list(cls.__subclasses__()) + 1
        return processed

    # Parse everything (recursively parsing from bpy_struct "class"...).
    processed = process_cls_list(type(bpy.context).__base__.__subclasses__())
    print("{} classes processed!".format(processed))
    # import pickle
    # global classes
    # classes = {str(c) for c in classes}
    # with open("/home/i7deb64/Bureau/tpck_2", "wb") as f:
    #     pickle.dump(classes, f, protocol=0)

    from bpy_extras.keyconfig_utils import KM_HIERARCHY

    walk_keymap_hierarchy(KM_HIERARCHY, "KM_HIERARCHY")
def dump_messages_pytext(messages, check_ctxt):
    """ dumps text inlined in the python user interface: eg.

        layout.prop("someprop", text="My Name")

    Scans the bl_ui startup scripts (plus CUSTOM_PY_UI_FILES) with the ast
    module, and collects string literals passed to UILayout functions in
    their translatable ('text') arguments.
    """
    import ast

    # -------------------------------------------------------------------------
    # Gather function names

    import bpy
    # key: func_id
    # val: [(arg_kw, arg_pos), (arg_kw, arg_pos), ...]
    func_translate_args = {}

    # so far only 'text' keywords, but we may want others translated later
    translate_kw = ("text", )

    # Break recursive nodes look up on some kind of nodes.
    # E.g. we dont want to get strings inside subscripts (blah["foo"])!
    stopper_nodes = {ast.Subscript,}

    for func_id, func in bpy.types.UILayout.bl_rna.functions.items():
        # check it has a 'text' argument
        for (arg_pos, (arg_kw, arg)) in enumerate(func.parameters.items()):
            if ((arg_kw in translate_kw) and
                (arg.is_output == False) and
                (arg.type == 'STRING')):
                func_translate_args.setdefault(func_id, []).append((arg_kw,
                                                                    arg_pos))
    # print(func_translate_args)

    check_ctxt_py = None
    if check_ctxt:
        # py_in_rna pairs the result set with a snapshot of the RNA messages
        # collected so far, so `check` can detect duplicates.
        check_ctxt_py = {"py_in_rna": (check_ctxt["py_in_rna"], messages.copy()),
                         "multi_lines": check_ctxt["multi_lines"],
                         "not_capitalized": check_ctxt["not_capitalized"],
                         "end_point": check_ctxt["end_point"]}

    # -------------------------------------------------------------------------
    # Function definitions

    def extract_strings(fp_rel, node):
        """ Recursively get strings, needed in case we have "Blah" + "Blah",
            passed as an argument in that case it wont evaluate to a string.
            However, break on some kind of stopper nodes, like e.g. Subscript.
        """
        if type(node) == ast.Str:
            eval_str = ast.literal_eval(node)
            if eval_str:
                key = (CONTEXT_DEFAULT, eval_str)
                msgsrc = "{}:{}".format(fp_rel, node.lineno)
                check(check_ctxt_py, messages, key, msgsrc)
                messages.setdefault(key, []).append(msgsrc)
            return

        for nd in ast.iter_child_nodes(node):
            if type(nd) not in stopper_nodes:
                extract_strings(fp_rel, nd)

    def extract_strings_from_file(fp):
        # Parse one py UI file and collect its translatable call arguments.
        filedata = open(fp, 'r', encoding="utf8")
        root_node = ast.parse(filedata.read(), fp, 'exec')
        filedata.close()

        fp_rel = os.path.relpath(fp, SOURCE_DIR)

        for node in ast.walk(root_node):
            if type(node) == ast.Call:
                # print("found function at")
                # print("%s:%d" % (fp, node.lineno))

                # lambda's
                if type(node.func) == ast.Name:
                    continue

                # getattr(self, con.type)(context, box, con)
                if not hasattr(node.func, "attr"):
                    continue

                translate_args = func_translate_args.get(node.func.attr, ())

                # do nothing if not found
                for arg_kw, arg_pos in translate_args:
                    if arg_pos < len(node.args):
                        # Passed positionally.
                        extract_strings(fp_rel, node.args[arg_pos])
                    else:
                        # Passed as a keyword argument.
                        for kw in node.keywords:
                            if kw.arg == arg_kw:
                                extract_strings(fp_rel, kw.value)

    # -------------------------------------------------------------------------
    # Dump Messages

    mod_dir = os.path.join(SOURCE_DIR,
                           "release",
                           "scripts",
                           "startup",
                           "bl_ui")

    files = [os.path.join(mod_dir, fn)
             for fn in sorted(os.listdir(mod_dir))
             if not fn.startswith("_")
             if fn.endswith("py")
             ]

    # Dummy Cycles has its py addon in its own dir!
    files += CUSTOM_PY_UI_FILES

    for fp in files:
        extract_strings_from_file(fp)
def dump_messages(do_messages, do_checks):
    """Collect all translatable UI messages (RNA + inlined py UI text) and
    optionally run QA checks and/or write the messages file.

    :param do_messages: when True, write collected msgids to
                        FILE_NAME_MESSAGES.
    :param do_checks: when True, run the checks and print their warnings.
    """
    import collections

    def enable_addons():
        """For now, enable all official addons, before extracting msgids."""
        import addon_utils
        import bpy

        userpref = bpy.context.user_preferences
        used_ext = {ext.module for ext in userpref.addons}
        support = {"OFFICIAL"}
        # collect the categories that can be filtered on
        addons = [(mod, addon_utils.module_bl_info(mod)) for mod in
                  addon_utils.modules(addon_utils.addons_fake_modules)]

        for mod, info in addons:
            module_name = mod.__name__
            if module_name in used_ext or info["support"] not in support:
                continue
            print(" Enabling module ", module_name)
            bpy.ops.wm.addon_enable(module=module_name)

        # XXX There are currently some problems with bpy/rna...
        #     *Very* tricky to solve!
        #     So this is a hack to make all newly added operator visible by
        #     bpy.types.OperatorProperties.__subclasses__()
        for cat in dir(bpy.ops):
            cat = getattr(bpy.ops, cat)
            for op in dir(cat):
                getattr(cat, op).get_rna()

    # check for strings like ": %d"
    # FIX: a comma was missing after "%", so the adjacent literals "%" "0"
    # concatenated into the single token "%0" — neither "%" nor "0" alone
    # was being filtered out.
    ignore = ("%d", "%f", "%s", "%r",  # string formatting
              "*", ".", "(", ")", "-", "/", "\\", "+", ":", "#", "%",
              "0", "1", "2", "3", "4", "5", "6", "7", "8", "9",
              "x",  # used on its own eg: 100x200
              "X", "Y", "Z", "W",  # used alone. no need to include
              )

    def filter_message(msg):
        # True when msg consists only of ignored tokens/whitespace (junk).
        msg_tmp = msg
        for ign in ignore:
            msg_tmp = msg_tmp.replace(ign, "")
        if not msg_tmp.strip():
            return True
        # we could filter out different strings here
        return False

    # Preserve collection order when possible, for stable output files.
    if hasattr(collections, 'OrderedDict'):
        messages = collections.OrderedDict()
    else:
        messages = {}

    # Placeholder entry; removed again after collection (keeps the empty
    # msgid out of checks/output).
    messages[(CONTEXT_DEFAULT, "")] = []

    # Enable all wanted addons.
    enable_addons()

    check_ctxt = None
    if do_checks:
        check_ctxt = {"multi_rnatip": set(),
                      "multi_lines": set(),
                      "py_in_rna": set(),
                      "not_capitalized": set(),
                      "end_point": set(),
                      "undoc_ops": set()}

    # get strings from RNA
    dump_messages_rna(messages, check_ctxt)

    # get strings from UI layout definitions text="..." args
    dump_messages_pytext(messages, check_ctxt)

    del messages[(CONTEXT_DEFAULT, "")]

    if do_checks:
        print("WARNINGS:")
        keys = set()
        for c in check_ctxt.values():
            keys |= c
        # XXX Temp, see below
        keys -= check_ctxt["multi_rnatip"]
        for key in keys:
            if key in check_ctxt["undoc_ops"]:
                print("\tThe following operators are undocumented:")
            else:
                # NOTE(review): the opening curly quote appears lost here
                # (only the closing “ ” pair survives) — confirm upstream.
                print("\t{}”|“{}”:".format(*key))
            if key in check_ctxt["multi_lines"]:
                print("\t\t-> newline in this message!")
            if key in check_ctxt["not_capitalized"]:
                print("\t\t-> message not capitalized!")
            if key in check_ctxt["end_point"]:
                print("\t\t-> message with endpoint!")
            # XXX Hide this one for now, too much false positives.
            # if key in check_ctxt["multi_rnatip"]:
            #     print("\t\t-> tip used in several RNA items")
            if key in check_ctxt["py_in_rna"]:
                print("\t\t-> RNA message also used in py UI code:")
            print("\t\t{}".format("\n\t\t".join(messages[key])))

    if do_messages:
        print("Writing messages…")
        num_written = 0
        num_filtered = 0

        with open(FILE_NAME_MESSAGES, 'w', encoding="utf8") as message_file:
            for (ctx, key), value in messages.items():
                # filter out junk values
                if filter_message(key):
                    num_filtered += 1
                    continue

                # Remove newlines in key and values!
                message_file.write("\n".join(COMMENT_PREFIX + msgsrc.replace("\n", "") for msgsrc in value))
                message_file.write("\n")
                if ctx:
                    message_file.write(CONTEXT_PREFIX + ctx.replace("\n", "") + "\n")
                message_file.write(key.replace("\n", "") + "\n")
                num_written += 1

        print("Written {} messages to: {} ({} were filtered out)."
              "".format(num_written, FILE_NAME_MESSAGES, num_filtered))
def main():
    """Entry point: parse script args (after Blender's own '--' separator)
    and dump UI messages; must be run from inside Blender."""
    try:
        import bpy
    except ImportError:
        print("This script must run from inside blender")
        return

    import sys
    # Strip Blender's own arguments: only keep what follows "--".
    back_argv = sys.argv
    sys.argv = sys.argv[sys.argv.index("--") + 1:]

    import argparse
    parser = argparse.ArgumentParser(description="Process UI messages " \
                                                 "from inside Blender.")
    # NOTE: store_false + default=True means passing the flag *disables*
    # the corresponding step.
    parser.add_argument('-c', '--no_checks', default=True,
                        action="store_false",
                        help="No checks over UI messages.")
    parser.add_argument('-m', '--no_messages', default=True,
                        action="store_false",
                        help="No export of UI messages.")
    parser.add_argument('-o', '--output', help="Output messages file path.")
    args = parser.parse_args()

    if args.output:
        # Override the module-level output path.
        global FILE_NAME_MESSAGES
        FILE_NAME_MESSAGES = args.output

    dump_messages(do_messages=args.no_messages, do_checks=args.no_checks)

    # Restore original argv for whoever runs after us.
    sys.argv = back_argv
if __name__ == "__main__":
print("\n\n *** Running {} *** \n".format(__file__))
main()

View File

@@ -0,0 +1,175 @@
#!/usr/bin/python3
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENSE BLOCK *****
# <pep8 compliant>
# Check pos in branches (or in trunk) for missing/unneeded messages.
import os
import sys
from codecs import open
import settings
import utils
TRUNK_PO_DIR = settings.TRUNK_PO_DIR
BRANCHES_DIR = settings.BRANCHES_DIR
FILE_NAME_POT = settings.FILE_NAME_POT
def print_diff(ref_messages, messages, states):
    """Print message ids that are unneeded, missing or commented, compared
    to the reference (pot) set; always returns 0."""
    commented = states["comm_msg"]
    # Drop commented messages before comparing with the reference set.
    present = set(messages.keys()) - commented

    surplus = present - ref_messages
    for msgid in surplus:
        print('\tUnneeded message id "{}"'.format(msgid))

    absent = ref_messages - present
    for msgid in absent:
        print('\tMissing message id "{}"'.format(msgid))

    for msgid in commented:
        print('\tCommented message id "{}"'.format(msgid))

    print("\t{} unneeded messages, {} missing messages, {} commented messages."
          "".format(len(surplus), len(absent), len(commented)))
    return 0
def process_po(ref_messages, po, glob_stats, do_stats, do_messages):
    """Check a single .po file against the reference messages and/or print
    its stats; returns non-zero when something went wrong (broken po)."""
    print("Checking {}...".format(po))
    result = 0

    parsed_msgs, parsed_states, parsed_stats = utils.parse_messages(po)

    if do_messages:
        diff_ret = print_diff(ref_messages, parsed_msgs, parsed_states)
        if diff_ret:
            result = diff_ret

    if do_stats:
        print("\tStats:")
        stats_ret = utils.print_stats(parsed_stats, glob_stats, prefix=" ")
        if stats_ret:
            result = stats_ret

    if parsed_states["is_broken"]:
        print("\tERROR! This .po is broken!")
        result = 1

    return result
def main():
    """Entry point: check .po files (in branches, trunk, or a given language
    list) for missing/unneeded messages and/or print stats; returns a
    non-zero code when a po is broken."""
    import argparse
    parser = argparse.ArgumentParser(description="Check pos in branches " \
                                                 "(or in trunk) for missing" \
                                                 "/unneeded messages.")
    parser.add_argument('-s', '--stats', action="store_true",
                        help="Print pos stats.")
    parser.add_argument('-m', '--messages', action="store_true",
                        help="Print pos missing/unneeded/commented messages.")
    parser.add_argument('-t', '--trunk', action="store_true",
                        help="Check pos in /trunk/po rather than /branches.")
    parser.add_argument('-p', '--pot',
                        help="Specify the .pot file used as reference.")
    parser.add_argument('langs', metavar='ISO_code', nargs='*',
                        help="Restrict processed languages to those.")
    args = parser.parse_args()

    if args.pot:
        # Override the module-level reference pot path.
        global FILE_NAME_POT
        FILE_NAME_POT = args.pot

    # Accumulated statistics over all processed po files.
    glob_stats = {"nbr" : 0.0,
                  "lvl" : 0.0,
                  "lvl_ttips" : 0.0,
                  "lvl_trans_ttips" : 0.0,
                  "lvl_ttips_in_trans": 0.0,
                  "lvl_comm" : 0.0,
                  "nbr_signs" : 0,
                  "nbr_trans_signs" : 0,
                  "contexts" : set()}
    ret = 0

    pot_messages = None
    if args.messages:
        pot_messages, u1, pot_stats = utils.parse_messages(FILE_NAME_POT)
        pot_messages = set(pot_messages.keys())
        glob_stats["nbr_signs"] = pot_stats["nbr_signs"]

    if args.langs:
        # Explicit language list: look up each po in trunk or branches.
        for lang in args.langs:
            if args.trunk:
                po = os.path.join(TRUNK_PO_DIR, ".".join((lang, "po")))
            else:
                po = os.path.join(BRANCHES_DIR, lang, ".".join((lang, "po")))
            if os.path.exists(po):
                t = process_po(pot_messages, po, glob_stats,
                               args.stats, args.messages)
                if t:
                    ret = t
    elif args.trunk:
        # All po files directly under trunk.
        for po in os.listdir(TRUNK_PO_DIR):
            if po.endswith(".po"):
                po = os.path.join(TRUNK_PO_DIR, po)
                t = process_po(pot_messages, po, glob_stats,
                               args.stats, args.messages)
                if t:
                    ret = t
    else:
        # All po files, one sub-directory per language, under branches.
        for lang in os.listdir(BRANCHES_DIR):
            for po in os.listdir(os.path.join(BRANCHES_DIR, lang)):
                if po.endswith(".po"):
                    po = os.path.join(BRANCHES_DIR, lang, po)
                    t = process_po(pot_messages, po, glob_stats,
                                   args.stats, args.messages)
                    if t:
                        ret = t

    if args.stats and glob_stats["nbr"] != 0.0:
        # Pluralize the "context(s)" wording of the summary.
        nbr_contexts = len(glob_stats["contexts"]-{""})
        if nbr_contexts != 1:
            if nbr_contexts == 0:
                nbr_contexts = "No"
            _ctx_txt = "s are"
        else:
            _ctx_txt = " is"
        print("\nAverage stats for all {:.0f} processed files:\n" \
              " {:>6.1%} done!\n" \
              " {:>6.1%} of messages are tooltips.\n" \
              " {:>6.1%} of tooltips are translated.\n" \
              " {:>6.1%} of translated messages are tooltips.\n" \
              " {:>6.1%} of messages are commented.\n" \
              " The org msgids are currently made of {} signs.\n" \
              " All processed translations are currently made of {} signs.\n" \
              " {} specific context{} present:\n {}\n" \
              "".format(glob_stats["nbr"], glob_stats["lvl"]/glob_stats["nbr"],
                        glob_stats["lvl_ttips"]/glob_stats["nbr"],
                        glob_stats["lvl_trans_ttips"]/glob_stats["nbr"],
                        glob_stats["lvl_ttips_in_trans"]/glob_stats["nbr"],
                        glob_stats["lvl_comm"]/glob_stats["nbr"], glob_stats["nbr_signs"],
                        glob_stats["nbr_trans_signs"], nbr_contexts, _ctx_txt,
                        "\n ".join(glob_stats["contexts"]-{""})))
    return ret
if __name__ == "__main__":
print("\n\n *** Running {} *** \n".format(__file__))
print(" *** WARNING! Number of tooltips is only an estimation! ***\n")
sys.exit(main())

View File

@@ -0,0 +1,97 @@
#!/usr/bin/python3
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENSE BLOCK *****
# <pep8 compliant>
# Clean (i.e. remove commented messages) pos in branches or trunk.
import os
import sys
import collections
from codecs import open
import settings
import utils
TRUNK_PO_DIR = settings.TRUNK_PO_DIR
BRANCHES_DIR = settings.BRANCHES_DIR
def do_clean(po, strict):
    """Remove all commented messages from the given .po file, rewriting it
    in place; returns 1 on a broken po in strict mode, else 0."""
    print("Cleaning {}...".format(po))
    msgs, po_states, _unused_stats = utils.parse_messages(po)

    if strict and po_states["is_broken"]:
        print("ERROR! This .po file is broken!")
        return 1

    commented_keys = po_states["comm_msg"]
    for commented_key in commented_keys:
        del msgs[commented_key]

    utils.write_messages(po, msgs, commented_keys, po_states["fuzzy_msg"])
    print("Removed {} commented messages.".format(len(commented_keys)))
    return 0
def main():
    """Entry point: clean .po files in branches (or trunk), restricted to
    the given languages if any; returns 0 on success, 1 when a po was
    broken (strict mode)."""
    import argparse
    parser = argparse.ArgumentParser(description="Clean pos in branches " \
                                                 "or trunk (i.e. remove " \
                                                 "all commented messages).")
    parser.add_argument('-t', '--trunk', action="store_true",
                        help="Clean pos in trunk rather than branches.")
    parser.add_argument('-s', '--strict', action="store_true",
                        help="Raise an error if a po is broken.")
    parser.add_argument('langs', metavar='ISO_code', nargs='*',
                        help="Restrict processed languages to those.")
    args = parser.parse_args()

    ret = 0

    if args.langs:
        # Explicit language list: look up each po in trunk or branches.
        for lang in args.langs:
            if args.trunk:
                po = os.path.join(TRUNK_PO_DIR, ".".join((lang, "po")))
            else:
                po = os.path.join(BRANCHES_DIR, lang, ".".join((lang, "po")))
            if os.path.exists(po):
                t = do_clean(po, args.strict)
                if t:
                    ret = t
    elif args.trunk:
        # All po files directly under trunk.
        for po in os.listdir(TRUNK_PO_DIR):
            if po.endswith(".po"):
                po = os.path.join(TRUNK_PO_DIR, po)
                t = do_clean(po, args.strict)
                if t:
                    ret = t
    else:
        # All po files, one sub-directory per language, under branches.
        for lang in os.listdir(BRANCHES_DIR):
            for po in os.listdir(os.path.join(BRANCHES_DIR, lang)):
                if po.endswith(".po"):
                    po = os.path.join(BRANCHES_DIR, lang, po)
                    t = do_clean(po, args.strict)
                    if t:
                        ret = t

    # FIX: ret was accumulated but never returned, so sys.exit(main())
    # always exited with status 0 (None) even when a po was broken.
    # Return it, consistently with the sibling check/import scripts.
    return ret
if __name__ == "__main__":
print("\n\n *** Running {} *** \n".format(__file__))
sys.exit(main())

View File

@@ -0,0 +1,119 @@
#!/usr/bin/python3
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENSE BLOCK *****
# <pep8 compliant>
# Import in trunk/po all po from branches translated above the given threshold.
import os
import shutil
import sys
import subprocess
from codecs import open
import settings
import utils
import rtl_preprocess
TRUNK_PO_DIR = settings.TRUNK_PO_DIR
BRANCHES_DIR = settings.BRANCHES_DIR
RTL_PREPROCESS_FILE = settings.RTL_PREPROCESS_FILE
PY3 = settings.PYTHON3_EXEC
def main():
    """Entry point: copy into trunk every branch .po translated above the
    threshold, pre-processing RTL languages; returns 0 on success, 1 when
    a broken po was skipped (strict mode)."""
    import argparse
    parser = argparse.ArgumentParser(description="Import advanced enough pos " \
                                                 "from branches to trunk.")
    parser.add_argument('-t', '--threshold', type=int,
                        help="Import threshold, as a percentage.")
    parser.add_argument('-s', '--strict', action="store_true",
                        help="Raise an error if a po is broken.")
    parser.add_argument('langs', metavar='ISO_code', nargs='*',
                        help="Restrict processed languages to those.")
    args = parser.parse_args()

    ret = 0

    # Minimum fraction of translated messages required for import.
    threshold = float(settings.IMPORT_MIN_LEVEL)/100.0
    if args.threshold is not None:
        threshold = float(args.threshold)/100.0

    for lang in os.listdir(BRANCHES_DIR):
        if args.langs and lang not in args.langs:
            continue
        po = os.path.join(BRANCHES_DIR, lang, ".".join((lang, "po")))
        if os.path.exists(po):
            # Presence of this marker file flags an RTL language needing
            # log2vis pre-processing — TODO confirm against rtl_preprocess.
            po_is_rtl = os.path.join(BRANCHES_DIR, lang, RTL_PREPROCESS_FILE)
            msgs, state, stats = utils.parse_messages(po)
            tot_msgs = stats["tot_msg"]
            trans_msgs = stats["trans_msg"]
            # Translated fraction (0.0 when the po is empty).
            lvl = 0.0
            if tot_msgs:
                lvl = float(trans_msgs)/float(tot_msgs)
            if lvl > threshold:
                if state["is_broken"] and args.strict:
                    print("{:<10}: {:>6.1%} done, but BROKEN, skipped." \
                          "".format(lang, lvl))
                    ret = 1
                else:
                    if os.path.exists(po_is_rtl):
                        # RTL language: run translations through the
                        # rtl_preprocess step, write the processed po, and
                        # keep a raw copy alongside.
                        out_po = os.path.join(TRUNK_PO_DIR,
                                              ".".join((lang, "po")))
                        out_raw_po = os.path.join(TRUNK_PO_DIR,
                                                  "_".join((lang, "raw.po")))
                        keys = []
                        trans = []
                        for k, m in msgs.items():
                            keys.append(k)
                            trans.append("".join(m["msgstr_lines"]))
                        trans = rtl_preprocess.log2vis(trans)
                        for k, t in zip(keys, trans):
                            # Mono-line for now...
                            msgs[k]["msgstr_lines"] = [t]
                        utils.write_messages(out_po, msgs, state["comm_msg"],
                                             state["fuzzy_msg"])
                        # Also copies org po!
                        shutil.copy(po, out_raw_po)
                        print("{:<10}: {:>6.1%} done, enough translated " \
                              "messages, processed and copied to trunk." \
                              "".format(lang, lvl))
                    else:
                        shutil.copy(po, TRUNK_PO_DIR)
                        print("{:<10}: {:>6.1%} done, enough translated " \
                              "messages, copied to trunk.".format(lang, lvl))
            else:
                if state["is_broken"] and args.strict:
                    print("{:<10}: {:>6.1%} done, BROKEN and not enough " \
                          "translated messages, skipped".format(lang, lvl))
                    ret = 1
                else:
                    print("{:<10}: {:>6.1%} done, not enough translated " \
                          "messages, skipped.".format(lang, lvl))
    return ret
if __name__ == "__main__":
print("\n\n *** Running {} *** \n".format(__file__))
sys.exit(main())

View File

@@ -0,0 +1,156 @@
#!/usr/bin/python3
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENSE BLOCK *****
# <pep8 compliant>
# Merge one or more .po files into the first dest one.
# If a msgkey is present in more than one merged po, the one in the first file wins, unless
it's marked as fuzzy and one later is not.
# The fuzzy flag is removed if necessary.
# All other comments are never modified.
# However, commented messages in dst will always remain commented, and commented messages are
# never merged from sources.
import sys
from codecs import open
import utils
def main():
    """Command-line entry point: merge one or more src po files into dst.

    For each msgkey (msgctxt, msgid), the first source providing a real
    translation wins; fuzzy translations are only merged as a fallback, and
    the fuzzy flag is dropped from dst when a real translation overwrites a
    fuzzy one.  Commented messages are never merged in either direction.

    Returns 0 on success, 1 if the dest po or any source po was broken.
    """
    import argparse
    parser = argparse.ArgumentParser(description="" \
                        "Merge one or more .po files into the first dest one.\n" \
                        "If a msgkey (msgid, msgctxt) is present in more than " \
                        "one merged po, the one in the first file wins, unless " \
                        "its marked as fuzzy and one later is not.\n" \
                        "The fuzzy flag is removed if necessary.\n" \
                        "All other comments are never modified.\n" \
                        "Commented messages in dst will always remain " \
                        "commented, and commented messages are never merged " \
                        "from sources.")
    parser.add_argument('-s', '--stats', action="store_true",
                        help="Show statistics info.")
    parser.add_argument('-r', '--replace', action="store_true",
                        help="Replace existing messages of same \"level\" already in dest po.")
    parser.add_argument('dst', metavar='dst.po',
                        help="The dest po into which merge the others.")
    parser.add_argument('src', metavar='src.po', nargs='+',
                        help="The po's to merge into the dst.po one.")
    args = parser.parse_args()
    ret = 0
    # msgkeys already settled this run: a key in done_msgkeys got a real
    # translation, one in done_fuzzy_msgkeys only a fuzzy fallback so far.
    done_msgkeys = set()
    done_fuzzy_msgkeys = set()
    # Counters for the final report only.
    nbr_merged = 0
    nbr_replaced = 0
    nbr_added = 0
    nbr_unfuzzied = 0
    dst_messages, dst_states, dst_stats = utils.parse_messages(args.dst)
    if dst_states["is_broken"]:
        print("Dest po is BROKEN, aborting.")
        return 1
    if args.stats:
        print("Dest po, before merging:")
        utils.print_stats(dst_stats, prefix="\t")
    # If we dont want to replace existing valid translations, pre-populate
    # done_msgkeys and done_fuzzy_msgkeys.
    if not args.replace:
        done_msgkeys = dst_states["trans_msg"].copy()
        done_fuzzy_msgkeys = dst_states["fuzzy_msg"].copy()
    for po in args.src:
        messages, states, stats = utils.parse_messages(po)
        if states["is_broken"]:
            print("\tSrc po {} is BROKEN, skipping.".format(po))
            ret = 1
            continue
        print("\tMerging {}...".format(po))
        if args.stats:
            print("\t\tMerged po stats:")
            utils.print_stats(stats, prefix="\t\t\t")
        for msgkey, val in messages.items():
            msgctxt, msgid = msgkey
            # This msgkey has already been completely merged, or is a commented one,
            # or the new message is commented, skip it.
            if msgkey in (done_msgkeys | dst_states["comm_msg"] | states["comm_msg"]):
                continue
            is_ttip = utils.is_tooltip(msgid)
            # New messages does not yet exists in dest.
            if msgkey not in dst_messages:
                dst_messages[msgkey] = messages[msgkey]
                if msgkey in states["fuzzy_msg"]:
                    done_fuzzy_msgkeys.add(msgkey)
                    dst_states["fuzzy_msg"].add(msgkey)
                elif msgkey in states["trans_msg"]:
                    done_msgkeys.add(msgkey)
                    dst_states["trans_msg"].add(msgkey)
                    dst_stats["trans_msg"] += 1
                    if is_ttip:
                        dst_stats["trans_ttips"] += 1
                nbr_added += 1
                dst_stats["tot_msg"] += 1
                if is_ttip:
                    dst_stats["tot_ttips"] += 1
            # From now on, the new messages is already in dst.
            # New message is neither translated nor fuzzy, skip it.
            elif msgkey not in (states["trans_msg"] | states["fuzzy_msg"]):
                continue
            # From now on, the new message is either translated or fuzzy!
            # The new message is translated.
            elif msgkey in states["trans_msg"]:
                dst_messages[msgkey]["msgstr_lines"] = messages[msgkey]["msgstr_lines"]
                done_msgkeys.add(msgkey)
                done_fuzzy_msgkeys.discard(msgkey)
                if msgkey in dst_states["fuzzy_msg"]:
                    dst_states["fuzzy_msg"].remove(msgkey)
                    nbr_unfuzzied += 1
                if msgkey not in dst_states["trans_msg"]:
                    dst_states["trans_msg"].add(msgkey)
                    dst_stats["trans_msg"] += 1
                    if is_ttip:
                        dst_stats["trans_ttips"] += 1
                else:
                    nbr_replaced += 1
                nbr_merged += 1
            # The new message is fuzzy, org one is fuzzy too,
            # and this msgkey has not yet been merged.
            elif msgkey not in (dst_states["trans_msg"] | done_fuzzy_msgkeys):
                dst_messages[msgkey]["msgstr_lines"] = messages[msgkey]["msgstr_lines"]
                done_fuzzy_msgkeys.add(msgkey)
                dst_states["fuzzy_msg"].add(msgkey)
                nbr_merged += 1
                nbr_replaced += 1
    utils.write_messages(args.dst, dst_messages, dst_states["comm_msg"], dst_states["fuzzy_msg"])
    print("Merged completed. {} messages were merged (among which {} were replaced), " \
          "{} were added, {} were \"un-fuzzied\"." \
          "".format(nbr_merged, nbr_replaced, nbr_added, nbr_unfuzzied))
    if args.stats:
        print("Final merged po stats:")
        utils.print_stats(dst_stats, prefix="\t")
    return ret
if __name__ == "__main__":
    # Script entry point: print a banner, then exit with main()'s return code.
    print("\n\n *** Running {} *** \n".format(__file__))
    sys.exit(main())

View File

@@ -0,0 +1,231 @@
#!/usr/bin/python3
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENSE BLOCK *****
# <pep8 compliant>
# Preprocess right-to-left languages.
# You can use it either standalone, or through import_po_from_branches or
# update_trunk.
#
# Notes: This has been tested on Linux, not 100% it will work nicely on
# Windows or OsX.
# This uses ctypes, as there is no py3 binding for fribidi currently.
# This implies you only need the compiled C library to run it.
# Finally, note that it handles some formatting/escape codes (like
# \", %s, %x12, %.4f, etc.), protecting them from ugly (evil) fribidi,
# which seems completely unaware of such things (as unicode is...).
import sys
import ctypes
import settings
import utils
FRIBIDI_LIB = settings.FRIBIDI_LIB
###### Import C library and recreate "defines". #####
fbd = ctypes.CDLL(FRIBIDI_LIB)
#define FRIBIDI_MASK_NEUTRAL 0x00000040L /* Is neutral */
FRIBIDI_PAR_ON = 0x00000040
#define FRIBIDI_FLAG_SHAPE_MIRRORING 0x00000001
#define FRIBIDI_FLAG_REORDER_NSM 0x00000002
#define FRIBIDI_FLAG_SHAPE_ARAB_PRES 0x00000100
#define FRIBIDI_FLAG_SHAPE_ARAB_LIGA 0x00000200
#define FRIBIDI_FLAG_SHAPE_ARAB_CONSOLE 0x00000400
#define FRIBIDI_FLAG_REMOVE_BIDI 0x00010000
#define FRIBIDI_FLAG_REMOVE_JOINING 0x00020000
#define FRIBIDI_FLAG_REMOVE_SPECIALS 0x00040000
#define FRIBIDI_FLAGS_DEFAULT ( \
# FRIBIDI_FLAG_SHAPE_MIRRORING | \
# FRIBIDI_FLAG_REORDER_NSM | \
# FRIBIDI_FLAG_REMOVE_SPECIALS )
#define FRIBIDI_FLAGS_ARABIC ( \
# FRIBIDI_FLAG_SHAPE_ARAB_PRES | \
# FRIBIDI_FLAG_SHAPE_ARAB_LIGA )
FRIBIDI_FLAG_SHAPE_MIRRORING = 0x00000001
FRIBIDI_FLAG_REORDER_NSM = 0x00000002
FRIBIDI_FLAG_REMOVE_SPECIALS = 0x00040000
FRIBIDI_FLAG_SHAPE_ARAB_PRES = 0x00000100
FRIBIDI_FLAG_SHAPE_ARAB_LIGA = 0x00000200
FRIBIDI_FLAGS_DEFAULT = FRIBIDI_FLAG_SHAPE_MIRRORING | \
FRIBIDI_FLAG_REORDER_NSM | \
FRIBIDI_FLAG_REMOVE_SPECIALS
FRIBIDI_FLAGS_ARABIC = FRIBIDI_FLAG_SHAPE_ARAB_PRES | \
FRIBIDI_FLAG_SHAPE_ARAB_LIGA
##### Kernel processing funcs. #####
def protect_format_seq(msg):
    """
    Find the specific escaping/formatting sequences in msg (like \\", %s,
    %x12, %.4f, etc.) and wrap each of them in LRE...PDF (left-to-right
    embedding / pop-directional-formatting) marks, protecting them from
    being reordered or reshaped by fribidi.

    Returns the protected message as a new string.
    """
    # Unicode directional-embedding control characters.
    LRE = "\u202A"
    PDF = "\u202C"
    # Most likely incomplete, but seems to cover current needs.
    format_codes = set("tslfd")
    digits = set(".0123456789")
    idx = 0
    ret = []
    ln = len(msg)
    while idx < ln:
        # dlt is the length of the sequence starting at idx; 1 means
        # "plain char, copy unprotected".
        dlt = 1
        # \" or \'
        if idx < (ln - 1) and msg[idx] == '\\' and msg[idx + 1] in "\"\'":
            dlt = 2
        # %x12 ("%x" followed by one or more digits).
        elif idx < (ln - 2) and msg[idx] == '%' and msg[idx + 1] == 'x' and \
             msg[idx + 2] in digits:
            # "%x" plus its first digit...
            dlt = 3
            # ...and any further digits.  NOTE: the original code scanned
            # msg[idx + dlt + 1], an off-by-one that left the last digit
            # outside the protected span.
            while (idx + dlt) < ln and msg[idx + dlt] in digits:
                dlt += 1
        # %.4f ('%', then digits/dots, then a conversion letter).
        elif idx < (ln - 3) and msg[idx] == '%' and msg[idx + 1] in digits:
            dlt = 2
            while (idx + dlt) < ln and msg[idx + dlt] in digits:
                dlt += 1
            if (idx + dlt) < ln and msg[idx + dlt] in format_codes:
                # Include the conversion letter itself (e.g. the 'f' of
                # "%.4f"), which the original off-by-one version missed.
                dlt += 1
            else:
                # Not a real format sequence: emit the '%' unprotected.
                dlt = 1
        # %s and other single-letter conversions.
        elif idx < (ln - 1) and msg[idx] == '%' and \
             msg[idx + 1] in format_codes:
            dlt = 2
        if dlt > 1:
            ret.append(LRE)
        ret += msg[idx:idx + dlt]
        idx += dlt
        if dlt > 1:
            ret.append(PDF)
    return "".join(ret)
def log2vis(msgs):
    """
    Globally mimics the deprecated fribidi_log2vis: shape and reorder each
    message from logical to visual order.

    msgs should be an iterable of messages to rtl-process; this is a
    generator yielding the processed strings one by one.
    """
    for msg in msgs:
        # Shield escape/format sequences from fribidi before any processing.
        msg = protect_format_seq(msg)
        fbc_str = ctypes.create_unicode_buffer(msg)
        # Char count without the implicit trailing NUL that
        # create_unicode_buffer() appends.
        ln = len(fbc_str) - 1
        # print(fbc_str.value, ln)
        # Per-char bidi types, embedding levels and joining types, filled in
        # by the fribidi calls below.
        btypes = (ctypes.c_int * ln)()
        embed_lvl = (ctypes.c_uint8 * ln)()
        pbase_dir = ctypes.c_int(FRIBIDI_PAR_ON)
        jtypes = (ctypes.c_uint8 * ln)()
        flags = FRIBIDI_FLAGS_DEFAULT | FRIBIDI_FLAGS_ARABIC
        # Find out direction of each char.
        fbd.fribidi_get_bidi_types(fbc_str, ln, ctypes.byref(btypes))
        # print(*btypes)
        fbd.fribidi_get_par_embedding_levels(btypes, ln,
                                             ctypes.byref(pbase_dir),
                                             embed_lvl)
        # print(*embed_lvl)
        # Joinings for arabic chars.
        fbd.fribidi_get_joining_types(fbc_str, ln, jtypes)
        # print(*jtypes)
        fbd.fribidi_join_arabic(btypes, ln, embed_lvl, jtypes)
        # print(*jtypes)
        # Final Shaping!  Modifies fbc_str in place.
        fbd.fribidi_shape(flags, embed_lvl, ln, jtypes, fbc_str)
        # print(fbc_str.value)
        # print(*(ord(c) for c in fbc_str))
        # And now, the reordering.
        # Note that here, we expect a single line, so no need to do
        # fancy things...
        fbd.fribidi_reorder_line(flags, btypes, ln, 0, pbase_dir, embed_lvl,
                                 fbc_str, None)
        # print(fbc_str.value)
        # print(*(ord(c) for c in fbc_str))
        yield fbc_str.value
##### Command line stuff. #####
def main():
    """Command-line entry point: RTL-preprocess src.po into dst.po.

    Every message's translation is run through fribidi (see log2vis()) and
    written back as a single msgstr line; comment and fuzzy states are
    carried over unchanged.

    Returns 0 on success, 1 if the source po was broken.
    """
    import argparse
    parser = argparse.ArgumentParser(description="" \
                    "Preprocesses right-to-left languages.\n" \
                    "You can use it either standalone, or through " \
                    "import_po_from_branches or update_trunk.\n\n" \
                    "Note: This has been tested on Linux, not 100% it will " \
                    "work nicely on Windows or OsX.\n" \
                    "Note: This uses ctypes, as there is no py3 binding for " \
                    "fribidi currently. This implies you only need the " \
                    "compiled C library to run it.\n" \
                    "Note: It handles some formating/escape codes (like " \
                    "\\\", %s, %x12, %.4f, etc.), protecting them from ugly " \
                    "(evil) fribidi, which seems completely unaware of such " \
                    "things (as unicode is...).")
    parser.add_argument('dst', metavar='dst.po',
                        help="The dest po into which write the " \
                             "pre-processed messages.")
    parser.add_argument('src', metavar='src.po',
                        help="The po's to pre-process messages.")
    args = parser.parse_args()
    msgs, state, u1 = utils.parse_messages(args.src)
    if state["is_broken"]:
        print("Source po is BROKEN, aborting.")
        return 1
    # Collect keys and joined translations in parallel lists, so the lazy
    # log2vis() generator output can be zipped back onto the right keys.
    keys = []
    trans = []
    for key, val in msgs.items():
        keys.append(key)
        trans.append("".join(val["msgstr_lines"]))
    trans = log2vis(trans)
    for key, trn in zip(keys, trans):
        # Mono-line for now...
        msgs[key]["msgstr_lines"] = [trn]
    utils.write_messages(args.dst, msgs, state["comm_msg"], state["fuzzy_msg"])
    print("RTL pre-process completed.")
    return 0
if __name__ == "__main__":
    # Script entry point: print a banner, then exit with main()'s return code.
    print("\n\n *** Running {} *** \n".format(__file__))
    sys.exit(main())

View File

@@ -0,0 +1,286 @@
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENSE BLOCK *****
# <pep8 compliant>
# Global settings used by all scripts in this dir.
# XXX Before any use of the tools in this dir, please make a copy of this file
#     named "user_settings.py", so that your local edits are picked up as
#     overrides (see the imports at the end of this file).
# XXX This is a template, most values should be OK, but some you'll have to
#     edit (most probably, BLENDER_EXEC and SOURCE_DIR).
import os.path
###############################################################################
# MISC
###############################################################################
# The min level of completeness for a po file to be imported from /branches
# into /trunk, as a percentage. -1 means "import everything".
IMPORT_MIN_LEVEL = -1
# The comment prefix used in generated messages.txt file.
COMMENT_PREFIX = "#~ "
# The comment prefix used to mark sources of msgids, in po's.
COMMENT_PREFIX_SOURCE = "#: "
# The comment prefix used in generated messages.txt file.
CONTEXT_PREFIX = "MSGCTXT:"
# Default context.
CONTEXT_DEFAULT = ""
# Undocumented operator placeholder string.
UNDOC_OPS_STR = "(undocumented operator)"
# The gettext domain.
DOMAIN = "blender"
# Our own "gettext" stuff.
# File type (ext) to parse.
PYGETTEXT_ALLOWED_EXTS = {".c", ".cpp", ".cxx", ".hpp", ".hxx", ".h"}
# Where to search contexts definitions, relative to SOURCE_DIR (defined below).
PYGETTEXT_CONTEXTS_DEFSRC = os.path.join("source", "blender", "blenfont",
"BLF_translation.h")
# Regex to extract contexts defined in BLF_translation.h
# XXX Not full-proof, but should be enough here!
PYGETTEXT_CONTEXTS = "#define\\s+(BLF_I18NCONTEXT_[A-Z_0-9]+)\\s+\"([^\"]*)\""
# Keywords' regex.
# XXX Most unfortunately, we can't use named backreferences inside character sets,
# which makes the regexes even more twisty... :/
# Building blocks for the regexes matching the C string literals passed to
# the i18n macros.  _str_base is a template: "{_}" is replaced by a unique
# prefix so each expansion gets distinct group names, and "{capt}" selects
# whether the string content is captured or not.
_str_base = (
    # Match void string
    "(?P<{_}1>[\"'])(?P={_}1)" # Get opening quote (' or "), and closing immediately.
    "|"
    # Or match non-void string
    "(?P<{_}2>[\"'])" # Get opening quote (' or ").
    "(?{capt}(?:"
    # This one is for crazy things like "hi \\\\\" folks!"...
    r"(?:(?!<\\)(?:\\\\)*\\(?=(?P={_}2)))|"
    # The most common case.
    ".(?!(?P={_}2))"
    ")+.)" # Don't forget the last char!
    "(?P={_}2)" # And closing quote.
)
# Variant capturing the string content in the "clean" group.
str_clean_re = _str_base.format(_="g", capt="P<clean>")
# Here we have to consider two different cases (empty string and other).
_str_whole_re = (
    _str_base.format(_="{_}1_", capt=":") +
    # Optional loop start, this handles "split" strings...
    "(?:(?<=[\"'])\\s*(?=[\"'])(?:"
    + _str_base.format(_="{_}2_", capt=":") +
    # End of loop.
    "))*"
)
# A context argument: either a (possibly split) string literal, or an
# upper-case identifier (e.g. a BLF_I18NCONTEXT_* define, see
# PYGETTEXT_CONTEXTS above).
_ctxt_re = r"(?P<ctxt_raw>(?:" + _str_whole_re.format(_="_ctxt") + r")|(?:[A-Z_0-9]+))"
# A message argument: a (possibly split) string literal.
_msg_re = r"(?P<msg_raw>" + _str_whole_re.format(_="_msg") + r")"
# One regex per i18n macro: IFACE_/TIP_/N_ take a single message,
# CTX_IFACE_/CTX_TIP_/CTX_N_ take a context plus a message.
PYGETTEXT_KEYWORDS = (() +
    tuple((r"{}\(\s*" + _msg_re + r"\s*\)").format(it)
          for it in ("IFACE_", "TIP_", "N_")) +
    tuple((r"{}\(\s*" + _ctxt_re + r"\s*,\s*"+ _msg_re + r"\s*\)").format(it)
          for it in ("CTX_IFACE_", "CTX_TIP_", "CTX_N_"))
)
#GETTEXT_KEYWORDS = ("IFACE_", "CTX_IFACE_:1c,2", "TIP_", "CTX_TIP_:1c,2",
# "N_", "CTX_N_:1c,2")
# Should po parser warn when finding a first letter not capitalized?
WARN_MSGID_NOT_CAPITALIZED = True
# Strings that should not raise above warning!
WARN_MSGID_NOT_CAPITALIZED_ALLOWED = {
"", # Simplifies things... :p
"sin(x) / x",
"fBM",
"sqrt(x*x+y*y+z*z)",
"iTaSC",
"bItasc",
"px",
"mm",
"fStop",
"sRGB",
"iso-8859-15",
"utf-8",
"ascii",
"re",
"y",
"ac3",
"flac",
"mkv",
"mp2",
"mp3",
"ogg",
"wav",
"iTaSC parameters",
"vBVH",
"rv",
"en_US",
"fr_FR",
"it_IT",
"ru_RU",
"zh_CN",
"es",
"zh_TW",
"ar_EG",
"pt",
"bg_BG",
"ca_AD",
"hr_HR",
"cs_CZ",
"nl_NL",
"fi_FI",
"de_DE",
"el_GR",
"id_ID",
"ja_JP",
"ky_KG",
"ko_KR",
"ne_NP",
"fa_IR",
"pl_PL",
"ro_RO",
"sr_RS",
"sr_RS@latin",
"sv_SE",
"uk_UA",
"tr_TR",
"hu_HU",
"available with", # Is part of multi-line msg.
"virtual parents", # Is part of multi-line msg.
"description", # Addons' field. :/
"location", # Addons' field. :/
"author", # Addons' field. :/
"in memory to enable editing!", # Is part of multi-line msg.
"iScale",
"dx",
"p0",
"res",
}
###############################################################################
# PATHS
###############################################################################
# The tools path, should be OK.
TOOLS_DIR = os.path.join(os.path.dirname(__file__))
# The Python3 executable. You'll likely have to edit it in your
# user_settings.py if you're under Windows.
PYTHON3_EXEC = "python3"
# The Blender executable!
# This is just an example, you'll most likely have to edit it in your
# user_settings.py!
BLENDER_EXEC = os.path.abspath(os.path.join(TOOLS_DIR, "..", "..", "..", "..",
"blender"))
# The xgettext tool. You'll likely have to edit it in your user_settings.py
# if you're under Windows.
GETTEXT_XGETTEXT_EXECUTABLE = "xgettext"
# The gettext msgmerge tool. You'll likely have to edit it in your
# user_settings.py if you're under Windows.
GETTEXT_MSGMERGE_EXECUTABLE = "msgmerge"
# The gettext msgfmt "compiler". You'll likely have to edit it in your
# user_settings.py if you're under Windows.
GETTEXT_MSGFMT_EXECUTABLE = "msgfmt"
# The svn binary... You'll likely have to edit it in your
# user_settings.py if you're under Windows.
SVN_EXECUTABLE = "svn"
# The compiled FriBidi C library (.so under Linux, .dll under Windows...).
# You'll likely have to edit it in your user_settings.py if you're under
# Windows, e.g. to use the included one:
# FRIBIDI_LIB = os.path.join(TOOLS_DIR, "libfribidi.dll")
FRIBIDI_LIB = "libfribidi.so.0"
# The name of the (currently empty) file that must be present in a po's
# directory to enable rtl-preprocess.
RTL_PREPROCESS_FILE = "is_rtl"
# The Blender source root path.
# This is just an example, you'll most likely have to override it in your
# user_settings.py!
SOURCE_DIR = os.path.abspath(os.path.join(TOOLS_DIR, "..", "..", "..", "..",
"..", "..", "blender_msgs"))
# The bf-translation repository (you'll likely have to override this in your
# user_settings.py).
I18N_DIR = os.path.abspath(os.path.join(TOOLS_DIR, "..", "..", "..", "..",
"..", "..", "i18n"))
# The /branches path (overriden in bf-translation's i18n_override_settings.py).
BRANCHES_DIR = os.path.join(I18N_DIR, "branches")
# The /trunk path (overriden in bf-translation's i18n_override_settings.py).
TRUNK_DIR = os.path.join(I18N_DIR, "trunk")
# The /trunk/po path (overriden in bf-translation's i18n_override_settings.py).
TRUNK_PO_DIR = os.path.join(TRUNK_DIR, "po")
# The /trunk/mo path (overriden in bf-translation's i18n_override_settings.py).
TRUNK_MO_DIR = os.path.join(TRUNK_DIR, "locale")
# The file storing Blender-generated messages.
FILE_NAME_MESSAGES = os.path.join(TRUNK_PO_DIR, "messages.txt")
# The Blender source path to check for i18n macros.
POTFILES_SOURCE_DIR = os.path.join(SOURCE_DIR, "source")
# The "source" file storing which files should be processed by xgettext,
# used to create FILE_NAME_POTFILES
FILE_NAME_SRC_POTFILES = os.path.join(TRUNK_PO_DIR, "_POTFILES.in")
# The final (generated) file storing which files
# should be processed by xgettext.
FILE_NAME_POTFILES = os.path.join(TRUNK_PO_DIR, "POTFILES.in")
# The template messages file.
FILE_NAME_POT = os.path.join(TRUNK_PO_DIR, ".".join((DOMAIN, "pot")))
# Other py files that should be searched for ui strings, relative to SOURCE_DIR.
# Needed for Cycles, currently...
CUSTOM_PY_UI_FILES = [os.path.join("intern", "cycles", "blender",
"addon", "ui.py"),
]
# A cache storing validated msgids, to avoid re-spellchecking them.
SPELL_CACHE = os.path.join("/tmp", ".spell_cache")
# Custom override settings must be one dir above i18n tools itself!
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
try:
from i18n_override_settings import *
except ImportError: # If no i18n_override_settings available, its no error!
pass
# Override with custom user settings, if available.
try:
from user_settings import *
except ImportError: # If no user_settings available, its no error!
pass

View File

@@ -0,0 +1,503 @@
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import re

# Zero-width assertions delimiting a candidate word: it must be preceded by
# whitespace/quoting chars (or a letter followed by '/' or '-', or the start
# of the string)...
_valid_before = "(?<=[\\s*'\"`])|(?<=[a-zA-Z][/-])|(?<=^)"
# ...and followed by whitespace/punctuation (or '/'/'-' then a letter, or
# the end of the string).
_valid_after = "(?=[\\s'\"`.!?,;:])|(?=[/-]\\s*[a-zA-Z])|(?=$)"
# A word itself is Capitalized, ALL-CAPS or all-lowercase.
_valid_words = "(?:{})(?:(?:[A-Z]+[a-z]*)|[A-Z]*|[a-z]*)(?:{})".format(_valid_before, _valid_after)
_reg = re.compile(_valid_words)


def split_words(text):
    """Split *text* into candidate words, dropping empty matches."""
    return list(filter(None, _reg.findall(text)))
# These must be all lower case for comparisons
dict_uimsgs = {
# OK words
"aren", # aren't
"betweens", # yuck! in-betweens!
"boolean", "booleans",
"decrement",
"derivate",
"doesn", # doesn't
"fader",
"hoc", # ad-hoc
"indices",
"iridas",
"isn", # isn't
"iterable",
"kyrgyz",
"latin",
"merchantability",
"mplayer",
"vertices",
# Merged words
"addon", "addons",
"antialiasing",
"arcsine", "arccosine", "arctangent",
"autoclip",
"autocomplete",
"autoname",
"autosave",
"autoscale",
"autosmooth",
"autosplit",
"backface", "backfacing",
"backimage",
"backscattered",
"bandnoise",
"bindcode",
"bitrate",
"blendin",
"bonesize",
"boundbox",
"boxpack",
"buffersize",
"builtin", "builtins",
"chunksize",
"de",
"defocus",
"denoise",
"despill", "despilling",
"filebrowser",
"filelist",
"filename", "filenames",
"filepath", "filepaths",
"forcefield", "forcefields",
"fulldome", "fulldomes",
"fullscreen",
"gridline",
"hemi",
"inscatter", "inscattering",
"lightless",
"lookup", "lookups",
"mathutils",
"midlevel",
"midground",
"mixdown",
"multi",
"multifractal",
"multires", "multiresolution",
"multisampling",
"multitexture",
"namespace",
"keyconfig",
"playhead",
"polyline",
"popup", "popups",
"pre",
"precalculate",
"prefetch",
"premultiply", "premultiplied",
"prepass",
"prepend",
"preprocess", "preprocessing",
"preseek",
"readonly",
"realtime",
"rekey",
"remesh",
"reprojection",
"resize",
"restpose",
"retarget", "retargets", "retargeting", "retargeted",
"ringnoise",
"rolloff",
"screencast", "screenshot", "screenshots",
"selfcollision",
"singletexture",
"startup",
"stateful",
"starfield",
"subflare", "subflares",
"subframe", "subframes",
"subclass", "subclasses", "subclassing",
"subdirectory", "subdirectories", "subdir", "subdirs",
"submodule", "submodules",
"subpath",
"subsize",
"substep", "substeps",
"targetless",
"textbox", "textboxes",
"tilemode",
"timestamp", "timestamps",
"timestep", "timesteps",
"un",
"unbake",
"uncomment",
"undeformed",
"undistort", "undistortion",
"ungroup",
"unhide",
"unindent",
"unkeyed",
"unpremultiply",
"unprojected",
"unreacted",
"unregister",
"unselected",
"unsubdivided",
"unshadowed",
"unspill",
"unstitchable",
"vectorscope",
"whitespace",
"worldspace",
"workflow",
# Neologisms, slangs
"affectable",
"automagic", "automagically",
"blobby",
"blockiness", "blocky",
"collider", "colliders",
"deformer", "deformers",
"determinator",
"editability",
"keyer",
"lacunarity",
"numerics",
"occluder",
"passepartout",
"perspectively",
"polygonization",
"selectability",
"slurph",
"stitchable",
"trackability",
"transmissivity",
"rasterized", "rasterization",
"renderer", "renderable", "renderability",
# Abbreviations
"aero",
"amb",
"anim",
"bool",
"calc",
"config", "configs",
"const",
"coord", "coords",
"degr",
"dof",
"dupli", "duplis",
"eg",
"esc",
"expr",
"fac",
"fra",
"frs",
"grless",
"http",
"init",
"kbit",
"lensdist",
"loc", "rot", "pos",
"lorem",
"luma",
"multicam",
"num",
"ok",
"orco",
"ortho",
"persp",
"pref", "prefs",
"prev",
"param",
"premul",
"quad", "quads",
"quat", "quats",
"recalc", "recalcs",
"refl",
"spec",
"struct", "structs",
"tex",
"tri", "tris",
"uv", "uvs", "uvw", "uw", "uvmap",
"vec",
"vert", "verts",
"vis",
"xyz", "xzy", "yxz", "yzx", "zxy", "zyx",
"xy", "xz", "yx", "yz", "zx", "zy",
# General computer/science terms
"boid", "boids",
"equisolid",
"euler", "eulers",
"hashable",
"intrinsics",
"isosurface",
"jitter", "jittering", "jittered",
"keymap", "keymaps",
"lambertian",
"laplacian",
"metadata",
"nand", "xnor",
"normals",
"numpad",
"octree",
"opengl",
"pulldown", "pulldowns",
"quantized",
"samplerate",
"scrollback",
"scrollbar",
"scroller",
"searchable",
"spacebar",
"tooltip", "tooltips",
"trackpad",
"unicode",
"viewport", "viewports",
"viscoelastic",
"wildcard", "wildcards",
# General computer graphics terms
"anaglyph",
"bezier", "beziers",
"bicubic",
"bilinear",
"blackpoint", "whitepoint",
"blinn",
"bokeh",
"catadioptric",
"centroid",
"chrominance",
"codec", "codecs",
"collada",
"compositing",
"crossfade",
"deinterlace",
"dropoff",
"eigenvectors",
"equirectangular",
"fisheye",
"framerate",
"gimbal",
"grayscale",
"icosphere",
"lightmap",
"lossless", "lossy",
"midtones",
"mipmap", "mipmaps", "mip",
"ngon", "ngons",
"nurb", "nurbs",
"perlin",
"phong",
"radiosity",
"raytrace", "raytracing", "raytraced",
"renderfarm",
"shader", "shaders",
"specular", "specularity",
"spillmap",
"sobel",
"tonemap",
"toon",
"timecode",
"voronoi",
"voxel", "voxels",
"wireframe",
"zmask",
"ztransp",
# Blender terms
"bbone",
"breakdowner",
"bspline",
"bweight",
"colorband",
"datablock", "datablocks",
"dopesheet",
"dupliface", "duplifaces",
"dupliframe", "dupliframes",
"dupliobject", "dupliob",
"dupligroup",
"duplivert",
"editbone",
"editmode",
"fcurve", "fcurves",
"fluidsim",
"frameserver",
"enum",
"keyframe", "keyframes", "keyframing", "keyframed",
"metaball", "metaballs",
"metaelement", "metaelements",
"metastrip", "metastrips",
"movieclip",
"nabla",
"navmesh",
"outliner",
"paintmap", "paintmaps",
"polygroup", "polygroups",
"poselib",
"pushpull",
"pyconstraint", "pyconstraints",
"shapekey", "shapekeys",
"shrinkfatten",
"shrinkwrap",
"softbody",
"stucci",
"sunsky",
"subsurf",
"texface",
"timeline", "timelines",
"tosphere",
"vcol", "vcols",
"vgroup", "vgroups",
"vinterlace",
"wetmap", "wetmaps",
"wpaint",
# Algorithm names
"beckmann",
"catmull",
"catrom",
"chebychev",
"kutta",
"lennard",
"minkowsky",
"minnaert",
"musgrave",
"nayar",
"netravali",
"oren",
"prewitt",
"runge",
"verlet",
"worley",
# Acronyms
"aa", "msaa",
"api",
"asc", "cdl",
"ascii",
"atrac",
"bw",
"ccd",
"cmd",
"cpus",
"ctrl",
"cw", "ccw",
"dev",
"djv",
"dpi",
"dvar",
"dx",
"fh",
"fov",
"fft",
"gfx",
"gl",
"glsl",
"gpl",
"gpu", "gpus",
"hc",
"hdr",
"hh", "mm", "ss", "ff", # hh:mm:ss:ff timecode
"hsv", "hsva",
"id",
"itu",
"lhs",
"lmb", "mmb", "rmb",
"mux",
"ndof",
"ppc",
"px",
"qmc",
"rgb", "rgba",
"rhs",
"rv",
"sdl",
"sl",
"smpte",
"svn",
"ui",
"unix",
"vbo", "vbos",
"ycc", "ycca",
"yuv", "yuva",
# Blender acronyms
"bge",
"bli",
"bvh",
"dbvt",
"dop", # BLI K-Dop BVH
"ik",
"nla",
"qbvh",
"rna",
"rvo",
"simd",
"sph",
"svbvh",
# CG acronyms
"ao",
"bsdf",
"ior",
"mocap",
# Files types/formats
"avi",
"attrac",
"autodesk",
"bmp",
"btx",
"cineon",
"dpx",
"dxf",
"eps",
"exr",
"fbx",
"ffmpeg",
"flac",
"gzip",
"ico",
"jpg", "jpeg",
"matroska",
"mdd",
"mkv",
"mpeg", "mjpeg",
"mtl",
"ogg",
"openjpeg",
"piz",
"png",
"po",
"quicktime",
"rle",
"sgi",
"stl",
"svg",
"targa", "tga",
"tiff",
"theora",
"vorbis",
"wav",
"xiph",
"xml",
"xna",
"xvid",
}

View File

@@ -0,0 +1,104 @@
#!/usr/bin/python3
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENSE BLOCK *****
# <pep8 compliant>
# Update all branches:
# * Generate a temp messages.txt file.
# * Use it to generate a temp .pot file.
# * Use it to update all .pos in /branches.
import subprocess
import os
import sys
import tempfile
import settings
PY3 = settings.PYTHON3_EXEC
def main():
    """Update all po files in /branches from a freshly generated pot file.

    Steps (each done by spawning the matching helper script):
      * Generate a temp messages.txt file.
      * Use it to generate a temp .pot file.
      * Use it to update all .po's in /branches.

    Returns:
        int: 0 on success, else the return code of the last failing step.
    """
    import argparse
    parser = argparse.ArgumentParser(description="" \
        "Update all branches:\n" \
        "* Generate a temp messages.txt file.\n" \
        "* Use it to generate a temp .pot file.\n" \
        "* Use it to update all .pos in /branches.")
    parser.add_argument('--pproc-contexts', action="store_true",
                        help="Pre-process pos to avoid having plenty of "
                             "fuzzy msgids just because a context was "
                             "added/changed!")
    parser.add_argument('-c', '--no_checks', default=True,
                        action="store_false",
                        help="No checks over UI messages.")
    parser.add_argument('-a', '--add', action="store_true",
                        help="Add missing pos (useful only when one or " \
                             "more languages are given!).")
    parser.add_argument('langs', metavar='ISO_code', nargs='*',
                        help="Restrict processed languages to those.")
    args = parser.parse_args()
    ret = 0
    # Create both temp files up front; they are only needed by the child
    # scripts, so we delete them in the finally block below (the previous
    # version leaked them in the system temp dir).
    dummy, msgfile = tempfile.mkstemp(suffix=".txt",
                                      prefix="blender_messages_")
    os.close(dummy)
    dummy, potfile = tempfile.mkstemp(suffix=".pot",
                                      prefix="blender_pot_")
    os.close(dummy)
    try:
        # Generate a temp messages file.
        cmd = (PY3, "./update_msg.py", "-o", msgfile)
        t = subprocess.call(cmd)
        if t:
            ret = t
        # Generate a temp pot file.
        cmd = [PY3, "./update_pot.py", "-i", msgfile, "-o", potfile]
        if not args.no_checks:
            cmd.append("-c")
        t = subprocess.call(cmd)
        if t:
            ret = t
        # Update branches po files.
        cmd = [PY3, "./update_po.py", "-i", potfile]
        if args.langs:
            if args.add:
                cmd.append("-a")
            cmd += args.langs
        if args.pproc_contexts:
            cmd.append("--pproc-contexts")
        t = subprocess.call(cmd)
        if t:
            ret = t
    finally:
        for path in (msgfile, potfile):
            try:
                os.remove(path)
            except OSError:
                # Best-effort cleanup only.
                pass
    return ret
if __name__ == "__main__":
    # Script entry point: print a banner, then exit with main()'s return code.
    print("\n\n *** Running {} *** \n".format(__file__))
    sys.exit(main())

View File

@@ -0,0 +1,91 @@
#!/usr/bin/python3
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENSE BLOCK *****
# <pep8 compliant>
# Create or update the .mo files under /trunk/locale/...
import subprocess
import os
import sys
import settings
import utils
GETTEXT_MSGFMT_EXECUTABLE = settings.GETTEXT_MSGFMT_EXECUTABLE
SOURCE_DIR = settings.SOURCE_DIR
TRUNK_MO_DIR = settings.TRUNK_MO_DIR
TRUNK_PO_DIR = settings.TRUNK_PO_DIR
DOMAIN = settings.DOMAIN
def process_po(po, lang):
    """Compile one po file into <TRUNK_MO_DIR>/<lang>/LC_MESSAGES/<DOMAIN>.mo.

    Returns msgfmt's return code (0 on success).
    """
    target_dir = os.path.join(TRUNK_MO_DIR, lang, "LC_MESSAGES")
    # msgfmt does not create missing directories itself.
    os.makedirs(target_dir, exist_ok=True)
    mo_path = os.path.join(target_dir, ".".join((DOMAIN, "mo")))
    # --statistics makes msgfmt also report translation coverage.
    cmd = (GETTEXT_MSGFMT_EXECUTABLE, "--statistics", po, "-o", mo_path)
    print("Running ", " ".join(cmd))
    result = subprocess.call(cmd)
    print("Finished.")
    return result
def main():
    """Compile every relevant po under TRUNK_PO_DIR (or only the requested
    languages) into its matching mo file.

    Returns 0 if everything succeeded, else the last non-zero msgfmt code.
    """
    import argparse
    parser = argparse.ArgumentParser(description="Create or update mos " \
                                     "under {}.".format(TRUNK_MO_DIR))
    parser.add_argument('langs', metavar='ISO_code', nargs='*',
                        help="Restrict processed languages to those.")
    args = parser.parse_args()
    ret = 0
    if args.langs:
        for lang in args.langs:
            po = os.path.join(TRUNK_PO_DIR, ".".join((lang, "po")))
            if os.path.exists(po):
                t = process_po(po, lang)
                if t:
                    ret = t
    else:
        for po in os.listdir(TRUNK_PO_DIR):
            # "*_raw.po" files look like unprocessed copies — skip them.
            if po.endswith(".po") and not po.endswith("_raw.po"):
                # Language code is the file name minus its ".po" extension.
                lang = os.path.basename(po)[:-3]
                po = os.path.join(TRUNK_PO_DIR, po)
                t = process_po(po, lang)
                if t:
                    ret = t
    return ret
if __name__ == "__main__":
    # Script entry point: print a banner, then exit with main()'s return code.
    print("\n\n *** Running {} *** \n".format(__file__))
    sys.exit(main())

View File

@@ -0,0 +1,69 @@
#!/usr/bin/python3
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENSE BLOCK *****
# <pep8-80 compliant>
# Write out messages.txt from Blender.
import os
import sys
import subprocess
import settings
# Base command line used to run Blender headless and make it dump its UI
# messages: the real work is done by the bl_process_msg.py script, and
# everything after "--" is passed through to that script.
BLENDER_ARGS = [
    settings.BLENDER_EXEC,
    "--background",       # no GUI
    "--factory-startup",  # ignore user preferences and addons
    "--python",
    os.path.join(settings.TOOLS_DIR, "bl_process_msg.py"),
    "--",
    "-m",
]
def main():
    """Run Blender headless to write out messages.txt.

    Command-line flags can override the Blender executable (-b), disable UI
    message checks (-c) and redirect the output file (-o).  Returns
    Blender's exit status.
    """
    import argparse
    parser = argparse.ArgumentParser(description="Write out messages.txt " \
                                                 "from Blender.")
    parser.add_argument('-c', '--no_checks', default=True,
                        action="store_false",
                        help="No checks over UI messages.")
    parser.add_argument('-b', '--blender', help="Blender executable path.")
    parser.add_argument('-o', '--output', help="Output messages file path.")
    args = parser.parse_args()

    # Work on a copy: the original mutated the module-level BLENDER_ARGS in
    # place, so repeated main() calls accumulated options.
    cmd = list(BLENDER_ARGS)
    if args.blender:
        cmd[0] = args.blender
    if not args.no_checks:
        cmd.append("-c")
    if args.output:
        cmd.append("-o")
        cmd.append(args.output)
    ret = subprocess.call(cmd)
    return ret
if __name__ == "__main__":
print("\n\n *** Running {} *** \n".format(__file__))
ret = main()
if ret:
raise(Exception(ret))

View File

@@ -0,0 +1,166 @@
#!/usr/bin/python3
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENSE BLOCK *****
# <pep8 compliant>
# Update pos in the branches from blender.pot in /trunk/po dir.
import subprocess
import os
import sys
from codecs import open
import shutil
import settings
import utils
GETTEXT_MSGMERGE_EXECUTABLE = settings.GETTEXT_MSGMERGE_EXECUTABLE
BRANCHES_DIR = settings.BRANCHES_DIR
TRUNK_PO_DIR = settings.TRUNK_PO_DIR
FILE_NAME_POT = settings.FILE_NAME_POT
def pproc_newcontext_po(po, pot_messages, pot_stats):
    """Seed new msgctxt's in *po* from already-translated messages.

    For every (context, msgid) key in *pot_messages* whose context is not
    yet present in *po*, look for the same msgid under an existing context
    and, if that entry holds a valid (non-fuzzy, non-commented) translation,
    duplicate it under the new context.  msgmerge run afterwards cleans up
    the stale source comments.

    pot_stats is accepted for signature compatibility but unused here.
    Returns 0 (process-like status code).
    """
    print("Adding new contexts to {}...".format(po))
    messages, state, stats = utils.parse_messages(po)
    known_ctxt = stats["contexts"]
    print("Already known (present) context(s): {}".format(str(known_ctxt)))

    new_ctxt = set()
    added = 0
    # Only use valid already translated messages!
    allowed_keys = state["trans_msg"] - state["fuzzy_msg"] - state["comm_msg"]
    for key in pot_messages.keys():
        ctxt, msgid = key
        if ctxt in known_ctxt:
            continue
        new_ctxt.add(ctxt)
        for t_ctxt in known_ctxt:
            # XXX The first match will win, this might not be optimal...
            t_key = (t_ctxt, msgid)
            if t_key in allowed_keys:
                # Copy the entry: the original assigned the very same dict
                # and then rewrote its msgctxt_lines, which corrupted the
                # context of the existing translated entry as well.
                entry = dict(messages[t_key])
                entry["msgctxt_lines"] = [ctxt]
                # Wrong comments (sources) will be removed by msgmerge...
                messages[key] = entry
                added += 1
                break  # First match wins, as the comment above advertises.
    utils.write_messages(po, messages, state["comm_msg"], state["fuzzy_msg"])
    print("Finished!\n    {} new context(s) was/were added {}, adding {} new "
          "messages.\n".format(len(new_ctxt), str(new_ctxt), added))
    return 0
def process_po(po, lang):
    """Merge the reference pot into one .po file, in place, via msgmerge."""
    # --no-wrap keeps long lines intact, --backup=none avoids .po~ litter.
    merge_cmd = (
        GETTEXT_MSGMERGE_EXECUTABLE,
        "--update",
        "--no-wrap",
        "--backup=none",
        "--lang={}".format(lang),
        po,
        FILE_NAME_POT,
    )
    print("Updating {}...".format(po))
    print("Running ", " ".join(merge_cmd))
    status = subprocess.call(merge_cmd)
    print("Finished!\n")
    return status
def main():
    """Update .po files (in /branches or /trunk/po) from the reference pot.

    Returns 0 on success, or the last non-zero status reported by a
    pre-processing or msgmerge step.
    """
    import argparse
    # NOTE: the original description ("Write out messages.txt from Blender.")
    # was copy-pasted from another tool; fixed to describe this script.
    parser = argparse.ArgumentParser(description="Update po files under "
                                                 "/branches or /trunk/po "
                                                 "from blender.pot.")
    parser.add_argument('-t', '--trunk', action="store_true",
                        help="Update pos in /trunk/po rather than /branches.")
    parser.add_argument('-i', '--input', metavar="File",
                        help="Input pot file path.")
    parser.add_argument('--pproc-contexts', action="store_true",
                        help="Pre-process pos to avoid having plenty of "
                             "fuzzy msgids just because a context was "
                             "added/changed!")
    parser.add_argument('-a', '--add', action="store_true",
                        help="Add missing pos (useful only when one or "
                             "more languages are given!).")
    parser.add_argument('langs', metavar='ISO_code', nargs='*',
                        help="Restrict processed languages to those.")
    args = parser.parse_args()

    if args.input:
        global FILE_NAME_POT
        FILE_NAME_POT = args.input

    if args.pproc_contexts:
        _ctxt_proc = pproc_newcontext_po
        pot_messages, _a, pot_stats = utils.parse_messages(FILE_NAME_POT)
    else:
        _ctxt_proc = lambda a, b, c: 0
        pot_messages, pot_stats = None, None

    ret = 0

    def _update_one(po, lang):
        # Optional context pre-process, then msgmerge; remember the last
        # failing status in the enclosing ret.  (Factored out of the three
        # identical copies the original carried.)
        nonlocal ret
        t = _ctxt_proc(po, pot_messages, pot_stats)
        if t:
            ret = t
        t = process_po(po, lang)
        if t:
            ret = t

    if args.langs:
        for lang in args.langs:
            if args.trunk:
                dr = TRUNK_PO_DIR
            else:
                dr = os.path.join(BRANCHES_DIR, lang)
            po = os.path.join(dr, ".".join((lang, "po")))
            if args.add:
                if not os.path.exists(dr):
                    os.makedirs(dr)
                if not os.path.exists(po):
                    # Start a brand new language from a bare copy of the pot.
                    shutil.copy(FILE_NAME_POT, po)
            if args.add or os.path.exists(po):
                _update_one(po, lang)
    elif args.trunk:
        for po in os.listdir(TRUNK_PO_DIR):
            if po.endswith(".po"):
                lang = os.path.basename(po)[:-3]
                _update_one(os.path.join(TRUNK_PO_DIR, po), lang)
    else:
        for lang in os.listdir(BRANCHES_DIR):
            po = os.path.join(BRANCHES_DIR, lang, ".".join((lang, "po")))
            if os.path.exists(po):
                _update_one(po, lang)
    return ret
if __name__ == "__main__":
print("\n\n *** Running {} *** \n".format(__file__))
sys.exit(main())

View File

@@ -0,0 +1,314 @@
#!/usr/bin/python3
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENSE BLOCK *****
# <pep8 compliant>
# Update blender.pot file from messages.txt
import subprocess
import os
import sys
import re
#from codecs import open
import tempfile
import argparse
import time
import pickle
import settings
import utils
COMMENT_PREFIX = settings.COMMENT_PREFIX
COMMENT_PREFIX_SOURCE = settings.COMMENT_PREFIX_SOURCE
CONTEXT_PREFIX = settings.CONTEXT_PREFIX
FILE_NAME_MESSAGES = settings.FILE_NAME_MESSAGES
#FILE_NAME_POTFILES = settings.FILE_NAME_POTFILES
FILE_NAME_POT = settings.FILE_NAME_POT
SOURCE_DIR = settings.SOURCE_DIR
POTFILES_DIR = settings.POTFILES_SOURCE_DIR
SRC_POTFILES = settings.FILE_NAME_SRC_POTFILES
#GETTEXT_XGETTEXT_EXECUTABLE = settings.GETTEXT_XGETTEXT_EXECUTABLE
#GETTEXT_KEYWORDS = settings.GETTEXT_KEYWORDS
CONTEXT_DEFAULT = settings.CONTEXT_DEFAULT
PYGETTEXT_ALLOWED_EXTS = settings.PYGETTEXT_ALLOWED_EXTS
SVN_EXECUTABLE = settings.SVN_EXECUTABLE
WARN_NC = settings.WARN_MSGID_NOT_CAPITALIZED
NC_ALLOWED = settings.WARN_MSGID_NOT_CAPITALIZED_ALLOWED
SPELL_CACHE = settings.SPELL_CACHE
#def generate_valid_potfiles(final_potfiles):
# "Generates a temp potfiles.in with aboslute paths."
# with open(FILE_NAME_POTFILES, 'r', 'utf-8') as f, \
# open(final_potfiles, 'w', 'utf-8') as w:
# for line in f:
# line = utils.stripeol(line)
# if line:
# w.write("".join((os.path.join(SOURCE_DIR,
# os.path.normpath(line)), "\n")))
# Do this only once!
# Get contexts defined in blf: map of C macro name -> C string value,
# extracted from the source file named by settings.PYGETTEXT_CONTEXTS_DEFSRC.
CONTEXTS = {}
with open(os.path.join(SOURCE_DIR, settings.PYGETTEXT_CONTEXTS_DEFSRC)) as f:
    reg = re.compile(settings.PYGETTEXT_CONTEXTS)
    f = f.read()
    # This regex is supposed to yield tuples
    # (key=C_macro_name, value=C_string).
    CONTEXTS = dict(m.groups() for m in reg.finditer(f))

# Build regexes to extract messages (with optional contexts) from C source.
pygettexts = tuple(re.compile(r).search
                   for r in settings.PYGETTEXT_KEYWORDS)
# clean_str(s): keep only the "clean" groups matched by str_clean_re, i.e.
# strip quoting/concatenation noise from a raw C string literal.
_clean_str = re.compile(settings.str_clean_re).finditer
clean_str = lambda s: "".join(m.group("clean") for m in _clean_str(s))
def check_file(path, rel_path, messages):
    """Scan one source file with every PYGETTEXT_KEYWORDS regex and record
    all messages found into *messages* ({(ctxt, msgid): [src_locations]}).

    Source locations are "rel_path:line" strings.  Assumes each keyword
    regex defines named groups "ctxt_raw" and "msg_raw" — set by the
    patterns in settings.PYGETTEXT_KEYWORDS (TODO confirm against settings).
    """
    with open(path, encoding="utf-8") as f:
        f = f.read()  # rebound: whole file content as one string
        for srch in pygettexts:
            m = srch(f)
            line = pos = 0
            while m:
                d = m.groupdict()
                # Context.
                ctxt = d.get("ctxt_raw")
                if ctxt:
                    if ctxt in CONTEXTS:
                        # A known context macro name: substitute its string.
                        ctxt = CONTEXTS[ctxt]
                    elif '"' in ctxt or "'" in ctxt:
                        # An inline string literal: unquote it.
                        ctxt = clean_str(ctxt)
                    else:
                        print("WARNING: raw context “{}” couldn’t be resolved!"
                              "".format(ctxt))
                        ctxt = CONTEXT_DEFAULT
                else:
                    ctxt = CONTEXT_DEFAULT
                # Message.
                msg = d.get("msg_raw")
                if msg:
                    if '"' in msg or "'" in msg:
                        msg = clean_str(msg)
                    else:
                        print("WARNING: raw message “{}” couldn’t be resolved!"
                              "".format(msg))
                        msg = ""
                else:
                    msg = ""
                # Line: count newlines between the previous match end (pos)
                # and this match start, incrementally.
                line += f[pos:m.start()].count('\n')
                # And we are done for this item!
                messages.setdefault((ctxt, msg),
                                    []).append(":".join((rel_path, str(line))))
                pos = m.end()
                line += f[m.start():pos].count('\n')
                m = srch(f, pos)
def py_xgettext(messages):
    """Collect (context, message) pairs from the sources into *messages*.

    SRC_POTFILES holds per-line overrides: a leading '-' excludes a file
    from the walk, '#' is a comment, any other line forces a file in
    (paths relative to SOURCE_DIR).
    """
    with open(SRC_POTFILES) as src:
        forbidden = set()
        forced = set()
        for l in src:
            if l[0] == '-':
                forbidden.add(l[1:].rstrip('\n'))
            elif l[0] != '#':
                forced.add(l.rstrip('\n'))
    # Hoisted: the union is loop-invariant, no need to rebuild it per file.
    skipped = forbidden | forced
    for root, dirs, files in os.walk(POTFILES_DIR):
        if "/.svn" in root:
            continue
        for fname in files:
            if os.path.splitext(fname)[1] not in PYGETTEXT_ALLOWED_EXTS:
                continue
            path = os.path.join(root, fname)
            rel_path = os.path.relpath(path, SOURCE_DIR)
            if rel_path in skipped:
                continue
            check_file(path, rel_path, messages)
    for path in forced:
        # Forced paths are relative to SOURCE_DIR; test existence on the
        # absolute path (the original tested the relative one, which
        # silently depended on the current working directory).
        abs_path = os.path.join(SOURCE_DIR, path)
        if os.path.exists(abs_path):
            check_file(abs_path, path, messages)
# Spell checking!
import enchant
dict_spelling = enchant.Dict("en_US")
from spell_check_utils import (dict_uimsgs,
split_words,
)
_spell_checked = set()
def spell_check(txt, cache):
    """Spell-check *txt*; return a list of "word: suggestions are (...)"
    strings (empty list means no misspelling found).

    *cache* (a set, or None) holds whole strings already known clean;
    individual correctly-spelled words are also remembered module-wide in
    _spell_checked.
    """
    errors = []
    if cache is not None and txt in cache:
        return errors
    for word in split_words(txt):
        folded = word.lower()
        if folded in dict_uimsgs | _spell_checked:
            continue
        if dict_spelling.check(word):
            _spell_checked.add(folded)
        else:
            suggestions = "'" + "', '".join(dict_spelling.suggest(word)) + "'"
            errors.append("{}: suggestions are ({})".format(word, suggestions))
    if not errors and cache is not None:
        cache.add(txt)
    return errors
def get_svnrev():
    """Return the svn revision of SOURCE_DIR, as bytes (e.g. b"48592")."""
    cmd = [SVN_EXECUTABLE,
           "info",
           "--xml",
           SOURCE_DIR,
           ]
    xml = subprocess.check_output(cmd)
    # rb'...' pattern: '\d' inside a non-raw bytes literal is an invalid
    # escape (DeprecationWarning, a SyntaxError in modern Python).
    return re.search(rb'revision="(\d+)"', xml).group(1)
def gen_empty_pot():
    """Build an empty pot skeleton stamped with the current svn revision
    and the current UTC time."""
    rev = get_svnrev()
    now = time.gmtime()
    stamp = time.strftime("%Y-%m-%d %H:%M+0000", now)
    year = time.strftime("%Y", now)
    return utils.gen_empty_messages(rev, stamp, year)
def merge_messages(msgs, states, messages, do_checks, spell_cache):
    """Fold *messages* ({(ctxt, msgid): [src_locations]}) into *msgs*.

    Optionally spell-checks each msgid first.  *states* is accepted for
    signature compatibility but unused here.  Returns a
    (newly_added, already_present) pair of counts.
    """
    num_added = 0
    num_present = 0
    for (context, raw_msgid), locations in messages.items():
        if do_checks:
            problems = spell_check(raw_msgid, spell_cache)
            if problems:
                print("WARNING: spell check failed on “" + raw_msgid + "”:")
                print("\t\t" + "\n\t\t".join(problems))
                print("\tFrom:\n\t\t" + "\n\t\t".join(locations))
        # Escape backslashes, double quotes and tabs for the po format.
        msgid = (raw_msgid.replace("\\", "\\\\")
                          .replace("\"", "\\\"")
                          .replace("\t", "\\t"))
        comments = [COMMENT_PREFIX_SOURCE + loc for loc in locations]
        key = (context, msgid)
        entry = msgs.get(key)
        if entry is None:
            msgs[key] = {"msgid_lines": [msgid],
                         "msgstr_lines": [""],
                         "comment_lines": comments,
                         "msgctxt_lines": [context]}
            num_added += 1
        else:
            # Same message seen from another source location: merge comments.
            entry["comment_lines"].extend(comments)
            num_present += 1
    return num_added, num_present
def main():
    """Build blender.pot: fake-xgettext the sources, merge in the
    RNA-generated messages file, then write the pot out.

    -i/-o override the module-level input/output paths.  Returns 0.
    """
    parser = argparse.ArgumentParser(description="Update blender.pot file " \
                                                 "from messages.txt")
    parser.add_argument('-w', '--warning', action="store_true",
                        help="Show warnings.")
    parser.add_argument('-i', '--input', metavar="File",
                        help="Input messages file path.")
    parser.add_argument('-o', '--output', metavar="File",
                        help="Output pot file path.")
    args = parser.parse_args()
    if args.input:
        global FILE_NAME_MESSAGES
        FILE_NAME_MESSAGES = args.input
    if args.output:
        global FILE_NAME_POT
        FILE_NAME_POT = args.output

    print("Running fake py gettext…")
    # Not using any more xgettext, simpler to do it ourself!
    messages = {}
    py_xgettext(messages)
    print("Finished, found {} messages.".format(len(messages)))

    # Load the persistent spell-check cache of known-clean strings, if any.
    if SPELL_CACHE and os.path.exists(SPELL_CACHE):
        with open(SPELL_CACHE, 'rb') as f:
            spell_cache = pickle.load(f)
    else:
        spell_cache = set()
    print(len(spell_cache))  # NOTE(review): leftover debug print?

    print("Generating POT file {}".format(FILE_NAME_POT))
    msgs, states = gen_empty_pot()
    tot_messages, _a = merge_messages(msgs, states, messages,
                                      True, spell_cache)

    # add messages collected automatically from RNA
    print("\tMerging RNA messages from {}".format(FILE_NAME_MESSAGES))
    # The messages file format: comment lines carry source locations,
    # context lines set the context of the next message line.
    messages = {}
    with open(FILE_NAME_MESSAGES, encoding="utf-8") as f:
        srcs = []
        context = ""
        for line in f:
            line = utils.stripeol(line)
            if line.startswith(COMMENT_PREFIX):
                srcs.append(line[len(COMMENT_PREFIX):].strip())
            elif line.startswith(CONTEXT_PREFIX):
                context = line[len(CONTEXT_PREFIX):].strip()
            else:
                key = (context, line)
                messages[key] = srcs
                # Reset accumulated state for the next message.
                srcs = []
                context = ""
    num_added, num_present = merge_messages(msgs, states, messages,
                                            True, spell_cache)
    tot_messages += num_added
    print("\tMerged {} messages ({} were already present)."
          "".format(num_added, num_present))

    # Write back all messages into blender.pot.
    utils.write_messages(FILE_NAME_POT, msgs, states["comm_msg"],
                         states["fuzzy_msg"])

    print(len(spell_cache))  # NOTE(review): leftover debug print?
    if SPELL_CACHE and spell_cache:
        with open(SPELL_CACHE, 'wb') as f:
            pickle.dump(spell_cache, f)
    # The "- 1" presumably discounts the po header entry — TODO confirm.
    print("Finished, total: {} messages!".format(tot_messages - 1))
    return 0
if __name__ == "__main__":
print("\n\n *** Running {} *** \n".format(__file__))
sys.exit(main())

View File

@@ -0,0 +1,132 @@
#!/usr/bin/python3
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENSE BLOCK *****
# <pep8 compliant>
# Update trunk from branches:
# * Remove pos in trunk.
# * Copy pos from branches advanced enough.
# * Clean pos in trunk.
# * Compile pos in trunk in mos, keeping track of those failing.
# * Remove pos, mos (and their dirs) that failed to compile or
# are no more present in trunk.
import subprocess
import os
import sys
import shutil
import settings
TRUNK_PO_DIR = settings.TRUNK_PO_DIR
TRUNK_MO_DIR = settings.TRUNK_MO_DIR
PY3 = settings.PYTHON3_EXEC
def main():
    """Refresh trunk po/mo files from the translation branches.

    Removes trunk pos, re-imports from branches, cleans/compiles each po,
    then drops everything that failed.  Returns the last non-zero status
    of a sub-step, or 0.

    Fix over the original: the computed status is now actually returned
    (the function used to fall off the end and implicitly return None, so
    ``sys.exit(main())`` always reported success).
    """
    import argparse
    parser = argparse.ArgumentParser(description="" \
                        "Update trunk from branches:\n" \
                        "* Remove pos in trunk.\n" \
                        "* Copy pos from branches advanced enough.\n" \
                        "* Clean pos in trunk.\n" \
                        "* Compile pos in trunk in mos, keeping " \
                        "track of those failing.\n" \
                        "* Remove pos and mos (and their dirs) that " \
                        "failed to compile or are no more present in trunk.")
    parser.add_argument('-t', '--threshold', type=int,
                        help="Import threshold, as a percentage.")
    parser.add_argument('-p', '--po', action="store_false",
                        help="Do not remove failing pos.")
    parser.add_argument('-m', '--mo', action="store_false",
                        help="Do not remove failing mos.")
    parser.add_argument('langs', metavar='ISO_code', nargs='*',
                        help="Restrict processed languages to those.")
    args = parser.parse_args()

    ret = 0
    failed = set()

    # Remove pos in trunk.
    for po in os.listdir(TRUNK_PO_DIR):
        if po.endswith(".po"):
            lang = os.path.basename(po)[:-3]
            if args.langs and lang not in args.langs:
                continue
            po = os.path.join(TRUNK_PO_DIR, po)
            os.remove(po)

    # Copy pos from branches.
    cmd = [PY3, "./import_po_from_branches.py", "-s"]
    if args.threshold is not None:
        cmd += ["-t", str(args.threshold)]
    if args.langs:
        cmd += args.langs
    t = subprocess.call(cmd)
    if t:
        ret = t

    # Add in failed all mos no more having relevant pos in trunk.
    for lang in os.listdir(TRUNK_MO_DIR):
        if lang == ".svn":
            continue  # !!!
        if not os.path.exists(os.path.join(TRUNK_PO_DIR,
                                           ".".join((lang, "po")))):
            failed.add(lang)

    # Check and compile each po separately, to keep track of those failing.
    # XXX There should not be any failing at this stage, import step is
    #     supposed to have already filtered them out!
    for po in os.listdir(TRUNK_PO_DIR):
        if po.endswith(".po") and not po.endswith("_raw.po"):
            lang = os.path.basename(po)[:-3]
            if args.langs and lang not in args.langs:
                continue
            cmd = [PY3, "./clean_po.py", "-t", "-s", lang]
            t = subprocess.call(cmd)
            if t:
                ret = t
                failed.add(lang)
                continue
            cmd = [PY3, "./update_mo.py", lang]
            t = subprocess.call(cmd)
            if t:
                ret = t
                failed.add(lang)

    # Remove failing pos, mos and related dirs.
    for lang in failed:
        print("Lang “{}” failed, removing it...".format(lang))
        if args.po:
            po = os.path.join(TRUNK_PO_DIR, ".".join((lang, "po")))
            if os.path.exists(po):
                os.remove(po)
        if args.mo:
            mo = os.path.join(TRUNK_MO_DIR, lang)
            if os.path.exists(mo):
                shutil.rmtree(mo)
    # Bug fix: this return was missing, losing the exit status.
    return ret
if __name__ == "__main__":
print("\n\n *** Running {} *** \n".format(__file__))
sys.exit(main())

View File

@@ -0,0 +1,23 @@
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENSE BLOCK *****
# <pep8 compliant>
import os
import settings

View File

@@ -0,0 +1,377 @@
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENSE BLOCK *****
# <pep8 compliant>
# Some misc utilities...
import os
import sys
import collections
from codecs import open
import settings
COMMENT_PREFIX = settings.COMMENT_PREFIX
WARN_NC = settings.WARN_MSGID_NOT_CAPITALIZED
NC_ALLOWED = settings.WARN_MSGID_NOT_CAPITALIZED_ALLOWED
def stripeol(s):
    """Strip any trailing newline / carriage-return characters from *s*."""
    return s.rstrip("\r\n")
# XXX For now, we assume that all messages > 30 chars are tooltips!
def is_tooltip(msgid):
    """Heuristic: a message longer than 30 characters is deemed a tooltip."""
    return len(msgid) > 30
def parse_messages(fname):
    """Parse a po file and return a tuple (messages, states, stats).

    messages is an OrderedDict of dicts
        {(ctxt, msgid): {msgid_lines:, msgstr_lines:,
                         comment_lines:, msgctxt_lines:}}.
    states is a dict of three sets of (ctxt, msgid) keys, and a boolean
    flag indicating the .po is somewhat broken
        {trans_msg:, fuzzy_msg:, comm_msg:, is_broken:}.
    stats is a dict of values
        {tot_msg:, trans_msg:, tot_ttips:, trans_ttips:, comm_msg:,
         nbr_signs:, nbr_trans_signs:, contexts: set()}.

    Note: This function will silently "arrange" mis-formatted entries, thus
    using afterward write_messages() should always produce a po-valid file,
    though not correct!
    """
    # Per-file counters.
    tot_messages = 0
    tot_tooltips = 0
    trans_messages = 0
    trans_tooltips = 0
    comm_messages = 0
    nbr_signs = 0
    nbr_trans_signs = 0
    contexts = set()
    # Per-entry parser state, reset by clean_vars() after each entry.
    reading_msgid = False
    reading_msgstr = False
    reading_msgctxt = False
    reading_comment = False
    is_translated = False
    is_fuzzy = False
    is_commented = False
    is_broken = False
    msgid_lines = []
    msgstr_lines = []
    msgctxt_lines = []
    comment_lines = []
    # OrderedDict keeps the po entry order (plain dict fallback on very
    # old interpreters lacking it).
    messages = getattr(collections, 'OrderedDict', dict)()
    translated_messages = set()
    fuzzy_messages = set()
    commented_messages = set()

    def clean_vars():
        # Reset all per-entry state before starting the next entry.
        nonlocal reading_msgid, reading_msgstr, reading_msgctxt, \
                 reading_comment, is_fuzzy, is_translated, is_commented, \
                 msgid_lines, msgstr_lines, msgctxt_lines, comment_lines
        reading_msgid = reading_msgstr = reading_msgctxt = \
            reading_comment = False
        # NOTE(review): `is_tooltip` here shadows the module-level function
        # with a dead local assignment (it is not in the nonlocal list).
        is_tooltip = is_fuzzy = is_translated = is_commented = False
        msgid_lines = []
        msgstr_lines = []
        msgctxt_lines = []
        comment_lines = []

    def finalize_message():
        # Commit the entry accumulated so far into messages/sets/counters,
        # then reset the per-entry state.
        nonlocal reading_msgid, reading_msgstr, reading_msgctxt, \
                 reading_comment, is_fuzzy, is_translated, is_commented, \
                 msgid_lines, msgstr_lines, msgctxt_lines, comment_lines, \
                 messages, translated_messages, fuzzy_messages, \
                 commented_messages, \
                 tot_messages, tot_tooltips, trans_messages, trans_tooltips, \
                 comm_messages, nbr_signs, nbr_trans_signs, contexts

        msgid = "".join(msgid_lines)
        msgctxt = "".join(msgctxt_lines)
        msgkey = (msgctxt, msgid)
        is_ttip = is_tooltip(msgid)

        # Never allow overriding existing msgid/msgctxt pairs!
        if msgkey in messages:
            clean_vars()
            return

        nbr_signs += len(msgid)
        if is_commented:
            commented_messages.add(msgkey)
        elif is_fuzzy:
            fuzzy_messages.add(msgkey)
        elif is_translated:
            translated_messages.add(msgkey)
            nbr_trans_signs += len("".join(msgstr_lines))
        messages[msgkey] = {"msgid_lines"  : msgid_lines,
                            "msgstr_lines" : msgstr_lines,
                            "comment_lines": comment_lines,
                            "msgctxt_lines": msgctxt_lines}

        if is_commented:
            comm_messages += 1
        else:
            tot_messages += 1
            if is_ttip:
                tot_tooltips += 1
            if not is_fuzzy and is_translated:
                trans_messages += 1
                if is_ttip:
                    trans_tooltips += 1
            if msgctxt not in contexts:
                contexts.add(msgctxt)
        clean_vars()

    with open(fname, 'r', "utf-8") as f:
        for line_nr, line in enumerate(f):
            line = stripeol(line)
            if line == "":
                # Blank line terminates the current entry.
                finalize_message()

            elif line.startswith("msgctxt") or \
                 line.startswith("".join((COMMENT_PREFIX, "msgctxt"))):
                reading_comment = False
                # NOTE(review): `reading_ctxt` looks like a typo for
                # `reading_msgctxt`; neither is ever read afterwards, so
                # multi-line msgctxt entries end up flagged is_broken below.
                reading_ctxt = True
                if line.startswith(COMMENT_PREFIX):
                    is_commented = True
                    # Slice off 'msgctxt "' (9 chars) plus the comment
                    # prefix, and the trailing quote.
                    line = line[9+len(COMMENT_PREFIX):-1]
                else:
                    line = line[9:-1]
                msgctxt_lines.append(line)

            elif line.startswith("msgid") or \
                 line.startswith("".join((COMMENT_PREFIX, "msgid"))):
                reading_comment = False
                reading_msgid = True
                if line.startswith(COMMENT_PREFIX):
                    is_commented = True
                    # 'msgid "' is 7 chars.
                    line = line[7+len(COMMENT_PREFIX):-1]
                else:
                    line = line[7:-1]
                msgid_lines.append(line)

            elif line.startswith("msgstr") or \
                 line.startswith("".join((COMMENT_PREFIX, "msgstr"))):
                if not reading_msgid:
                    # msgstr with no preceding msgid: malformed entry.
                    is_broken = True
                else:
                    reading_msgid = False
                reading_msgstr = True
                if line.startswith(COMMENT_PREFIX):
                    # 'msgstr "' is 8 chars.
                    line = line[8+len(COMMENT_PREFIX):-1]
                    if not is_commented:
                        # Commented msgstr after uncommented msgid: broken.
                        is_broken = True
                else:
                    line = line[8:-1]
                    if is_commented:
                        is_broken = True
                msgstr_lines.append(line)
                if line:
                    is_translated = True

            elif line.startswith("#"):
                if reading_msgid:
                    if is_commented:
                        # Continuation line of a commented msgid.
                        msgid_lines.append(line[1+len(COMMENT_PREFIX):-1])
                    else:
                        msgid_lines.append(line)
                        is_broken = True
                elif reading_msgstr:
                    if is_commented:
                        msgstr_lines.append(line[1+len(COMMENT_PREFIX):-1])
                    else:
                        msgstr_lines.append(line)
                        is_broken = True
                else:
                    if line.startswith("#, fuzzy"):
                        is_fuzzy = True
                    else:
                        comment_lines.append(line)
                    reading_comment = True

            else:
                if reading_msgid:
                    # Quoted continuation line of the msgid.
                    msgid_lines.append(line[1:-1])
                elif reading_msgstr:
                    line = line[1:-1]
                    msgstr_lines.append(line)
                    if not is_translated and line:
                        is_translated = True
                else:
                    is_broken = True

    # If no final empty line, last message is not finalized!
    if reading_msgstr:
        finalize_message()

    return (messages,
            {"trans_msg": translated_messages,
             "fuzzy_msg": fuzzy_messages,
             "comm_msg" : commented_messages,
             "is_broken": is_broken},
            {"tot_msg"        : tot_messages,
             "trans_msg"      : trans_messages,
             "tot_ttips"      : tot_tooltips,
             "trans_ttips"    : trans_tooltips,
             "comm_msg"       : comm_messages,
             "nbr_signs"      : nbr_signs,
             "nbr_trans_signs": nbr_trans_signs,
             "contexts"       : contexts})
def write_messages(fname, messages, commented, fuzzy):
    """Write *messages* (as returned by parse_messages) into the po file
    *fname*.

    *commented* and *fuzzy* are sets of (ctxt, msgid) keys.  Returns the
    number of messages written.
    """
    num = 0
    with open(fname, 'w', "utf-8") as f:
        for key, entry in messages.items():
            ctxt, _msgid = key
            # Commented-out entries get every po line prefixed.
            pfx = COMMENT_PREFIX if key in commented else ""
            continuation = "\"\n{}\"".format(pfx)
            parts = ["\n".join(entry["comment_lines"])]
            # Only mark as fuzzy if msgstr is not empty!
            if key in fuzzy and "".join(entry["msgstr_lines"]):
                parts.append("\n#, fuzzy")
            if ctxt:
                parts.append("\n{}msgctxt \"".format(pfx))
                parts.append(continuation.join(entry["msgctxt_lines"]))
                parts.append("\"")
            parts.append("\n{}msgid \"".format(pfx))
            parts.append(continuation.join(entry["msgid_lines"]))
            parts.append("\"\n{}msgstr \"".format(pfx))
            parts.append(continuation.join(entry["msgstr_lines"]))
            parts.append("\"\n\n")
            f.write("".join(parts))
            num += 1
    return num
def gen_empty_messages(blender_rev, time_str, year_str):
    """Return an empty (messages, states) pair holding only a fresh po
    header entry, stamped with the given revision/time/year strings."""
    header_key = ("", "")

    # Header values carry literal backslash-n sequences, as po files expect.
    header_msgstr = [
        "Project-Id-Version: Blender r{}\\n".format(blender_rev),
        "Report-Msgid-Bugs-To: \\n",
        "POT-Creation-Date: {}\\n".format(time_str),
        "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\\n",
        "Last-Translator: FULL NAME <EMAIL@ADDRESS>\\n",
        "Language-Team: LANGUAGE <LL@li.org>\\n",
        "Language: \\n",
        "MIME-Version: 1.0\\n",
        "Content-Type: text/plain; charset=UTF-8\\n",
        "Content-Transfer-Encoding: 8bit\\n",
    ]
    header_comments = [
        "# Blender's translation file (po format).",
        "# Copyright (C) {} The Blender Foundation.".format(year_str),
        "# This file is distributed under the same "
        "# license as the Blender package.",
        "# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.",
        "#",
    ]

    messages = getattr(collections, 'OrderedDict', dict)()
    messages[header_key] = {
        "msgid_lines": [""],
        "msgctxt_lines": [],
        "msgstr_lines": header_msgstr,
        "comment_lines": header_comments,
    }
    # The header is conventionally flagged fuzzy until a translator fills it.
    states = {"trans_msg": set(),
              "fuzzy_msg": {header_key},
              "comm_msg": set(),
              "is_broken": False}
    return messages, states
def print_stats(stats, glob_stats=None, prefix=""):
    """Print a summary of one po file's *stats* dict (parse_messages format).

    When *glob_stats* (a mutable dict) is given, accumulate the ratios and
    counters into it so the caller can report totals over several po's.
    Each output line is prefixed with *prefix*.  Returns 0.
    """
    tot_msgs = stats["tot_msg"]
    trans_msgs = stats["trans_msg"]
    tot_ttips = stats["tot_ttips"]
    trans_ttips = stats["trans_ttips"]
    comm_msgs = stats["comm_msg"]
    nbr_signs = stats["nbr_signs"]
    nbr_trans_signs = stats["nbr_trans_signs"]
    contexts = stats["contexts"]

    # Guard every ratio against empty denominators.
    lvl = lvl_ttips = lvl_trans_ttips = lvl_ttips_in_trans = lvl_comm = 0.0
    if tot_msgs > 0:
        lvl = float(trans_msgs) / float(tot_msgs)
        lvl_ttips = float(tot_ttips) / float(tot_msgs)
        lvl_comm = float(comm_msgs) / float(tot_msgs + comm_msgs)
    if tot_ttips > 0:
        lvl_trans_ttips = float(trans_ttips) / float(tot_ttips)
    if trans_msgs > 0:
        lvl_ttips_in_trans = float(trans_ttips) / float(trans_msgs)

    if glob_stats:
        glob_stats["nbr"] += 1.0
        glob_stats["lvl"] += lvl
        glob_stats["lvl_ttips"] += lvl_ttips
        glob_stats["lvl_trans_ttips"] += lvl_trans_ttips
        glob_stats["lvl_ttips_in_trans"] += lvl_ttips_in_trans
        glob_stats["lvl_comm"] += lvl_comm
        glob_stats["nbr_trans_signs"] += nbr_trans_signs
        if glob_stats["nbr_signs"] == 0:
            glob_stats["nbr_signs"] = nbr_signs
        glob_stats["contexts"] |= contexts

    report = [
        "",
        "{:>6.1%} done! ({} translated messages over {}).\n"
        "".format(lvl, trans_msgs, tot_msgs),
        "{:>6.1%} of messages are tooltips ({} over {}).\n"
        "".format(lvl_ttips, tot_ttips, tot_msgs),
        "{:>6.1%} of tooltips are translated ({} over {}).\n"
        "".format(lvl_trans_ttips, trans_ttips, tot_ttips),
        "{:>6.1%} of translated messages are tooltips ({} over {}).\n"
        "".format(lvl_ttips_in_trans, trans_ttips, trans_msgs),
        "{:>6.1%} of messages are commented ({} over {}).\n"
        "".format(lvl_comm, comm_msgs, comm_msgs + tot_msgs),
        "This translation is currently made of {} signs.\n"
        "".format(nbr_trans_signs),
    ]
    print(prefix.join(report))
    return 0

View File

@@ -36,14 +36,14 @@ def read_blend_rend_chunk(path):
import struct
blendfile = open(path, 'rb')
blendfile = open(path, "rb")
head = blendfile.read(7)
if head[0:2] == b'\x1f\x8b': # gzip magic
import gzip
blendfile.close()
blendfile = gzip.open(path, 'rb')
blendfile = gzip.open(path, "rb")
head = blendfile.read(7)
if head != b'BLENDER':
@@ -80,7 +80,7 @@ def read_blend_rend_chunk(path):
scene_name = scene_name[:scene_name.index(b'\0')]
try:
scene_name = str(scene_name, 'utf8')
scene_name = str(scene_name, "utf8")
except TypeError:
pass

View File

@@ -63,11 +63,11 @@ def main():
#~ if "-d" in sys.argv: # Enable this to measure start up speed
if 0:
import cProfile
cProfile.run('import bpy; bpy.utils.load_scripts()', 'blender.prof')
cProfile.run("import bpy; bpy.utils.load_scripts()", "blender.prof")
import pstats
p = pstats.Stats('blender.prof')
p.sort_stats('cumulative').print_stats(100)
p = pstats.Stats("blender.prof")
p.sort_stats("cumulative").print_stats(100)
else:
utils.load_scripts()

View File

@@ -31,16 +31,16 @@ op_get_instance = ops_module.get_instance
class BPyOps(object):
'''
"""
Fake module like class.
bpy.ops
'''
"""
def __getattr__(self, module):
'''
"""
gets a bpy.ops submodule
'''
"""
if module.startswith('__'):
raise AttributeError(module)
return BPyOpsSubMod(module)
@@ -69,20 +69,20 @@ class BPyOps(object):
class BPyOpsSubMod(object):
'''
"""
Utility class to fake submodules.
eg. bpy.ops.object
'''
__keys__ = ('module',)
"""
__keys__ = ("module",)
def __init__(self, module):
self.module = module
def __getattr__(self, func):
'''
"""
gets a bpy.ops.submodule function
'''
"""
if func.startswith('__'):
raise AttributeError(func)
return BPyOpsSubModOp(self.module, func)
@@ -105,13 +105,13 @@ class BPyOpsSubMod(object):
class BPyOpsSubModOp(object):
'''
"""
Utility class to fake submodule operators.
eg. bpy.ops.object.somefunc
'''
"""
__keys__ = ('module', 'func')
__keys__ = ("module", "func")
def _get_doc(self):
return op_as_string(self.idname())
@@ -120,20 +120,28 @@ class BPyOpsSubModOp(object):
def _parse_args(args):
C_dict = None
C_exec = 'EXEC_DEFAULT'
C_undo = False
if len(args) == 0:
pass
elif len(args) == 1:
if type(args[0]) != str:
C_dict = args[0]
is_dict = is_exec = is_undo = False
for i, arg in enumerate(args):
if is_dict is False and isinstance(arg, dict):
if is_exec is True or is_undo is True:
raise ValueError("dict arg must come first")
C_dict = arg
is_dict = True
elif is_exec is False and isinstance(arg, str):
if is_undo is True:
raise ValueError("string arg must come before the boolean")
C_exec = arg
is_exec = True
elif is_undo is False and isinstance(arg, int):
C_undo = arg
is_undo = True
else:
C_exec = args[0]
elif len(args) == 2:
C_exec, C_dict = args
else:
raise ValueError("1 or 2 args execution context is supported")
raise ValueError("1-3 args execution context is supported")
return C_dict, C_exec
return C_dict, C_exec, C_undo
@staticmethod
def _scene_update(context):
@@ -152,7 +160,7 @@ class BPyOpsSubModOp(object):
self.func = func
def poll(self, *args):
C_dict, C_exec = BPyOpsSubModOp._parse_args(args)
C_dict, C_exec, C_undo = BPyOpsSubModOp._parse_args(args)
return op_poll(self.idname_py(), C_dict, C_exec)
def idname(self):
@@ -174,8 +182,8 @@ class BPyOpsSubModOp(object):
BPyOpsSubModOp._scene_update(context)
if args:
C_dict, C_exec = BPyOpsSubModOp._parse_args(args)
ret = op_call(self.idname_py(), C_dict, kw, C_exec)
C_dict, C_exec, C_undo = BPyOpsSubModOp._parse_args(args)
ret = op_call(self.idname_py(), C_dict, kw, C_exec, C_undo)
else:
ret = op_call(self.idname_py(), None, kw)

View File

@@ -20,7 +20,7 @@
"""
This module contains utility functions specific to blender but
not assosiated with blenders internal data.
not associated with blenders internal data.
"""
__all__ = (
@@ -253,8 +253,8 @@ _scripts = (_os.path.normpath(_scripts), )
def user_script_path():
prefs = _bpy.context.user_preferences
path = prefs.filepaths.script_directory
# returns the env var and falls back to userprefs
path = _user_resource('SCRIPTS')
if path:
path = _os.path.normpath(path)
@@ -281,7 +281,7 @@ def script_paths(subdir=None, user_pref=True, check_all=False):
prefs = _bpy.context.user_preferences
# add user scripts dir
user_script = prefs.filepaths.script_directory if user_pref else None
user_script = user_script_path()
if check_all:
# all possible paths
@@ -489,7 +489,7 @@ def keyconfig_set(filepath):
try:
keyfile = open(filepath)
exec(compile(keyfile.read(), filepath, 'exec'), {"__file__": filepath})
exec(compile(keyfile.read(), filepath, "exec"), {"__file__": filepath})
keyfile.close()
except:
import traceback

View File

@@ -19,7 +19,7 @@
# <pep8-80 compliant>
"""
Utility modules assosiated with the bpy module.
Utility modules associated with the bpy module.
"""
__all__ = (

View File

@@ -71,7 +71,7 @@ def load_image(imagepath,
def _image_load_placeholder(path):
name = bpy.path.basename(path)
if type(name) == bytes:
name = name.decode('utf-8', "replace")
name = name.decode("utf-8", "replace")
image = bpy.data.images.new(name, 128, 128)
# allow the path to be resolved later
image.filepath = path

View File

@@ -330,7 +330,7 @@ path_reference_mode = EnumProperty(
description="Method used to reference paths",
items=(('AUTO', "Auto", "Use Relative paths with subdirectories only"),
('ABSOLUTE', "Absolute", "Always write absolute paths"),
('RELATIVE', "Relative", "Always write relative patsh "
('RELATIVE', "Relative", "Always write relative paths "
"(where possible)"),
('MATCH', "Match", "Match Absolute/Relative "
"setting with input path"),

View File

@@ -171,7 +171,7 @@ def keyconfig_export(wm, kc, filepath):
# First add all user_modified keymaps (found in keyconfigs.user.keymaps list),
# then add all remaining keymaps from the currently active custom keyconfig.
#
# This will create a final list of keymaps that can be used as a 'diff' against
# This will create a final list of keymaps that can be used as a "diff" against
# the default blender keyconfig, recreating the current setup from a fresh blender
# without needing to export keymaps which haven't been edited.

View File

@@ -19,6 +19,7 @@
# <pep8-80 compliant>
__all__ = (
"mesh_linked_uv_islands",
"mesh_linked_tessfaces",
"edge_face_count_dict",
"edge_face_count",
@@ -29,6 +30,66 @@ __all__ = (
)
def mesh_linked_uv_islands(mesh):
"""
Splits the mesh into connected polygons, use this for seperating cubes from
other mesh elements within 1 mesh datablock.
:arg mesh: the mesh used to group with.
:type mesh: :class:`bpy.types.Mesh`
:return: lists of lists containing polygon indices
:rtype: list
"""
uv_loops = [luv.uv[:] for luv in mesh.uv_layers.active.data]
poly_loops = [poly.loop_indices for poly in mesh.polygons]
luv_hash = {}
luv_hash_get = luv_hash.get
luv_hash_ls = [None] * len(uv_loops)
for pi, poly_indices in enumerate(poly_loops):
for li in poly_indices:
uv = uv_loops[li]
uv_hub = luv_hash_get(uv)
if uv_hub is None:
uv_hub = luv_hash[uv] = [pi]
else:
uv_hub.append(pi)
luv_hash_ls[li] = uv_hub
poly_islands = []
# 0 = none, 1 = added, 2 = searched
poly_tag = [0] * len(poly_loops)
while True:
poly_index = -1
for i in range(len(poly_loops)):
if poly_tag[i] == 0:
poly_index = i
break
if poly_index != -1:
island = [poly_index]
poly_tag[poly_index] = 1
poly_islands.append(island)
else:
break # we're done
added = True
while added:
added = False
for poly_index in island[:]:
if poly_tag[poly_index] == 1:
for li in poly_loops[poly_index]:
for poly_index_shared in luv_hash_ls[li]:
if poly_tag[poly_index_shared] == 0:
added = True
poly_tag[poly_index_shared] = 1
island.append(poly_index_shared)
poly_tag[poly_index] = 2
return poly_islands
def mesh_linked_tessfaces(mesh):
"""
Splits the mesh into connected faces, use this for seperating cubes from
@@ -258,7 +319,7 @@ def edge_loops_from_edges(mesh, edges=None):
def ngon_tessellate(from_data, indices, fix_loops=True):
'''
"""
Takes a polyline of indices (fgon) and returns a list of face
indicie lists. Designed to be used for importers that need indices for an
fgon to create from existing verts.
@@ -268,7 +329,7 @@ def ngon_tessellate(from_data, indices, fix_loops=True):
to fill, and can be a subset of the data given.
fix_loops: If this is enabled polylines that use loops to make multiple
polylines are delt with correctly.
'''
"""
from mathutils.geometry import tessellate_polygon
from mathutils import Vector
@@ -291,9 +352,9 @@ def ngon_tessellate(from_data, indices, fix_loops=True):
return v1[1], v2[1]
if not fix_loops:
'''
"""
Normal single concave loop filling
'''
"""
if type(from_data) in {tuple, list}:
verts = [Vector(from_data[i]) for ii, i in enumerate(indices)]
else:
@@ -307,10 +368,10 @@ def ngon_tessellate(from_data, indices, fix_loops=True):
fill = tessellate_polygon([verts])
else:
'''
"""
Seperate this loop into multiple loops be finding edges that are
used twice. This is used by lightwave LWO files a lot
'''
"""
if type(from_data) in {tuple, list}:
verts = [vert_treplet(Vector(from_data[i]), ii)
@@ -414,7 +475,7 @@ def ngon_tessellate(from_data, indices, fix_loops=True):
fill = tessellate_polygon([[v[0] for v in loop] for loop in loop_list])
#draw_loops(loop_list)
#raise 'done loop'
#raise Exception("done loop")
# map to original indices
fill = [[vert_map[i] for i in reversed(f)] for f in fill]

View File

@@ -20,6 +20,7 @@
__all__ = (
"region_2d_to_vector_3d",
"region_2d_to_origin_3d",
"region_2d_to_location_3d",
"location_3d_to_region_2d",
)
@@ -58,6 +59,40 @@ def region_2d_to_vector_3d(region, rv3d, coord):
return viewinv.col[2].xyz.normalized()
def region_2d_to_origin_3d(region, rv3d, coord):
"""
Return the 3d view origin from the region relative 2d coords.
:arg region: region of the 3D viewport, typically bpy.context.region.
:type region: :class:`bpy.types.Region`
:arg rv3d: 3D region data, typically bpy.context.space_data.region_3d.
:type rv3d: :class:`bpy.types.RegionView3D`
:arg coord: 2d coordinates relative to the region;
(event.mouse_region_x, event.mouse_region_y) for example.
:type coord: 2d vector
:return: The origin of the viewpoint in 3d space.
:rtype: :class:`mathutils.Vector`
"""
from mathutils import Vector
viewinv = rv3d.view_matrix.inverted()
if rv3d.is_perspective:
from mathutils.geometry import intersect_line_plane
origin_start = viewinv.translation.copy()
else:
from mathutils.geometry import intersect_point_line
persmat = rv3d.perspective_matrix.copy()
dx = (2.0 * coord[0] / region.width) - 1.0
dy = (2.0 * coord[1] / region.height) - 1.0
persinv = persmat.inverted()
origin_start = ((persinv.col[0].xyz * dx) +
(persinv.col[1].xyz * dy) +
viewinv.translation)
return origin_start
def region_2d_to_location_3d(region, rv3d, coord, depth_location):
"""
Return a 3d location from the region relative 2d coords, aligned with
@@ -77,18 +112,16 @@ def region_2d_to_location_3d(region, rv3d, coord, depth_location):
:rtype: :class:`mathutils.Vector`
"""
from mathutils import Vector
from mathutils.geometry import intersect_point_line
persmat = rv3d.perspective_matrix.copy()
viewinv = rv3d.view_matrix.inverted()
coord_vec = region_2d_to_vector_3d(region, rv3d, coord)
depth_location = Vector(depth_location)
origin_start = region_2d_to_origin_3d(region, rv3d, coord)
origin_end = origin_start + coord_vec
if rv3d.is_perspective:
from mathutils.geometry import intersect_line_plane
origin_start = viewinv.translation.copy()
origin_end = origin_start + coord_vec
viewinv = rv3d.view_matrix.inverted()
view_vec = viewinv.col[2].copy()
return intersect_line_plane(origin_start,
origin_end,
@@ -96,14 +129,7 @@ def region_2d_to_location_3d(region, rv3d, coord, depth_location):
view_vec, 1,
)
else:
dx = (2.0 * coord[0] / region.width) - 1.0
dy = (2.0 * coord[1] / region.height) - 1.0
persinv = persmat.inverted()
viewinv = rv3d.view_matrix.inverted()
origin_start = ((persinv.col[0].xyz * dx) +
(persinv.col[1].xyz * dy) +
viewinv.translation)
origin_end = origin_start + coord_vec
from mathutils.geometry import intersect_point_line
return intersect_point_line(depth_location,
origin_start,
origin_end,

View File

@@ -73,7 +73,7 @@ def _call_recursive(context, base, py_node):
value = eval(value, {"context": _bpy.context})
setattr(base, py_node[TAG], value)
else:
value = py_node[ARGS]['value'] # have to have this
value = py_node[ARGS]["value"] # have to have this
setattr(base, py_node[TAG], value)
else:
args = _parse_rna_args(base, py_node)
@@ -85,10 +85,10 @@ def _call_recursive(context, base, py_node):
class BPyML_BaseUI():
'''
"""
This is a mix-in class that defines a draw function
which checks for draw_data
'''
"""
def draw(self, context):
layout = self.layout

View File

@@ -48,14 +48,14 @@ def replace_help(namespace):
def get_console(console_id):
'''
"""
helper function for console operators
currently each text data block gets its own
console - code.InteractiveConsole()
...which is stored in this function.
console_id can be any hashable type
'''
"""
from code import InteractiveConsole
consoles = getattr(get_console, "consoles", None)
@@ -96,7 +96,10 @@ def get_console(console_id):
namespace["__builtins__"] = sys.modules["builtins"]
namespace["bpy"] = bpy
# weak! - but highly convenient
namespace["C"] = bpy.context
namespace["D"] = bpy.data
replace_help(namespace)
@@ -305,6 +308,7 @@ def banner(context):
'OUTPUT')
add_scrollback("Convenience Imports: from mathutils import *; "
"from math import *", 'OUTPUT')
add_scrollback("Convenience Variables: C = bpy.context, D = bpy.data", 'OUTPUT')
add_scrollback("", 'OUTPUT')
sc.prompt = PROMPT

View File

@@ -20,7 +20,7 @@
import os
import bpy
language_id = 'shell'
language_id = "shell"
def add_scrollback(text, text_type):

View File

@@ -172,7 +172,7 @@ def graph_armature(obj, filepath, FAKE_PARENT=True, CONSTRAINTS=True, DRIVERS=Tr
fileobject.close()
'''
print(".", end='')
print(".", end="")
import sys
sys.stdout.flush()
'''

View File

@@ -66,9 +66,9 @@ def rna_id_ignore(rna_id):
def range_str(val):
if val < -10000000:
return '-inf'
return "-inf"
elif val > 10000000:
return 'inf'
return "inf"
elif type(val) == float:
return '%g' % val
else:
@@ -305,8 +305,8 @@ class InfoPropertyRNA:
return type_str
def __str__(self):
txt = ''
txt += ' * ' + self.identifier + ': ' + self.description
txt = ""
txt += " * " + self.identifier + ": " + self.description
return txt
@@ -398,7 +398,7 @@ class InfoOperatorRNA:
return None, None
def _GetInfoRNA(bl_rna, cls, parent_id=''):
def _GetInfoRNA(bl_rna, cls, parent_id=""):
if bl_rna is None:
return None
@@ -437,9 +437,9 @@ def BuildRNAInfo():
# rna_functions_dict = {} # store all functions directly in this type (not inherited)
def full_rna_struct_path(rna_struct):
'''
"""
Needed when referencing one struct from another
'''
"""
nested = rna_struct.nested
if nested:
return "%s.%s" % (full_rna_struct_path(nested), rna_struct.identifier)
@@ -641,7 +641,7 @@ if __name__ == "__main__":
props = [(prop.identifier, prop) for prop in v.properties]
for prop_id, prop in sorted(props):
# if prop.type == 'boolean':
# if prop.type == "boolean":
# continue
prop_type = prop.type
if prop.array_length > 0:

View File

@@ -352,7 +352,7 @@ def xml_file_run(context, filepath, rna_map):
def xml_file_write(context, filepath, rna_map):
file = open(filepath, 'w', encoding='utf-8')
file = open(filepath, "w", encoding="utf-8")
fw = file.write
fw("<bpy>\n")

View File

@@ -27,7 +27,7 @@ import sys
def cutPoint(text, length):
"Returns position of the last space found before 'length' chars"
"""Returns position of the last space found before 'length' chars"""
l = length
c = text[l]
while c != ' ':
@@ -98,7 +98,7 @@ def write_sysinfo(op):
output.write(lilies)
ffmpeg = bpy.app.ffmpeg
if ffmpeg.supported:
for lib in ['avcodec', 'avdevice', 'avformat', 'avutil', 'swscale']:
for lib in ("avcodec", "avdevice", "avformat", "avutil", "swscale"):
output.write("%r:%r%r\n" % (lib, " " * (10 - len(lib)),
getattr(ffmpeg, lib + "_version_string")))
else:

View File

@@ -2,7 +2,7 @@
<Theme>
<view_3d>
<ThemeView3D object_active="#ff8c19"
editmesh_active="#ffffff80"
editmesh_active="#ff020080"
act_spline="#db2512"
handle_align="#803060"
handle_sel_align="#f090a0"
@@ -461,7 +461,7 @@
<ThemeTextEditor cursor="#ff0000"
syntax_special="#969629"
line_numbers_background="#191919"
selected_text="#ffffff"
selected_text="#202020"
syntax_builtin="#cf3d99"
syntax_comment="#249d60"
syntax_numbers="#3c68ff"

View File

@@ -1,10 +1,16 @@
import bpy
op = bpy.context.active_operator
op.apply_modifiers = True
op.export_mesh_type = 0
op.export_mesh_type_selection = 'view'
op.selected = True
op.include_children = False
op.include_armatures = True
op.deform_bones_only = True
op.active_uv_only = True
op.include_uv_textures = True
op.use_texture_copies = True
op.use_object_instantiation = False
op.sort_by_name = True
op.second_life = True

View File

@@ -0,0 +1,16 @@
import bpy
op = bpy.context.active_operator
op.apply_modifiers = True
op.export_mesh_type = 0
op.export_mesh_type_selection = 'view'
op.selected = True
op.include_children = False
op.include_armatures = False
op.deform_bones_only = False
op.active_uv_only = True
op.include_uv_textures = True
op.use_texture_copies = True
op.use_object_instantiation = False
op.sort_by_name = True
op.second_life = False

View File

@@ -84,7 +84,7 @@ def add_torus(major_rad, minor_rad, major_seg, minor_seg):
class AddTorus(Operator, object_utils.AddObjectHelper):
'''Add a torus mesh'''
"""Add a torus mesh"""
bl_idname = "mesh.primitive_torus_add"
bl_label = "Add Torus"
bl_options = {'REGISTER', 'UNDO', 'PRESET'}

View File

@@ -227,7 +227,7 @@ class BakeAction(Operator):
self.frame_start = scene.frame_start
self.frame_end = scene.frame_end
self.bake_types = {'POSE'} if context.mode == 'POSE' else {'OBJECT'}
wm = context.window_manager
return wm.invoke_props_dialog(self)

View File

@@ -77,7 +77,7 @@ class ConsoleBanner(Operator):
# default to python
if not sc.language:
sc.language = 'python'
sc.language = "python"
module = _lang_module_get(sc)
banner = getattr(module, "banner", None)

View File

@@ -24,7 +24,7 @@ from bpy.props import StringProperty
class EditExternally(Operator):
'''Edit image in an external application'''
"""Edit image in an external application"""
bl_idname = "image.external_edit"
bl_label = "Image Edit Externally"
bl_options = {'REGISTER'}

View File

@@ -25,7 +25,7 @@ from bpy.props import EnumProperty
class MeshMirrorUV(Operator):
'''Copy mirror UV coordinates on the X axis based on a mirrored mesh'''
"""Copy mirror UV coordinates on the X axis based on a mirrored mesh"""
bl_idname = "mesh.faces_mirror_uv"
bl_label = "Copy Mirrored UV coords"
bl_options = {'REGISTER', 'UNDO'}
@@ -58,12 +58,9 @@ class MeshMirrorUV(Operator):
vcos = (v.co.to_tuple(5) for v in mesh.vertices)
for i, co in enumerate(vcos):
if co[0] > 0.0:
mirror_gt[co] = i
elif co[0] < 0.0:
mirror_lt[co] = i
else:
if co[0] >= 0.0:
mirror_gt[co] = i
if co[0] <= 0.0:
mirror_lt[co] = i
#for i, v in enumerate(mesh.vertices):
@@ -97,14 +94,13 @@ class MeshMirrorUV(Operator):
puvsel[i] = (False not in
(uv.select for uv in uv_loops[lstart:lend]))
# Vert idx of the poly.
vidxs[i] = tuple(sorted(l.vertex_index
for l in loops[lstart:lend]))
vidxs[i] = tuple(l.vertex_index for l in loops[lstart:lend])
# As we have no poly.center yet...
pcents[i] = tuple(map(lambda x: x / p.loop_total,
map(sum, zip(*(verts[idx].co
for idx in vidxs[i])))))
# Preparing next step finding matching polys.
mirror_pm[vidxs[i]] = i
mirror_pm[tuple(sorted(vidxs[i]))] = i
for i in range(nbr_polys):
# Find matching mirror poly.

View File

@@ -27,14 +27,15 @@ from bpy.props import (StringProperty,
class SelectPattern(Operator):
'''Select objects matching a naming pattern'''
"""Select objects matching a naming pattern"""
bl_idname = "object.select_pattern"
bl_label = "Select Pattern"
bl_options = {'REGISTER', 'UNDO'}
pattern = StringProperty(
name="Pattern",
description="Name filter using '*', '?' and '[abc]' unix style wildcards",
description="Name filter using '*', '?' and "
"'[abc]' unix style wildcards",
maxlen=64,
default="*",
)
@@ -104,7 +105,7 @@ class SelectPattern(Operator):
class SelectCamera(Operator):
'''Select the active camera'''
"""Select the active camera"""
bl_idname = "object.select_camera"
bl_label = "Select Camera"
bl_options = {'REGISTER', 'UNDO'}
@@ -130,8 +131,8 @@ class SelectCamera(Operator):
class SelectHierarchy(Operator):
'''Select object relative to the active object's position ''' \
'''in the hierarchy'''
"""Select object relative to the active object's position """ \
"""in the hierarchy"""
bl_idname = "object.select_hierarchy"
bl_label = "Select Hierarchy"
bl_options = {'REGISTER', 'UNDO'}
@@ -197,7 +198,7 @@ class SelectHierarchy(Operator):
class SubdivisionSet(Operator):
'''Sets a Subdivision Surface Level (1-5)'''
"""Sets a Subdivision Surface Level (1-5)"""
bl_idname = "object.subdivision_set"
bl_label = "Subdivision Set"
@@ -277,8 +278,8 @@ class SubdivisionSet(Operator):
class ShapeTransfer(Operator):
'''Copy another selected objects active shape to this one by ''' \
'''applying the relative offsets'''
"""Copy another selected objects active shape to this one by """ \
"""applying the relative offsets"""
bl_idname = "object.shape_key_transfer"
bl_label = "Transfer Shape Key"
@@ -467,7 +468,7 @@ class ShapeTransfer(Operator):
class JoinUVs(Operator):
'''Copy UV Layout to objects with matching geometry'''
"""Copy UV Layout to objects with matching geometry"""
bl_idname = "object.join_uvs"
bl_label = "Join as UVs"
@@ -546,7 +547,7 @@ class JoinUVs(Operator):
class MakeDupliFace(Operator):
'''Make linked objects into dupli-faces'''
"""Make linked objects into dupli-faces"""
bl_idname = "object.make_dupli_face"
bl_label = "Make Dupli-Face"
@@ -641,7 +642,7 @@ class IsolateTypeRender(Operator):
class ClearAllRestrictRender(Operator):
'''Reveal all render objects by setting the hide render flag'''
"""Reveal all render objects by setting the hide render flag"""
bl_idname = "object.hide_render_clear_all"
bl_label = "Clear All Restrict Render"
bl_options = {'REGISTER', 'UNDO'}
@@ -653,7 +654,7 @@ class ClearAllRestrictRender(Operator):
class TransformsToDeltasAnim(Operator):
'''Convert object animation for normal transforms to delta transforms'''
"""Convert object animation for normal transforms to delta transforms"""
bl_idname = "object.anim_transforms_to_deltas"
bl_label = "Animated Transforms to Deltas"
bl_options = {'REGISTER', 'UNDO'}
@@ -699,7 +700,7 @@ class TransformsToDeltasAnim(Operator):
class DupliOffsetFromCursor(Operator):
'''Set offset used for DupliGroup based on cursor position'''
"""Set offset used for DupliGroup based on cursor position"""
bl_idname = "object.dupli_offset_from_cursor"
bl_label = "Set Offset From Cursor"
bl_options = {'REGISTER', 'UNDO'}

View File

@@ -341,7 +341,7 @@ from bpy.props import EnumProperty, BoolProperty
class AlignObjects(Operator):
'''Align Objects'''
"""Align Objects"""
bl_idname = "object.align"
bl_label = "Align Objects"
bl_options = {'REGISTER', 'UNDO'}

View File

@@ -95,7 +95,7 @@ from bpy.props import (IntProperty,
class RandomizeLocRotSize(Operator):
'''Randomize objects loc/rot/scale'''
"""Randomize objects loc/rot/scale"""
bl_idname = "object.randomize_transform"
bl_label = "Randomize Transform"
bl_options = {'REGISTER', 'UNDO'}

View File

@@ -16,7 +16,7 @@
#
# ##### END GPL LICENSE BLOCK #####
# <pep8-80 compliant>
# <pep8 compliant>
import bpy
from bpy.types import Menu, Operator
@@ -24,10 +24,10 @@ from bpy.props import StringProperty, BoolProperty
class AddPresetBase():
'''Base preset class, only for subclassing
"""Base preset class, only for subclassing
subclasses must define
- preset_values
- preset_subdir '''
- preset_subdir """
# bl_idname = "script.preset_base_add"
# bl_label = "Add a Python Preset"
bl_options = {'REGISTER'} # only because invoke_props_popup requires.
@@ -179,7 +179,7 @@ class AddPresetBase():
class ExecutePreset(Operator):
'''Execute a preset'''
"""Execute a preset"""
bl_idname = "script.execute_preset"
bl_label = "Execute a Python Preset"
@@ -217,7 +217,7 @@ class ExecutePreset(Operator):
class AddPresetRender(AddPresetBase, Operator):
'''Add a Render Preset'''
"""Add a Render Preset"""
bl_idname = "render.preset_add"
bl_label = "Add Render Preset"
preset_menu = "RENDER_MT_presets"
@@ -243,7 +243,7 @@ class AddPresetRender(AddPresetBase, Operator):
class AddPresetCamera(AddPresetBase, Operator):
'''Add a Camera Preset'''
"""Add a Camera Preset"""
bl_idname = "camera.preset_add"
bl_label = "Add Camera Preset"
preset_menu = "CAMERA_MT_presets"
@@ -262,7 +262,7 @@ class AddPresetCamera(AddPresetBase, Operator):
class AddPresetSSS(AddPresetBase, Operator):
'''Add a Subsurface Scattering Preset'''
"""Add a Subsurface Scattering Preset"""
bl_idname = "material.sss_preset_add"
bl_label = "Add SSS Preset"
preset_menu = "MATERIAL_MT_sss_presets"
@@ -290,7 +290,7 @@ class AddPresetSSS(AddPresetBase, Operator):
class AddPresetCloth(AddPresetBase, Operator):
'''Add a Cloth Preset'''
"""Add a Cloth Preset"""
bl_idname = "cloth.preset_add"
bl_label = "Add Cloth Preset"
preset_menu = "CLOTH_MT_presets"
@@ -312,7 +312,7 @@ class AddPresetCloth(AddPresetBase, Operator):
class AddPresetFluid(AddPresetBase, Operator):
'''Add a Fluid Preset'''
"""Add a Fluid Preset"""
bl_idname = "fluid.preset_add"
bl_label = "Add Fluid Preset"
preset_menu = "FLUID_MT_presets"
@@ -330,7 +330,7 @@ class AddPresetFluid(AddPresetBase, Operator):
class AddPresetSunSky(AddPresetBase, Operator):
'''Add a Sky & Atmosphere Preset'''
"""Add a Sky & Atmosphere Preset"""
bl_idname = "lamp.sunsky_preset_add"
bl_label = "Add Sunsky Preset"
preset_menu = "LAMP_MT_sunsky_presets"
@@ -359,7 +359,7 @@ class AddPresetSunSky(AddPresetBase, Operator):
class AddPresetInteraction(AddPresetBase, Operator):
'''Add an Application Interaction Preset'''
"""Add an Application Interaction Preset"""
bl_idname = "wm.interaction_preset_add"
bl_label = "Add Interaction Preset"
preset_menu = "USERPREF_MT_interaction_presets"
@@ -385,7 +385,7 @@ class AddPresetInteraction(AddPresetBase, Operator):
class AddPresetTrackingCamera(AddPresetBase, Operator):
'''Add a Tracking Camera Intrinsics Preset'''
"""Add a Tracking Camera Intrinsics Preset"""
bl_idname = "clip.camera_preset_add"
bl_label = "Add Camera Preset"
preset_menu = "CLIP_MT_camera_presets"
@@ -408,7 +408,7 @@ class AddPresetTrackingCamera(AddPresetBase, Operator):
class AddPresetTrackingTrackColor(AddPresetBase, Operator):
'''Add a Clip Track Color Preset'''
"""Add a Clip Track Color Preset"""
bl_idname = "clip.track_color_preset_add"
bl_label = "Add Track Color Preset"
preset_menu = "CLIP_MT_track_color_presets"
@@ -426,7 +426,7 @@ class AddPresetTrackingTrackColor(AddPresetBase, Operator):
class AddPresetTrackingSettings(AddPresetBase, Operator):
'''Add a motion tracking settings preset'''
"""Add a motion tracking settings preset"""
bl_idname = "clip.tracking_settings_preset_add"
bl_label = "Add Tracking Settings Preset"
preset_menu = "CLIP_MT_tracking_settings_presets"
@@ -453,7 +453,7 @@ class AddPresetTrackingSettings(AddPresetBase, Operator):
class AddPresetNodeColor(AddPresetBase, Operator):
'''Add a Node Color Preset'''
"""Add a Node Color Preset"""
bl_idname = "node.node_color_preset_add"
bl_label = "Add Node Color Preset"
preset_menu = "NODE_MT_node_color_presets"
@@ -471,7 +471,7 @@ class AddPresetNodeColor(AddPresetBase, Operator):
class AddPresetInterfaceTheme(AddPresetBase, Operator):
'''Add a theme preset'''
"""Add a theme preset"""
bl_idname = "wm.interface_theme_preset_add"
bl_label = "Add Tracking Settings Preset"
preset_menu = "USERPREF_MT_interface_theme_presets"
@@ -479,7 +479,7 @@ class AddPresetInterfaceTheme(AddPresetBase, Operator):
class AddPresetKeyconfig(AddPresetBase, Operator):
'''Add a Key-config Preset'''
"""Add a Key-config Preset"""
bl_idname = "wm.keyconfig_preset_add"
bl_label = "Add Keyconfig Preset"
preset_menu = "USERPREF_MT_keyconfigs"
@@ -502,7 +502,7 @@ class AddPresetKeyconfig(AddPresetBase, Operator):
class AddPresetOperator(AddPresetBase, Operator):
'''Add an Application Interaction Preset'''
"""Add an Application Interaction Preset"""
bl_idname = "wm.operator_preset_add"
bl_label = "Operator Preset"
preset_menu = "WM_MT_operator_presets"

View File

@@ -66,7 +66,7 @@ def guess_player_path(preset):
class PlayRenderedAnim(Operator):
'''Play back rendered frames/movies using an external player'''
"""Play back rendered frames/movies using an external player"""
bl_idname = "render.play_rendered_anim"
bl_label = "Play Rendered Animation"
bl_options = {'REGISTER'}

View File

@@ -25,7 +25,7 @@ from bpy.props import IntProperty
class SequencerCrossfadeSounds(Operator):
'''Do cross-fading volume animation of two selected sound strips'''
"""Do cross-fading volume animation of two selected sound strips"""
bl_idname = "sequencer.crossfade_sounds"
bl_label = "Crossfade sounds"
@@ -76,7 +76,7 @@ class SequencerCrossfadeSounds(Operator):
class SequencerCutMulticam(Operator):
'''Cut multi-cam strip and select camera'''
"""Cut multi-cam strip and select camera"""
bl_idname = "sequencer.cut_multicam"
bl_label = "Cut multicam"
@@ -118,7 +118,7 @@ class SequencerCutMulticam(Operator):
class SequencerDeinterlaceSelectedMovies(Operator):
'''Deinterlace all selected movie sources'''
"""Deinterlace all selected movie sources"""
bl_idname = "sequencer.deinterlace_selected_movies"
bl_label = "Deinterlace Movies"

View File

@@ -46,11 +46,11 @@ def extend(obj, operator, EXTEND_MODE):
OTHER_INDEX = 2, 3, 0, 1
def extend_uvs(face_source, face_target, edge_key):
'''
"""
Takes 2 faces,
Projects its extends its UV coords onto the face next to it.
Both faces must share an edge
'''
"""
def face_edge_vs(vi):
vlen = len(vi)
@@ -224,7 +224,7 @@ def main(context, operator):
class FollowActiveQuads(Operator):
'''Follow UVs from active quads along continuous face loops'''
"""Follow UVs from active quads along continuous face loops"""
bl_idname = "uv.follow_active_quads"
bl_label = "Follow Active Quads"
bl_options = {'REGISTER', 'UNDO'}

View File

@@ -197,12 +197,12 @@ def lightmap_uvpack(meshes,
PREF_BOX_DIV=8,
PREF_MARGIN_DIV=512
):
'''
"""
BOX_DIV if the maximum division of the UV map that
a box may be consolidated into.
Basically, a lower value will be slower but waist less space
and a higher value will have more clumpy boxes but more wasted space
'''
"""
import time
from math import sqrt
@@ -545,7 +545,7 @@ from bpy.props import BoolProperty, FloatProperty, IntProperty
class LightMapPack(Operator):
'''Follow UVs from active quads along continuous face loops'''
"""Follow UVs from active quads along continuous face loops"""
bl_idname = "uv.lightmap_pack"
bl_label = "Lightmap Pack"

View File

@@ -107,7 +107,6 @@ def boundsEdgeLoop(edges):
# print len(faces), minx, maxx, miny , maxy
for ed in edges:
for pt in ed:
print 'ass'
x= pt[0]
y= pt[1]
if x<minx: x= minx
@@ -493,7 +492,7 @@ def mergeUvIslands(islandList):
pass
if Intersect == 2: # Source inside target
'''
"""
We have an intersection, if we are inside the target
then move us 1 whole width across,
Its possible this is a bad idea since 2 skinny Angular faces
@@ -501,8 +500,7 @@ def mergeUvIslands(islandList):
since we have already tested for it.
It gives about 10% speedup with minimal errors.
'''
#print 'ass'
"""
# Move the test along its width + SMALL_NUM
#boxLeft += sourceIsland[4] + SMALL_NUM
boxLeft += sourceIsland[4]
@@ -696,11 +694,11 @@ def packIslands(islandList):
islandIdx -=1
continue
'''Save the offset to be applied later,
"""Save the offset to be applied later,
we could apply to the UVs now and allign them to the bottom left hand area
of the UV coords like the box packer imagines they are
but, its quicker just to remember their offset and
apply the packing and offset in 1 pass '''
apply the packing and offset in 1 pass """
islandOffsetList.append((minx, miny))
# Add to boxList. use the island idx for the BOX id.
@@ -832,7 +830,7 @@ def main(context,
USER_ONLY_SELECTED_FACES = False
if not obList:
raise('error, no selected mesh objects')
raise Exception("error, no selected mesh objects")
# Reuse variable
if len(obList) == 1:
@@ -1106,8 +1104,9 @@ from bpy.props import FloatProperty
class SmartProject(Operator):
'''This script projection unwraps the selected faces of a mesh ''' \
'''(it operates on all selected mesh objects, and can be used to unwrap selected faces, or all faces)'''
"""This script projection unwraps the selected faces of a mesh """ \
"""(it operates on all selected mesh objects, and can be used """ \
"""to unwrap selected faces, or all faces)"""
bl_idname = "uv.smart_project"
bl_label = "Smart UV Project"
bl_options = {'REGISTER', 'UNDO'}

View File

@@ -31,7 +31,8 @@ from rna_prop_ui import rna_idprop_ui_prop_get, rna_idprop_ui_prop_clear
class MESH_OT_delete_edgeloop(Operator):
'''Delete an edge loop by merging the faces on each side to a single face loop'''
"""Delete an edge loop by merging the faces on each side """ \
"""to a single face loop"""
bl_idname = "mesh.delete_edgeloop"
bl_label = "Delete Edge Loop"
@@ -138,7 +139,7 @@ def execute_context_assign(self, context):
class BRUSH_OT_active_index_set(Operator):
'''Set active sculpt/paint brush from it's number'''
"""Set active sculpt/paint brush from it's number"""
bl_idname = "brush.active_index_set"
bl_label = "Set Brush Number"
@@ -173,7 +174,7 @@ class BRUSH_OT_active_index_set(Operator):
class WM_OT_context_set_boolean(Operator):
'''Set a context value'''
"""Set a context value"""
bl_idname = "wm.context_set_boolean"
bl_label = "Context Set Boolean"
bl_options = {'UNDO', 'INTERNAL'}
@@ -189,7 +190,7 @@ class WM_OT_context_set_boolean(Operator):
class WM_OT_context_set_int(Operator): # same as enum
'''Set a context value'''
"""Set a context value"""
bl_idname = "wm.context_set_int"
bl_label = "Context Set"
bl_options = {'UNDO', 'INTERNAL'}
@@ -206,7 +207,7 @@ class WM_OT_context_set_int(Operator): # same as enum
class WM_OT_context_scale_int(Operator):
'''Scale an int context value'''
"""Scale an int context value"""
bl_idname = "wm.context_scale_int"
bl_label = "Context Set"
bl_options = {'UNDO', 'INTERNAL'}
@@ -249,7 +250,7 @@ class WM_OT_context_scale_int(Operator):
class WM_OT_context_set_float(Operator): # same as enum
'''Set a context value'''
"""Set a context value"""
bl_idname = "wm.context_set_float"
bl_label = "Context Set Float"
bl_options = {'UNDO', 'INTERNAL'}
@@ -266,7 +267,7 @@ class WM_OT_context_set_float(Operator): # same as enum
class WM_OT_context_set_string(Operator): # same as enum
'''Set a context value'''
"""Set a context value"""
bl_idname = "wm.context_set_string"
bl_label = "Context Set String"
bl_options = {'UNDO', 'INTERNAL'}
@@ -282,7 +283,7 @@ class WM_OT_context_set_string(Operator): # same as enum
class WM_OT_context_set_enum(Operator):
'''Set a context value'''
"""Set a context value"""
bl_idname = "wm.context_set_enum"
bl_label = "Context Set Enum"
bl_options = {'UNDO', 'INTERNAL'}
@@ -298,7 +299,7 @@ class WM_OT_context_set_enum(Operator):
class WM_OT_context_set_value(Operator):
'''Set a context value'''
"""Set a context value"""
bl_idname = "wm.context_set_value"
bl_label = "Context Set Value"
bl_options = {'UNDO', 'INTERNAL'}
@@ -319,7 +320,7 @@ class WM_OT_context_set_value(Operator):
class WM_OT_context_toggle(Operator):
'''Toggle a context value'''
"""Toggle a context value"""
bl_idname = "wm.context_toggle"
bl_label = "Context Toggle"
bl_options = {'UNDO', 'INTERNAL'}
@@ -338,7 +339,7 @@ class WM_OT_context_toggle(Operator):
class WM_OT_context_toggle_enum(Operator):
'''Toggle a context value'''
"""Toggle a context value"""
bl_idname = "wm.context_toggle_enum"
bl_label = "Context Toggle Values"
bl_options = {'UNDO', 'INTERNAL'}
@@ -371,8 +372,8 @@ class WM_OT_context_toggle_enum(Operator):
class WM_OT_context_cycle_int(Operator):
'''Set a context value. Useful for cycling active material, '''
'''vertex keys, groups' etc'''
"""Set a context value. Useful for cycling active material, """ \
"""vertex keys, groups' etc"""
bl_idname = "wm.context_cycle_int"
bl_label = "Context Int Cycle"
bl_options = {'UNDO', 'INTERNAL'}
@@ -406,7 +407,7 @@ class WM_OT_context_cycle_int(Operator):
class WM_OT_context_cycle_enum(Operator):
'''Toggle a context value'''
"""Toggle a context value"""
bl_idname = "wm.context_cycle_enum"
bl_label = "Context Enum Cycle"
bl_options = {'UNDO', 'INTERNAL'}
@@ -458,8 +459,8 @@ class WM_OT_context_cycle_enum(Operator):
class WM_OT_context_cycle_array(Operator):
'''Set a context array value. '''
'''Useful for cycling the active mesh edit mode'''
"""Set a context array value """ \
"""(useful for cycling the active mesh edit mode)"""
bl_idname = "wm.context_cycle_array"
bl_label = "Context Array Cycle"
bl_options = {'UNDO', 'INTERNAL'}
@@ -519,7 +520,7 @@ class WM_OT_context_menu_enum(Operator):
class WM_OT_context_set_id(Operator):
'''Toggle a context value'''
"""Toggle a context value"""
bl_idname = "wm.context_set_id"
bl_label = "Set Library ID"
bl_options = {'UNDO', 'INTERNAL'}
@@ -575,7 +576,7 @@ data_path_item = StringProperty(
class WM_OT_context_collection_boolean_set(Operator):
'''Set boolean values for a collection of items'''
"""Set boolean values for a collection of items"""
bl_idname = "wm.context_collection_boolean_set"
bl_label = "Context Collection Boolean Set"
bl_options = {'UNDO', 'REGISTER', 'INTERNAL'}
@@ -634,7 +635,7 @@ class WM_OT_context_collection_boolean_set(Operator):
class WM_OT_context_modal_mouse(Operator):
'''Adjust arbitrary values with mouse input'''
"""Adjust arbitrary values with mouse input"""
bl_idname = "wm.context_modal_mouse"
bl_label = "Context Modal Mouse"
bl_options = {'GRAB_POINTER', 'BLOCKING', 'UNDO', 'INTERNAL'}
@@ -799,7 +800,6 @@ class WM_OT_path_open(Operator):
return {'FINISHED'}
def _wm_doc_get_id(doc_id, do_url=True, url_prefix=""):
id_split = doc_id.split(".")
url = rna = None
@@ -832,12 +832,12 @@ def _wm_doc_get_id(doc_id, do_url=True, url_prefix=""):
url = ("%s/bpy.types.%s.html#bpy.types.%s.%s" % (url_prefix, class_name, class_name, class_prop))
else:
rna = ("bpy.types.%s.%s" % (class_name, class_prop))
return url if do_url else rna
class WM_OT_doc_view_manual(Operator):
'''Load online manual'''
"""Load online manual"""
bl_idname = "wm.doc_view_manual"
bl_label = "View Manual"
@@ -882,7 +882,7 @@ class WM_OT_doc_view_manual(Operator):
class WM_OT_doc_view(Operator):
'''Load online reference docs'''
"""Load online reference docs"""
bl_idname = "wm.doc_view"
bl_label = "View Documentation"
@@ -906,7 +906,7 @@ class WM_OT_doc_view(Operator):
class WM_OT_doc_edit(Operator):
'''Load online reference docs'''
"""Load online reference docs"""
bl_idname = "wm.doc_edit"
bl_label = "Edit Documentation"
@@ -1009,7 +1009,7 @@ rna_max = FloatProperty(
class WM_OT_properties_edit(Operator):
'''Internal use (edit a property data_path)'''
"""Internal use (edit a property data_path)"""
bl_idname = "wm.properties_edit"
bl_label = "Edit Property"
bl_options = {'REGISTER'} # only because invoke_props_popup requires.
@@ -1061,7 +1061,7 @@ class WM_OT_properties_edit(Operator):
prop_ui["soft_min"] = prop_ui["min"] = prop_type(self.min)
prop_ui["soft_max"] = prop_ui["max"] = prop_type(self.max)
prop_ui['description'] = self.description
prop_ui["description"] = self.description
# otherwise existing buttons which reference freed
# memory may crash blender [#26510]
@@ -1095,7 +1095,7 @@ class WM_OT_properties_edit(Operator):
class WM_OT_properties_add(Operator):
'''Internal use (edit a property data_path)'''
"""Internal use (edit a property data_path)"""
bl_idname = "wm.properties_add"
bl_label = "Add Property"
bl_options = {'UNDO'}
@@ -1138,7 +1138,7 @@ class WM_OT_properties_context_change(Operator):
class WM_OT_properties_remove(Operator):
'''Internal use (edit a property data_path)'''
"""Internal use (edit a property data_path)"""
bl_idname = "wm.properties_remove"
bl_label = "Remove Property"
bl_options = {'UNDO'}
@@ -1204,7 +1204,7 @@ class WM_OT_appconfig_activate(Operator):
class WM_OT_sysinfo(Operator):
'''Generate System Info'''
"""Generate System Info"""
bl_idname = "wm.sysinfo"
bl_label = "System Info"
@@ -1215,7 +1215,7 @@ class WM_OT_sysinfo(Operator):
class WM_OT_copy_prev_settings(Operator):
'''Copy settings from previous version'''
"""Copy settings from previous version"""
bl_idname = "wm.copy_prev_settings"
bl_label = "Copy Previous Settings"
@@ -1252,7 +1252,7 @@ class WM_OT_copy_prev_settings(Operator):
class WM_OT_blenderplayer_start(Operator):
'''Launch the blender-player with the current blend-file'''
"""Launch the blender-player with the current blend-file"""
bl_idname = "wm.blenderplayer_start"
bl_label = "Start Game In Player"
@@ -1597,10 +1597,11 @@ class WM_OT_addon_disable(Operator):
addon_utils.disable(self.module)
return {'FINISHED'}
class WM_OT_theme_install(Operator):
"Install a theme"
bl_idname = "wm.theme_install"
bl_label = "Install Theme..."
bl_label = "Install Theme..."
overwrite = BoolProperty(
name="Overwrite",
@@ -1624,10 +1625,10 @@ class WM_OT_theme_install(Operator):
import os
import shutil
import traceback
xmlfile = self.filepath
path_themes = bpy.utils.user_resource('SCRIPTS','presets/interface_theme',create=True)
path_themes = bpy.utils.user_resource('SCRIPTS', "presets/interface_theme", create=True)
if not path_themes:
self.report({'ERROR'}, "Failed to get themes path")
@@ -1642,7 +1643,7 @@ class WM_OT_theme_install(Operator):
try:
shutil.copyfile(xmlfile, path_dest)
bpy.ops.script.execute_preset(filepath=path_dest,menu_idname="USERPREF_MT_interface_theme_presets")
bpy.ops.script.execute_preset(filepath=path_dest, menu_idname="USERPREF_MT_interface_theme_presets")
except:
traceback.print_exc()
@@ -1650,7 +1651,6 @@ class WM_OT_theme_install(Operator):
return {'FINISHED'}
def invoke(self, context, event):
wm = context.window_manager
wm.fileselect_add(self)

View File

@@ -203,7 +203,7 @@ class DATA_PT_pose_library(ArmatureButtonsPanel, Panel):
col.operator("poselib.pose_remove", icon='ZOOMOUT', text="")
col.operator("poselib.apply_pose", icon='ZOOM_SELECTED', text="").pose_index = poselib.pose_markers.active_index
col.operator("poselib.action_sanitise", icon='HELP', text="") # XXX: put in menu?
col.operator("poselib.action_sanitize", icon='HELP', text="") # XXX: put in menu?
# properties for active marker
if pose_marker_active is not None:
@@ -304,7 +304,7 @@ class DATA_PT_motion_paths(MotionPathButtonsPanel, Panel):
layout = self.layout
ob = context.object
avs = ob.pose.animation_visualisation
avs = ob.pose.animation_visualization
pchan = context.active_pose_bone
mpath = pchan.motion_path if pchan else None
@@ -323,7 +323,7 @@ class DATA_PT_onion_skinning(OnionSkinButtonsPanel): # , Panel): # inherit from
def draw(self, context):
ob = context.object
self.draw_settings(context, ob.pose.animation_visualisation, bones=True)
self.draw_settings(context, ob.pose.animation_visualization, bones=True)
class DATA_PT_custom_props_arm(ArmatureButtonsPanel, PropertyPanel, Panel):

View File

@@ -120,13 +120,13 @@ class BONE_PT_transform_locks(BoneButtonsPanel, Panel):
pchan = ob.pose.bones[bone.name]
split = layout.split(percentage=0.1)
col = split.column(align=True)
col.label(text="")
col.label(text="X:")
col.label(text="Y:")
col.label(text="Z:")
col = split.row()
sub = col.row()
sub.active = not (bone.parent and bone.use_connect)
@@ -137,7 +137,7 @@ class BONE_PT_transform_locks(BoneButtonsPanel, Panel):
if pchan.rotation_mode in {'QUATERNION', 'AXIS_ANGLE'}:
row = layout.row()
row.prop(pchan, "lock_rotations_4d", text="Lock Rotation")
sub = row.row()
sub.active = pchan.lock_rotations_4d
sub.prop(pchan, "lock_rotation_w", text="W")

View File

@@ -33,7 +33,7 @@ class CurveButtonsPanel():
class CurveButtonsPanelCurve(CurveButtonsPanel):
'''Same as above but for curves only'''
"""Same as above but for curves only"""
@classmethod
def poll(cls, context):
@@ -41,7 +41,7 @@ class CurveButtonsPanelCurve(CurveButtonsPanel):
class CurveButtonsPanelActive(CurveButtonsPanel):
'''Same as above but for curves only'''
"""Same as above but for curves only"""
@classmethod
def poll(cls, context):
@@ -406,7 +406,7 @@ class DATA_PT_text_boxes(CurveButtonsPanel, Panel):
col.prop(box, "x", text="X")
col.prop(box, "y", text="Y")
row.operator("font.textbox_remove", text='', icon='X', emboss=False).index = i
row.operator("font.textbox_remove", text="", icon='X', emboss=False).index = i
class DATA_PT_custom_props_curve(CurveButtonsPanel, PropertyPanel, Panel):

View File

@@ -969,7 +969,7 @@ class DATA_PT_modifiers(ModifierButtonsPanel, Panel):
layout.label(text="Selected Vertices:")
split = layout.split()
col = split.column(align=True)
col.operator("object.skin_loose_mark_clear", text="Mark Loose").action = "MARK"
col.operator("object.skin_loose_mark_clear", text="Clear Loose").action = "CLEAR"
@@ -983,6 +983,6 @@ class DATA_PT_modifiers(ModifierButtonsPanel, Panel):
col.prop(md, "use_x_symmetry")
col.prop(md, "use_y_symmetry")
col.prop(md, "use_z_symmetry")
if __name__ == "__main__": # only for live edit.
bpy.utils.register_module(__name__)

View File

@@ -50,6 +50,8 @@ class PHYSICS_PT_game_physics(PhysicsButtonsPanel, Panel):
physics_type = game.physics_type
if physics_type == 'CHARACTER':
layout.prop(game, "use_actor")
layout.prop(ob, "hide_render", text="Invisible") # out of place but useful
layout.prop(game, "step_height", slider=True)
layout.prop(game, "jump_speed")
layout.prop(game, "fall_speed")

View File

@@ -105,13 +105,13 @@ class OBJECT_PT_transform_locks(ObjectButtonsPanel, Panel):
ob = context.object
split = layout.split(percentage=0.1)
col = split.column(align=True)
col.label(text="")
col.label(text="X:")
col.label(text="Y:")
col.label(text="Z:")
col = split.row()
col.column().prop(ob, "lock_location", text="Location")
col.column().prop(ob, "lock_rotation", text="Rotation")
@@ -120,7 +120,7 @@ class OBJECT_PT_transform_locks(ObjectButtonsPanel, Panel):
if ob.rotation_mode in {'QUATERNION', 'AXIS_ANGLE'}:
row = layout.row()
row.prop(ob, "lock_rotations_4d", text="Lock Rotation")
sub = row.row()
sub.active = ob.lock_rotations_4d
sub.prop(ob, "lock_rotation_w", text="W")
@@ -159,7 +159,7 @@ class OBJECT_PT_groups(ObjectButtonsPanel, Panel):
def draw(self, context):
layout = self.layout
ob = context.object
obj = context.object
row = layout.row(align=True)
row.operator("object.group_link", text="Add to Group")
@@ -167,8 +167,13 @@ class OBJECT_PT_groups(ObjectButtonsPanel, Panel):
# XXX, this is bad practice, yes, I wrote it :( - campbell
index = 0
obj_name = obj.name
for group in bpy.data.groups:
if ob.name in group.objects:
# XXX this is slow and stupid!, we need 2 checks, one thats fast
# and another that we can be sure its not a name collission
# from linked library data
group_objects = group.objects
if obj_name in group.objects and obj in group_objects[:]:
col = layout.column(align=True)
col.context_pointer_set("group", group)
@@ -304,7 +309,7 @@ class OBJECT_PT_motion_paths(MotionPathButtonsPanel, Panel):
layout = self.layout
ob = context.object
avs = ob.animation_visualisation
avs = ob.animation_visualization
mpath = ob.motion_path
self.draw_settings(context, avs, mpath)
@@ -321,7 +326,7 @@ class OBJECT_PT_onion_skinning(OnionSkinButtonsPanel): # , Panel): # inherit fr
def draw(self, context):
ob = context.object
self.draw_settings(context, ob.animation_visualisation)
self.draw_settings(context, ob.animation_visualization)
class OBJECT_PT_custom_props(ObjectButtonsPanel, PropertyPanel, Panel):

View File

@@ -195,7 +195,7 @@ class PHYSICS_PT_collision(PhysicButtonsPanel, Panel):
col = split.column()
col.label(text="Particle:")
col.prop(settings, "permeability", slider=True)
col.prop(settings, "stickness")
col.prop(settings, "stickiness")
col.prop(settings, "use_particle_kill")
col.label(text="Particle Damping:")
sub = col.column(align=True)

View File

@@ -117,10 +117,14 @@ class PHYSICS_PT_fluid(PhysicButtonsPanel, Panel):
col.prop(fluid, "use_animated_mesh")
col = split.column()
col.label(text="Slip Type:")
col.prop(fluid, "slip_type", text="")
subsplit = col.split()
subcol = subsplit.column()
if fluid.use_animated_mesh:
subcol.enabled = False
subcol.label(text="Slip Type:")
subcol.prop(fluid, "slip_type", text="")
if fluid.slip_type == 'PARTIALSLIP':
col.prop(fluid, "partial_slip_factor", slider=True, text="Amount")
subcol.prop(fluid, "partial_slip_factor", slider=True, text="Amount")
col.label(text="Impact:")
col.prop(fluid, "impact_factor", text="Factor")

View File

@@ -526,7 +526,7 @@ class RENDER_PT_encoding(RenderButtonsPanel, Panel):
if ffmpeg.format in {'AVI', 'QUICKTIME', 'MKV', 'OGG'}:
split.prop(ffmpeg, "codec")
elif rd.ffmpeg.format == 'H264':
split.prop(ffmpeg, 'use_lossless_output')
split.prop(ffmpeg, "use_lossless_output")
else:
split.label()
@@ -575,7 +575,7 @@ class RENDER_PT_bake(RenderButtonsPanel, Panel):
multires_bake = False
if rd.bake_type in ['NORMALS', 'DISPLACEMENT']:
layout.prop(rd, 'use_bake_multires')
layout.prop(rd, "use_bake_multires")
multires_bake = rd.use_bake_multires
if not multires_bake:

View File

@@ -165,7 +165,7 @@ class SCENE_PT_keying_set_paths(SceneButtonsPanel, Panel):
row.label(text="Array Target:")
row.prop(ksp, "use_entire_array", text="All Items")
if ksp.use_entire_array:
row.label(text=" ") # padding
row.label(text=" ") # padding
else:
row.prop(ksp, "array_index", text="Index")
@@ -177,7 +177,7 @@ class SCENE_PT_keying_set_paths(SceneButtonsPanel, Panel):
col.prop(ksp, "group_method", text="")
if ksp.group_method == 'NAMED':
col.prop(ksp, "group")
col = row.column(align=True)
col.label(text="Keyframing Settings:")
col.prop(ksp, "bl_options")

View File

@@ -23,6 +23,7 @@ from bpy.types import Menu, Panel
from bpy.types import (Brush,
Lamp,
Material,
Object,
ParticleSettings,
Texture,
World)
@@ -80,6 +81,15 @@ def context_tex_datablock(context):
return idblock
def id_tex_datablock(bid):
if isinstance(bid, Object):
if bid.type == 'LAMP':
return bid.data
return bid.active_material
return bid
class TextureButtonsPanel():
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
@@ -114,7 +124,7 @@ class TEXTURE_PT_context_texture(TextureButtonsPanel, Panel):
pin_id = space.pin_id
if space.use_pin_id and not isinstance(pin_id, Texture):
idblock = pin_id
idblock = id_tex_datablock(pin_id)
pin_id = None
if not space.use_pin_id:

View File

@@ -48,7 +48,7 @@ class CLIP_HT_header(Header):
sub.menu("CLIP_MT_clip")
row = layout.row()
row.template_ID(sc, "clip", open='clip.open')
row.template_ID(sc, "clip", open="clip.open")
if clip:
tracking = clip.tracking
@@ -95,7 +95,8 @@ class CLIP_HT_header(Header):
row = layout.row(align=True)
row.prop(dopesheet, "sort_method", text="")
row.prop(dopesheet, "use_invert_sort", text="Invert", toggle=True)
row.prop(dopesheet, "use_invert_sort",
text="Invert", toggle=True)
else:
layout.prop(sc, "view", text="", expand=True)
@@ -121,7 +122,7 @@ class CLIP_HT_header(Header):
sub.menu("CLIP_MT_clip")
row = layout.row()
row.template_ID(sc, "clip", open='clip.open')
row.template_ID(sc, "clip", open="clip.open")
layout.prop(sc, "mode", text="")
@@ -255,7 +256,7 @@ class CLIP_PT_tools_marker(CLIP_PT_tracking_panel, Panel):
col.prop(settings, "default_pattern_match", text="")
col.separator()
col.operator('clip.track_settings_as_default',
col.operator("clip.track_settings_as_default",
text="Copy From Active Track")
@@ -340,9 +341,9 @@ class CLIP_PT_tools_cleanup(CLIP_PT_tracking_panel, Panel):
layout.operator("clip.clean_tracks")
layout.prop(settings, 'clean_frames', text="Frames")
layout.prop(settings, 'clean_error', text="Error")
layout.prop(settings, 'clean_action', text="")
layout.prop(settings, "clean_frames", text="Frames")
layout.prop(settings, "clean_error", text="Error")
layout.prop(settings, "clean_action", text="")
class CLIP_PT_tools_geometry(CLIP_PT_reconstruction_panel, Panel):
@@ -553,7 +554,8 @@ class CLIP_PT_track(CLIP_PT_tracking_panel, Panel):
row.separator()
sub = row.row()
sub.prop(act_track, "use_alpha_preview", text="", toggle=True, icon='IMAGE_ALPHA')
sub.prop(act_track, "use_alpha_preview",
text="", toggle=True, icon='IMAGE_ALPHA')
layout.separator()
@@ -735,7 +737,9 @@ class CLIP_PT_active_mask_point(Panel):
mask = sc.mask
if mask and sc.mode == 'MASKEDIT':
return mask.layers.active and mask.layers.active.splines.active_point
mask_layer_active = mask.layers.active
return (mask_layer_active and
mask_layer_active.splines.active_point)
return False
@@ -979,7 +983,7 @@ class CLIP_PT_proxy(CLIP_PT_clip_view_panel, Panel):
layout.prop(clip.proxy, "quality")
layout.prop(clip, 'use_proxy_custom_directory')
layout.prop(clip, "use_proxy_custom_directory")
if clip.use_proxy_custom_directory:
layout.prop(clip.proxy, "directory")
@@ -1053,7 +1057,8 @@ class CLIP_MT_view(Menu):
for a, b in ratios:
text = "Zoom %d:%d" % (a, b)
layout.operator("clip.view_zoom_ratio", text=text).ratio = a / b
layout.operator("clip.view_zoom_ratio",
text=text).ratio = a / b
else:
layout.prop(sc, "show_seconds")
layout.separator()
@@ -1203,16 +1208,20 @@ class CLIP_MT_select(Menu):
layout.separator()
layout.operator("mask.select_all").action = 'TOGGLE'
layout.operator("mask.select_all", text="Inverse").action = 'INVERT'
layout.operator("mask.select_all"
).action = 'TOGGLE'
layout.operator("mask.select_all",
text="Inverse").action = 'INVERT'
else:
layout.operator("clip.select_border")
layout.operator("clip.select_circle")
layout.separator()
layout.operator("clip.select_all").action = 'TOGGLE'
layout.operator("clip.select_all", text="Inverse").action = 'INVERT'
layout.operator("clip.select_all"
).action = 'TOGGLE'
layout.operator("clip.select_all",
text="Inverse").action = 'INVERT'
layout.menu("CLIP_MT_select_grouped")
@@ -1317,6 +1326,7 @@ class CLIP_MT_mask_animation(Menu):
layout.operator("mask.shape_key_clear")
layout.operator("mask.shape_key_insert")
layout.operator("mask.shape_key_feather_reset")
layout.operator("mask.shape_key_rekey")
class CLIP_MT_camera_presets(Menu):
@@ -1349,7 +1359,7 @@ class CLIP_MT_track_color_specials(Menu):
def draw(self, context):
layout = self.layout
layout.operator('clip.track_copy_color', icon='COPY_ID')
layout.operator("clip.track_copy_color", icon='COPY_ID')
class CLIP_MT_stabilize_2d_specials(Menu):
@@ -1358,7 +1368,7 @@ class CLIP_MT_stabilize_2d_specials(Menu):
def draw(self, context):
layout = self.layout
layout.operator('clip.stabilize_2d_select')
layout.operator("clip.stabilize_2d_select")
if __name__ == "__main__": # only for live edit.
bpy.utils.register_module(__name__)

View File

@@ -371,16 +371,16 @@ class INFO_MT_help(Menu):
def draw(self, context):
layout = self.layout
layout.operator("wm.url_open", text="Manual", icon='HELP').url = 'http://wiki.blender.org/index.php/Doc:2.6/Manual'
layout.operator("wm.url_open", text="Release Log", icon='URL').url = 'http://www.blender.org/development/release-logs/blender-263/'
layout.operator("wm.url_open", text="Manual", icon='HELP').url = "http://wiki.blender.org/index.php/Doc:2.6/Manual"
layout.operator("wm.url_open", text="Release Log", icon='URL').url = "http://www.blender.org/development/release-logs/blender-263"
layout.separator()
layout.operator("wm.url_open", text="Blender Website", icon='URL').url = 'http://www.blender.org/'
layout.operator("wm.url_open", text="Blender e-Shop", icon='URL').url = 'http://www.blender.org/e-shop'
layout.operator("wm.url_open", text="Developer Community", icon='URL').url = 'http://www.blender.org/community/get-involved/'
layout.operator("wm.url_open", text="User Community", icon='URL').url = 'http://www.blender.org/community/user-community/'
layout.operator("wm.url_open", text="Blender Website", icon='URL').url = "http://www.blender.org"
layout.operator("wm.url_open", text="Blender e-Shop", icon='URL').url = "http://www.blender.org/e-shop"
layout.operator("wm.url_open", text="Developer Community", icon='URL').url = "http://www.blender.org/community/get-involved"
layout.operator("wm.url_open", text="User Community", icon='URL').url = "http://www.blender.org/community/user-community"
layout.separator()
layout.operator("wm.url_open", text="Report a Bug", icon='URL').url = 'http://projects.blender.org/tracker/?atid=498&group_id=9&func=browse'
layout.operator("wm.url_open", text="Report a Bug", icon='URL').url = "http://projects.blender.org/tracker/?atid=498&group_id=9&func=browse"
layout.separator()
layout.operator("wm.url_open", text="Python API Reference", icon='URL').url = bpy.types.WM_OT_doc_view._prefix

View File

@@ -32,6 +32,7 @@ class NODE_HT_header(Header):
snode = context.space_data
snode_id = snode.id
id_from = snode.id_from
toolsettings = context.tool_settings
row = layout.row(align=True)
row.template_header()
@@ -86,6 +87,13 @@ class NODE_HT_header(Header):
layout.separator()
# Snap
row = layout.row(align=True)
row.prop(toolsettings, "use_snap", text="")
row.prop(toolsettings, "snap_node_element", text="", icon_only=True)
if toolsettings.snap_node_element != 'INCREMENT':
row.prop(toolsettings, "snap_target", text="")
layout.template_running_jobs()
@@ -205,10 +213,11 @@ class NODE_PT_properties(Panel):
col.prop(snode, "backdrop_y", text="Y")
col.operator("node.backimage_move", text="Move")
class NODE_PT_quality(bpy.types.Panel):
bl_space_type = 'NODE_EDITOR'
bl_region_type = 'UI'
bl_label = "Quality"
bl_label = "Performance"
@classmethod
def poll(cls, context):
@@ -224,8 +233,10 @@ class NODE_PT_quality(bpy.types.Panel):
layout.prop(tree, "edit_quality", text="Edit")
layout.prop(tree, "chunk_size")
layout.prop(tree, "use_opencl")
layout.prop(tree, "two_pass")
layout.prop(snode, "show_highlight")
class NODE_MT_node_color_presets(Menu):
"""Predefined node color"""
bl_label = "Color Presets"
@@ -240,8 +251,8 @@ class NODE_MT_node_color_specials(Menu):
def draw(self, context):
layout = self.layout
layout.operator('node.node_copy_color', icon='COPY_ID')
layout.operator("node.node_copy_color", icon='COPY_ID')
if __name__ == "__main__": # only for live edit.
bpy.utils.register_module(__name__)

View File

@@ -256,7 +256,7 @@ class SEQUENCER_MT_strip(Menu):
layout.operator("sequencer.rebuild_proxy")
layout.separator()
layout.operator("sequencer.duplicate")
layout.operator("sequencer.duplicate_move")
layout.operator("sequencer.delete")
strip = act_strip(context)
@@ -352,7 +352,7 @@ class SEQUENCER_PT_edit(SequencerButtonsPanel, Panel):
def draw(self, context):
layout = self.layout
scene = context.scene
frame_current = scene.frame_current
strip = act_strip(context)
@@ -474,7 +474,7 @@ class SEQUENCER_PT_effect(SequencerButtonsPanel, Panel):
elif strip.type == 'TRANSFORM':
layout = self.layout
col = layout.column()
col.prop(strip, "interpolation")
col.prop(strip, "translation_unit")
col = layout.column(align=True)
@@ -483,7 +483,7 @@ class SEQUENCER_PT_effect(SequencerButtonsPanel, Panel):
col.prop(strip, "translate_start_y", text="Y")
layout.separator()
col = layout.column(align=True)
col.prop(strip, "use_uniform_scale")
if (strip.use_uniform_scale):
@@ -568,10 +568,10 @@ class SEQUENCER_PT_input(SequencerButtonsPanel, Panel):
split = layout.split(percentage=0.2)
split.label(text="Path:")
split.prop(strip, "filepath", text="")
layout.prop(strip, "mpeg_preseek")
layout.prop(strip, "stream_index")
layout.prop(strip, "use_translation", text="Image Offset")
if strip.use_translation:
col = layout.column(align=True)
@@ -740,13 +740,13 @@ class SEQUENCER_PT_filter(SequencerButtonsPanel, Panel):
col = layout.column()
col.label(text="Distortion:")
col.prop(strip, "undistort")
split = layout.split(percentage=0.65)
col = split.column()
col.prop(strip, "use_reverse_frames", text="Backwards")
col.prop(strip, "use_deinterlace")
col = split.column()
col.label(text="Flip:")
col.prop(strip, "use_flip_x", text="X")

View File

@@ -245,7 +245,7 @@ class USERPREF_PT_interface(Panel):
col.prop(view, "show_splash")
if os.name == 'nt':
if os.name == "nt":
col.prop(view, "quit_dialog")
@@ -419,7 +419,7 @@ class USERPREF_PT_system(Panel):
col.separator()
col.separator()
if hasattr(system, 'compute_device'):
if hasattr(system, "compute_device"):
col.label(text="Compute Device:")
col.row().prop(system, "compute_device_type", expand=True)
sub = col.row()
@@ -1094,7 +1094,7 @@ class USERPREF_PT_addons(Panel):
rowsub = row.row()
rowsub.active = is_enabled
rowsub.label(text='%s: %s' % (info['category'], info["name"]))
rowsub.label(text='%s: %s' % (info["category"], info["name"]))
if info["warning"]:
rowsub.label(icon='ERROR')

View File

@@ -979,7 +979,9 @@ class VIEW3D_MT_object_group(Menu):
layout = self.layout
layout.operator("group.create")
# layout.operator_menu_enum("group.objects_remove", "group") # BUGGY
layout.operator("group.objects_remove")
layout.operator("group.objects_remove_all")
layout.separator()
@@ -2391,7 +2393,7 @@ class VIEW3D_PT_view3d_display(Panel):
col.prop(gs, "material_mode", text="")
col.prop(view, "show_textured_solid")
col.prop(view, "show_backface_culling")
col.prop(view, "show_backface_culling")
layout.separator()
@@ -2556,7 +2558,7 @@ class VIEW3D_PT_background_image(Panel):
has_bg = True
elif bg.source == 'MOVIE_CLIP':
box.prop(bg, 'use_camera_clip')
box.prop(bg, "use_camera_clip")
column = box.column()
column.active = not bg.use_camera_clip

View File

@@ -676,6 +676,8 @@ class VIEW3D_PT_tools_brush(Panel, View3DPaintPanel):
row.prop(brush, "jitter", slider=True)
row.prop(brush, "use_pressure_jitter", toggle=True, text="")
col.prop(brush, "vertex_tool", text="Blend")
# Vertex Paint Mode #
elif context.vertex_paint_object and brush:
col = layout.column()
@@ -695,6 +697,8 @@ class VIEW3D_PT_tools_brush(Panel, View3DPaintPanel):
#row.prop(brush, "jitter", slider=True)
#row.prop(brush, "use_pressure_jitter", toggle=True, text="")
col.prop(brush, "vertex_tool", text="Blend")
class VIEW3D_PT_tools_brush_texture(Panel, View3DPaintPanel):
bl_label = "Texture"
@@ -988,7 +992,7 @@ class VIEW3D_PT_tools_weightpaint_options(Panel, View3DPaintPanel):
col.prop(mesh, "use_mirror_topology")
col.prop(wpaint, "input_samples")
self.unified_paint_settings(col, context)
# Commented out because the Apply button isn't an operator yet, making these settings useless
@@ -1139,7 +1143,7 @@ class VIEW3D_MT_tools_projectpaint_stencil(Menu):
class VIEW3D_PT_tools_particlemode(View3DPanel, Panel):
'''default tools for particle mode'''
"""default tools for particle mode"""
bl_context = "particlemode"
bl_label = "Options"

View File

@@ -18,7 +18,7 @@ from bpy.types import Operator
class ExportSomeData(Operator, ExportHelper):
'''This appears in the tooltip of the operator and in the generated docs'''
"""This appears in the tooltip of the operator and in the generated docs"""
bl_idname = "export_test.some_data" # important since its how bpy.ops.import_test.some_data is constructed
bl_label = "Export Some Data"

View File

@@ -21,7 +21,7 @@ from bpy.types import Operator
class ImportSomeData(Operator, ImportHelper):
'''This appears in the tooltip of the operator and in the generated docs'''
"""This appears in the tooltip of the operator and in the generated docs"""
bl_idname = "import_test.some_data" # important since its how bpy.ops.import_test.some_data is constructed
bl_label = "Import Some Data"

View File

@@ -37,7 +37,7 @@ from bpy.props import FloatProperty, BoolProperty, FloatVectorProperty
class AddBox(bpy.types.Operator):
'''Add a simple box mesh'''
"""Add a simple box mesh"""
bl_idname = "mesh.primitive_box_add"
bl_label = "Add Box"
bl_options = {'REGISTER', 'UNDO'}

View File

@@ -3,7 +3,7 @@ from bpy.props import IntProperty, FloatProperty
class ModalOperator(bpy.types.Operator):
'''Move an object with the mouse, example.'''
"""Move an object with the mouse, example"""
bl_idname = "object.modal_operator"
bl_label = "Simple Modal Operator"

View File

@@ -31,7 +31,7 @@ def draw_callback_px(self, context):
class ModalDrawOperator(bpy.types.Operator):
'''Draw a line with the mouse'''
"""Draw a line with the mouse"""
bl_idname = "view3d.modal_operator"
bl_label = "Simple Modal View3D Operator"

View File

@@ -2,7 +2,7 @@ import bpy
class ModalTimerOperator(bpy.types.Operator):
'''Operator which runs its self from a timer.'''
"""Operator which runs its self from a timer"""
bl_idname = "wm.modal_timer_operator"
bl_label = "Modal Timer Operator"

View File

@@ -4,7 +4,7 @@ from bpy.props import FloatVectorProperty
class ViewOperator(bpy.types.Operator):
'''Translate the view using mouse events.'''
"""Translate the view using mouse events"""
bl_idname = "view3d.modal_operator"
bl_label = "Simple View Operator"

View File

@@ -0,0 +1,107 @@
import bpy
from mathutils import Vector
from bpy_extras import view3d_utils
def main(context, event, ray_max=10000.0):
"""Run this function on left mouse, execute the ray cast"""
# get the context arguments
scene = context.scene
region = context.region
rv3d = context.region_data
coord = event.mouse_region_x, event.mouse_region_y
# get the ray from the viewport and mouse
view_vector = view3d_utils.region_2d_to_vector_3d(region, rv3d, coord)
ray_origin = view3d_utils.region_2d_to_origin_3d(region, rv3d, coord)
ray_target = ray_origin + (view_vector * ray_max)
scene.cursor_location = ray_target
def visible_objects_and_duplis():
"""Loop over (object, matrix) pairs (mesh only)"""
for obj in context.visible_objects:
if obj.type == 'MESH':
yield (obj, obj.matrix_world.copy())
if obj.dupli_type != 'NONE':
obj.dupli_list_create(scene)
for dob in obj.dupli_list:
obj_dupli = dob.object
if obj_dupli.type == 'MESH':
yield (obj_dupli, dob.matrix.copy())
obj.dupli_list_clear()
def obj_ray_cast(obj, matrix):
"""Wrapper for ray casting that moves the ray into object space"""
# get the ray relative to the object
matrix_inv = matrix.inverted()
ray_origin_obj = matrix_inv * ray_origin
ray_target_obj = matrix_inv * ray_target
# cast the ray
hit, normal, face_index = obj.ray_cast(ray_origin_obj, ray_target_obj)
if face_index != -1:
return hit, normal, face_index
else:
return None, None, None
# cast rays and find the closest object
best_length_squared = ray_max * ray_max
best_obj = None
for obj, matrix in visible_objects_and_duplis():
if obj.type == 'MESH':
hit, normal, face_index = obj_ray_cast(obj, matrix)
if hit is not None:
length_squared = (hit - ray_origin).length_squared
if length_squared < best_length_squared:
best_length_squared = length_squared
best_obj = obj
# now we have the object under the mouse cursor,
# we could do lots of stuff but for the example just select.
if best_obj is not None:
best_obj.select = True
class ViewOperatorRayCast(bpy.types.Operator):
"""Modal object selection with a ray cast"""
bl_idname = "view3d.modal_operator_raycast"
bl_label = "RayCast View Operator"
def modal(self, context, event):
if event.type in {'MIDDLEMOUSE', 'WHEELUPMOUSE', 'WHEELDOWNMOUSE'}:
# allow navigation
return {'PASS_THROUGH'}
elif event.type == 'LEFTMOUSE':
main(context, event)
return {'RUNNING_MODAL'}
elif event.type in {'RIGHTMOUSE', 'ESC'}:
return {'CANCELLED'}
return {'RUNNING_MODAL'}
def invoke(self, context, event):
if context.space_data.type == 'VIEW_3D':
context.window_manager.modal_handler_add(self)
return {'RUNNING_MODAL'}
else:
self.report({'WARNING'}, "Active space must be a View3d")
return {'CANCELLED'}
def register():
bpy.utils.register_class(ViewOperatorRayCast)
def unregister():
bpy.utils.unregister_class(ViewOperatorRayCast)
if __name__ == "__main__":
register()

View File

@@ -1,7 +1,7 @@
import bpy
def main(operator, context):
def main(context):
space = context.space_data
node_tree = space.node_tree
node_active = context.active_node
@@ -33,7 +33,7 @@ def main(operator, context):
class NodeOperator(bpy.types.Operator):
'''Tooltip'''
"""Tooltip"""
bl_idname = "node.simple_operator"
bl_label = "Simple Node Operator"

View File

@@ -7,7 +7,7 @@ def main(context):
class SimpleOperator(bpy.types.Operator):
'''Tooltip'''
"""Tooltip"""
bl_idname = "object.simple_operator"
bl_label = "Simple Object Operator"

View File

@@ -27,7 +27,7 @@ def main(context):
class UvOperator(bpy.types.Operator):
'''UV Operator description'''
"""UV Operator description"""
bl_idname = "uv.simple_operator"
bl_label = "Simple UV Operator"