Merging r51923 through r52851 from trunk into soc-2011-tomato
@@ -232,7 +232,6 @@ def enable(module_name, default_set=True, persistent=False):
import os
import sys
import imp

def handle_error():
import traceback
@@ -246,6 +245,7 @@ def enable(module_name, default_set=True, persistent=False):
mtime_orig = getattr(mod, "__time__", 0)
mtime_new = os.path.getmtime(mod.__file__)
if mtime_orig != mtime_new:
import imp
print("module changed on disk:", mod.__file__, "reloading...")

try:
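The second hunk compares the mtime recorded on the module at load time with the file's current mtime and reloads the module when they differ. A minimal standalone sketch of that pattern (the `__time__` bookkeeping and helper name are illustrative; Blender's addon_utils does more around this):

import imp  # deprecated in favour of importlib, but this is what the 2.6x code used
import os

def reload_if_changed(mod):
    # Compare the mtime recorded at load time with the file's current mtime.
    mtime_orig = getattr(mod, "__time__", 0)
    mtime_new = os.path.getmtime(mod.__file__)
    if mtime_orig != mtime_new:
        print("module changed on disk:", mod.__file__, "reloading...")
        try:
            imp.reload(mod)
            mod.__time__ = mtime_new
        except Exception:
            import traceback
            traceback.print_exc()
    return mod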
@@ -40,46 +40,46 @@ LANGUAGES_CATEGORIES = (
)
LANGUAGES = (
# ID, UI english label, ISO code.
( 0, "Default (Default)", "DEFAULT", ""),
( 1, "English (English)", "en_US", "english"),
( 2, "Japanese (日本語)", "ja_JP", "japanese"),
( 3, "Dutch (Nederlandse taal)", "nl_NL", "dutch"),
( 4, "Italian (Italiano)", "it_IT", "italian"),
( 5, "German (Deutsch)", "de_DE", "german"),
( 6, "Finnish (Suomi)", "fi_FI", "finnish"),
( 7, "Swedish (Svenska)", "sv_SE", "swedish"),
( 8, "French (Français)", "fr_FR", "french"),
( 9, "Spanish (Español)", "es", "spanish"),
(10, "Catalan (Català)", "ca_AD", "catalan"),
(11, "Czech (Český)", "cs_CZ", "czech"),
(12, "Portuguese (Português)", "pt_PT", "portuguese_portugal"),
(13, "Simplified Chinese (简体中文)", "zh_CN", "Chinese (Simplified)_China.1252"),
(14, "Traditional Chinese (繁體中文)", "zh_TW", "Chinese (Traditional)_China.1252"),
(15, "Russian (Русский)", "ru_RU", "russian"),
(16, "Croatian (Hrvatski)", "hr_HR", "croatian"),
(17, "Serbian (Српски)", "sr_RS", "serbian"),
(18, "Ukrainian (Український)", "uk_UA", "ukrainian"),
(19, "Polish (Polski)", "pl_PL", "polish"),
(20, "Romanian (Român)", "ro_RO", "romanian"),
( 0, "Default (Default)", "DEFAULT"),
( 1, "English (English)", "en_US"),
( 2, "Japanese (日本語)", "ja_JP"),
( 3, "Dutch (Nederlandse taal)", "nl_NL"),
( 4, "Italian (Italiano)", "it_IT"),
( 5, "German (Deutsch)", "de_DE"),
( 6, "Finnish (Suomi)", "fi_FI"),
( 7, "Swedish (Svenska)", "sv_SE"),
( 8, "French (Français)", "fr_FR"),
( 9, "Spanish (Español)", "es"),
(10, "Catalan (Català)", "ca_AD"),
(11, "Czech (Český)", "cs_CZ"),
(12, "Portuguese (Português)", "pt_PT"),
(13, "Simplified Chinese (简体中文)", "zh_CN"),
(14, "Traditional Chinese (繁體中文)", "zh_TW"),
(15, "Russian (Русский)", "ru_RU"),
(16, "Croatian (Hrvatski)", "hr_HR"),
(17, "Serbian (Српски)", "sr_RS"),
(18, "Ukrainian (Український)", "uk_UA"),
(19, "Polish (Polski)", "pl_PL"),
(20, "Romanian (Român)", "ro_RO"),
# Using the utf8 flipped form of Arabic (العربية).
(21, "Arabic (ﺔﻴﺑﺮﻌﻟﺍ)", "ar_EG", "arabic"),
(22, "Bulgarian (Български)", "bg_BG", "bulgarian"),
(23, "Greek (Ελληνικά)", "el_GR", "greek"),
(24, "Korean (한국 언어)", "ko_KR", "korean"),
(25, "Nepali (नेपाली)", "ne_NP", "nepali"),
(21, "Arabic (ﺔﻴﺑﺮﻌﻟﺍ)", "ar_EG"),
(22, "Bulgarian (Български)", "bg_BG"),
(23, "Greek (Ελληνικά)", "el_GR"),
(24, "Korean (한국 언어)", "ko_KR"),
(25, "Nepali (नेपाली)", "ne_NP"),
# Using the utf8 flipped form of Persian (فارسی).
(26, "Persian (ﯽﺳﺭﺎﻓ)", "fa_IR", "farsi"),
(27, "Indonesian (Bahasa indonesia)", "id_ID", "indonesian"),
(28, "Serbian Latin (Srpski latinica)", "sr_RS@latin", "serbian (latin)"),
(29, "Kyrgyz (Кыргыз тили)", "ky_KG", "kyrgyz"),
(30, "Turkish (Türkçe)", "tr_TR", "turkish"),
(31, "Hungarian (Magyar)", "hu_HU", "hungarian"),
(32, "Brazilian Portuguese (Português do Brasil)", "pt_BR", "protuguese_brazil"),
(26, "Persian (ﯽﺳﺭﺎﻓ)", "fa_IR"),
(27, "Indonesian (Bahasa indonesia)", "id_ID"),
(28, "Serbian Latin (Srpski latinica)", "sr_RS@latin"),
(29, "Kyrgyz (Кыргыз тили)", "ky_KG"),
(30, "Turkish (Türkçe)", "tr_TR"),
(31, "Hungarian (Magyar)", "hu_HU"),
(32, "Brazilian Portuguese (Português do Brasil)", "pt_BR"),
# Using the utf8 flipped form of Hebrew (עִבְרִית)).
(33, "Hebrew (תירִבְעִ)", "he_IL", "hebrew"),
(34, "Estonian (Eestlane)", "et_EE", "estonian"),
(35, "Esperanto (Esperanto)", "eo", "esperanto"),
(36, "Spanish from Spain (Español de España)", "es_ES", "spanish_spain"),
(33, "Hebrew (תירִבְעִ)", "he_IL"),
(34, "Estonian (Eestlane)", "et_EE"),
(35, "Esperanto (Esperanto)", "eo"),
(36, "Spanish from Spain (Español de España)", "es_ES"),
)

# Name of language file used by Blender to generate translations' menu.
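The LANGUAGES entries shrink from 4-tuples (ID, UI label, ISO code, Windows locale name) to 3-tuples, so any code that unpacked the fourth field has to follow (see the find_matching_po and main() hunks further down). A minimal illustrative sketch, not taken from the commit; the two sample entries are just examples:

# Illustrative sketch only: consuming the trimmed 3-tuple LANGUAGES entries.
LANGUAGES = (
    (0, "Default (Default)", "DEFAULT"),
    (8, "French (Français)", "fr_FR"),
)

for uid, label, iso_code in LANGUAGES:
    # The Windows locale name ("french", ...) is no longer carried around.
    print(uid, label, iso_code)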
@@ -114,6 +114,9 @@ DOMAIN = "blender"
# File type (ext) to parse.
PYGETTEXT_ALLOWED_EXTS = {".c", ".cpp", ".cxx", ".hpp", ".hxx", ".h"}

# Max number of contexts into a BLF_I18N_MSGID_MULTI_CTXT macro...
PYGETTEXT_MAX_MULTI_CTXT = 16

# Where to search contexts definitions, relative to SOURCE_DIR (defined below).
PYGETTEXT_CONTEXTS_DEFSRC = os.path.join("source", "blender", "blenfont",
"BLF_translation.h")
@@ -149,7 +152,10 @@ _str_whole_re = (
# End of loop.
"))*"
)
_ctxt_re = r"(?P<ctxt_raw>(?:" + _str_whole_re.format(_="_ctxt") + r")|(?:[A-Z_0-9]+))"
_ctxt_re_gen = lambda uid : r"(?P<ctxt_raw{uid}>(?:".format(uid=uid) + \
_str_whole_re.format(_="_ctxt{uid}".format(uid=uid)) + \
r")|(?:[A-Z_0-9]+))"
_ctxt_re = _ctxt_re_gen("")
_msg_re = r"(?P<msg_raw>" + _str_whole_re.format(_="_msg") + r")"
PYGETTEXT_KEYWORDS = (() +
tuple((r"{}\(\s*" + _msg_re + r"\s*\)").format(it)
@@ -165,7 +171,11 @@ PYGETTEXT_KEYWORDS = (() +
for it in ("BMO_error_raise",)) +

tuple(("{}\\((?:[^\"',]+,)\\s*" + _msg_re + r"\s*(?:\)|,)").format(it)
for it in ("modifier_setError",))
for it in ("modifier_setError",)) +

tuple((r"{}\(\s*" + _msg_re + r"\s*,\s*(?:" + \
r"\s*,\s*)?(?:".join(_ctxt_re_gen(i) for i in range(PYGETTEXT_MAX_MULTI_CTXT)) + r")?\s*\)").format(it)
for it in ("BLF_I18N_MSGID_MULTI_CTXT",))
)

ESCAPE_RE = (
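The new _ctxt_re_gen lambda produces one named group per context slot, so a single keyword regex can capture up to PYGETTEXT_MAX_MULTI_CTXT contexts from a BLF_I18N_MSGID_MULTI_CTXT call. A deliberately reduced, self-contained sketch of the same idea (this is not the pattern from settings.py: it only accepts double-quoted literals and requires exactly MAX_MULTI_CTXT contexts):

import re

MAX_MULTI_CTXT = 3  # the real settings.py uses 16

# One named group per context slot, mirroring what _ctxt_re_gen(uid) does.
_ctxt_re_gen = lambda uid: r'(?P<ctxt_raw{uid}>"[^"]*")'.format(uid=uid)

pattern = (r'BLF_I18N_MSGID_MULTI_CTXT\(\s*(?P<msg_raw>"[^"]*")\s*,\s*' +
           r'\s*,\s*'.join(_ctxt_re_gen(i) for i in range(MAX_MULTI_CTXT)) +
           r'\s*\)')

src = 'BLF_I18N_MSGID_MULTI_CTXT("Plain", "Operator", "Mesh", "Armature")'
m = re.search(pattern, src)
print(m.group("msg_raw"))
print([m.group("ctxt_raw{}".format(i)) for i in range(MAX_MULTI_CTXT)])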
@@ -77,6 +77,7 @@ dict_uimsgs = {
"boxpack",
"buffersize",
"builtin", "builtins",
"bytecode",
"chunksize",
"de",
"defocus",
@@ -425,12 +426,14 @@ dict_uimsgs = {
"fh",
"fov",
"fft",
"futura",
"gfx",
"gl",
"glsl",
"gpl",
"gpu", "gpus",
"hc",
"hdc",
"hdr",
"hh", "mm", "ss", "ff", # hh:mm:ss:ff timecode
"hsv", "hsva",
@@ -441,6 +444,7 @@ dict_uimsgs = {
"mux",
"ndof",
"ppc",
"precisa",
"px",
"qmc",
"rgb", "rgba",
@@ -502,6 +506,8 @@ dict_uimsgs = {
"mtl",
"ogg",
"openjpeg",
"osl",
"oso",
"piz",
"png",
"po",

@@ -53,7 +53,7 @@ FLAG_MESSAGES = {
def find_matching_po(languages, stats, forbidden):
"""Match languages defined in LANGUAGES setting to relevant po, if possible!"""
ret = []
for uid, label, org_key, long_loc in languages:
for uid, label, org_key, in languages:
key = org_key
if key not in stats:
# Try to simplify the key (eg from es_ES to es).
@@ -64,11 +64,11 @@ def find_matching_po(languages, stats, forbidden):
key = key + org_key[org_key.index('@'):]
if key in stats:
if key in forbidden:
ret.append((stats[key], uid, label, org_key, long_loc, FORBIDDEN))
ret.append((stats[key], uid, label, org_key, FORBIDDEN))
else:
ret.append((stats[key], uid, label, org_key, long_loc, OK))
ret.append((stats[key], uid, label, org_key, OK))
else:
ret.append((0.0, uid, label, org_key, long_loc, MISSING))
ret.append((0.0, uid, label, org_key, MISSING))
return ret

def main():
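With the fourth LANGUAGES field gone, find_matching_po now returns 5-tuples of (stat, uid, label, iso_key, flag) instead of 6-tuples. A rough sketch of how a caller unpacks the new shape (the flag constants and sample data below are placeholders, not taken from the script):

# Hypothetical stand-ins for the script's OK / MISSING flags.
OK, MISSING = 0, 3

# Shape returned by the updated find_matching_po(): no more long_loc field.
stats = [
    (0.92, 8, "French (Français)", "fr_FR", OK),
    (0.0, 25, "Nepali (नेपाली)", "ne_NP", MISSING),
]

for prop, uid, label, key, flag in stats:
    print("{:>3}%  {}  ({})".format(int(prop * 100), label, key))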
@@ -103,14 +103,14 @@ def main():
stats = sorted(stats, key=lambda it: it[0], reverse=True)
langs_cats = [[] for i in range(len(limits))]
highest_uid = 0
for prop, uid, label, key, long_loc, flag in stats:
for prop, uid, label, key, flag in stats:
if prop < limits[idx][0]:
# Sub-sort languages by iso-codes.
langs_cats[idx].sort(key=lambda it: it[2])
idx += 1
if prop < min_trans and flag == OK:
flag = TOOLOW
langs_cats[idx].append((uid, label, key, long_loc, flag))
langs_cats[idx].append((uid, label, key, flag))
if abs(uid) > highest_uid:
highest_uid = abs(uid)
# Sub-sort last group of languages by iso-codes!
@@ -120,7 +120,7 @@ def main():
f.write("# and to generate translation menu.\n")
f.write("#\n")
f.write("# File format:\n")
f.write("# ID:MENULABEL:ISOCODE:WINCODE\n")
f.write("# ID:MENULABEL:ISOCODE\n")
f.write("# ID must be unique, except for 0 value (marks categories for menu).\n")
f.write("# Line starting with a # are comments!\n")
f.write("#\n")
@@ -135,12 +135,12 @@ def main():
# Do not write the category if it has no language!
f.write("# Void category! #0:{}:\n".format(cat[1]))
# ...and all matching language entries!
for uid, label, key, long_loc, flag in langs_cat:
for uid, label, key, flag in langs_cat:
if flag == OK:
f.write("{}:{}:{}:{}\n".format(uid, label, key, long_loc))
f.write("{}:{}:{}\n".format(uid, label, key))
else:
# Non-existing, commented entry!
f.write("# {} #{}:{}:{}:{}\n".format(FLAG_MESSAGES[flag], uid, label, key, long_loc))
f.write("# {} #{}:{}:{}\n".format(FLAG_MESSAGES[flag], uid, label, key))


if __name__ == "__main__":
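After this change the generated languages file carries ID:MENULABEL:ISOCODE lines, dropping the Windows locale code column. A small illustrative sketch of what writing one category might look like (the entries, flag value and file name are made up for the example):

# Illustrative only: writing a category in the new ID:MENULABEL:ISOCODE format.
OK = 0  # placeholder flag constant
langs_cat = [(2, "Japanese (日本語)", "ja_JP", OK), (5, "German (Deutsch)", "de_DE", OK)]

with open("languages_menu_example.txt", "w", encoding="utf-8") as f:
    f.write("# ID:MENULABEL:ISOCODE\n")
    for uid, label, key, flag in langs_cat:
        f.write("{}:{}:{}\n".format(uid, label, key))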
@@ -52,6 +52,7 @@ SRC_POTFILES = settings.FILE_NAME_SRC_POTFILES

CONTEXT_DEFAULT = settings.CONTEXT_DEFAULT
PYGETTEXT_ALLOWED_EXTS = settings.PYGETTEXT_ALLOWED_EXTS
PYGETTEXT_MAX_MULTI_CTXT = settings.PYGETTEXT_MAX_MULTI_CTXT

SVN_EXECUTABLE = settings.SVN_EXECUTABLE

@@ -79,6 +80,31 @@ clean_str = lambda s: "".join(m.group("clean") for m in _clean_str(s))


def check_file(path, rel_path, messages):
def process_entry(ctxt, msg):
# Context.
if ctxt:
if ctxt in CONTEXTS:
ctxt = CONTEXTS[ctxt]
elif '"' in ctxt or "'" in ctxt:
ctxt = clean_str(ctxt)
else:
print("WARNING: raw context “{}” couldn’t be resolved!"
"".format(ctxt))
ctxt = CONTEXT_DEFAULT
else:
ctxt = CONTEXT_DEFAULT
# Message.
if msg:
if '"' in msg or "'" in msg:
msg = clean_str(msg)
else:
print("WARNING: raw message “{}” couldn’t be resolved!"
"".format(msg))
msg = ""
else:
msg = ""
return (ctxt, msg)

with open(path, encoding="utf-8") as f:
f = f.read()
for srch in pygettexts:
@@ -86,34 +112,23 @@ def check_file(path, rel_path, messages):
line = pos = 0
while m:
d = m.groupdict()
# Context.
ctxt = d.get("ctxt_raw")
if ctxt:
if ctxt in CONTEXTS:
ctxt = CONTEXTS[ctxt]
elif '"' in ctxt or "'" in ctxt:
ctxt = clean_str(ctxt)
else:
print("WARNING: raw context “{}” couldn’t be resolved!"
"".format(ctxt))
ctxt = CONTEXT_DEFAULT
else:
ctxt = CONTEXT_DEFAULT
# Message.
msg = d.get("msg_raw")
if msg:
if '"' in msg or "'" in msg:
msg = clean_str(msg)
else:
print("WARNING: raw message “{}” couldn’t be resolved!"
"".format(msg))
msg = ""
else:
msg = ""
# Line.
line += f[pos:m.start()].count('\n')
# And we are done for this item!
messages.setdefault((ctxt, msg), []).append(":".join((rel_path, str(line))))
msg = d.get("msg_raw")
# First, try the "multi-contexts" stuff!
ctxts = tuple(d.get("ctxt_raw{}".format(i)) for i in range(PYGETTEXT_MAX_MULTI_CTXT))
if ctxts[0]:
for ctxt in ctxts:
if not ctxt:
break
ctxt, _msg = process_entry(ctxt, msg)
# And we are done for this item!
messages.setdefault((ctxt, _msg), []).append(":".join((rel_path, str(line))))
else:
ctxt = d.get("ctxt_raw")
ctxt, msg = process_entry(ctxt, msg)
# And we are done for this item!
messages.setdefault((ctxt, msg), []).append(":".join((rel_path, str(line))))
pos = m.end()
line += f[m.start():pos].count('\n')
m = srch(f, pos)
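The rewritten loop first checks the numbered ctxt_raw{i} groups (the multi-context case) and only then falls back to the single ctxt_raw group. A stripped-down sketch of that dispatch, using a plain dict in place of a real regex groupdict (names mirror the diff, the data is invented, and "*" merely stands in for CONTEXT_DEFAULT):

MAX_MULTI_CTXT = 16

def dispatch(d):
    # Sketch of the multi-context vs single-context branching in check_file().
    msg = d.get("msg_raw")
    ctxts = tuple(d.get("ctxt_raw{}".format(i)) for i in range(MAX_MULTI_CTXT))
    if ctxts[0]:
        # BLF_I18N_MSGID_MULTI_CTXT: one (context, message) pair per captured context.
        return [(c, msg) for c in ctxts if c]
    # Regular keyword: a single optional context, defaulting to the placeholder "*".
    return [(d.get("ctxt_raw") or "*", msg)]

print(dispatch({"msg_raw": "Mirror", "ctxt_raw0": "Operator", "ctxt_raw1": "Mesh"}))
print(dispatch({"msg_raw": "Mirror", "ctxt_raw": None}))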
@@ -138,12 +153,12 @@ def py_xgettext(messages):
rel_path = os.path.relpath(path, SOURCE_DIR)
if rel_path in forbidden:
continue
elif rel_path in forced:
forced.remove(rel_path)
check_file(path, rel_path, messages)
for path in forced:
elif rel_path not in forced:
forced.add(rel_path)
for rel_path in sorted(forced):
path = os.path.join(SOURCE_DIR, rel_path)
if os.path.exists(path):
check_file(os.path.join(SOURCE_DIR, path), path, messages)
check_file(path, rel_path, messages)


# Spell checking!

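Instead of parsing files as the tree is walked and then looping over the forced ones separately, the new code first gathers every eligible relative path into the forced set and then runs check_file over sorted(forced), so each file is processed once, in a deterministic order and always with a SOURCE_DIR-relative path. A condensed sketch under assumed names (SOURCE_DIR, the os.walk enumeration, the sample sets and the no-op check_file are all placeholders):

import os

SOURCE_DIR = "/path/to/source"          # placeholder
forbidden = {"intern/skip_me.c"}        # placeholder
forced = {"release/datafiles/extra.c"}  # placeholder: files to always parse

def check_file(path, rel_path, messages):  # stand-in for the real extractor
    messages.setdefault(rel_path, []).append(path)

messages = {}
# First collect everything into one set...
for root, dirs, files in os.walk(SOURCE_DIR):
    for name in files:
        rel_path = os.path.relpath(os.path.join(root, name), SOURCE_DIR)
        if rel_path in forbidden:
            continue
        elif rel_path not in forced:
            forced.add(rel_path)

# ...then parse the files once, in a stable sorted order.
for rel_path in sorted(forced):
    path = os.path.join(SOURCE_DIR, rel_path)
    if os.path.exists(path):
        check_file(path, rel_path, messages)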
@@ -115,7 +115,7 @@ def main():
if not os.path.exists(os.path.join(TRUNK_PO_DIR, ".".join((lang, "po")))):
failed.add(lang)

# Check and compile each po separatly, to keep track of those failing.
# Check and compile each po separately, to keep track of those failing.
# XXX There should not be any failing at this stage, import step is
# supposed to have already filtered them out!
for po in os.listdir(TRUNK_PO_DIR):

@@ -74,7 +74,7 @@ class BPyOpsSubMod(object):

eg. bpy.ops.object
"""
__keys__ = ("module",)
__slots__ = ("module",)

def __init__(self, module):
self.module = module
@@ -111,7 +111,7 @@ class BPyOpsSubModOp(object):
eg. bpy.ops.object.somefunc
"""

__keys__ = ("module", "func")
__slots__ = ("module", "func")

def _get_doc(self):
return op_as_string(self.idname())

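__keys__ has no special meaning to Python, so the old attribute was just dead data; __slots__ is what actually suppresses the per-instance __dict__ and restricts assignable attributes. A small self-contained sketch of the difference (the two classes are invented for illustration):

class WithKeys:
    __keys__ = ("module",)   # no effect: instances still get a __dict__

class WithSlots:
    __slots__ = ("module",)  # only "module" may be assigned; no __dict__

w = WithKeys()
w.anything = 1               # silently allowed

s = WithSlots()
s.module = "object"
try:
    s.anything = 1           # raises AttributeError
except AttributeError as ex:
    print("blocked:", ex)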
@@ -216,7 +216,7 @@ def load_scripts(reload_scripts=False, refresh_scripts=False):
for mod in _global_loaded_modules:
test_reload(mod)

_global_loaded_modules[:] = []
del _global_loaded_modules[:]

for base_path in script_paths():
for path_subdir in _script_module_dirs:

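Several hunks in this merge swap lst[:] = [] for del lst[:]; both clear the list in place (unlike rebinding with lst = []), the del form just avoids building a throwaway empty list. A tiny sketch showing that in-place clearing is visible through every reference:

shared = [1, 2, 3]
alias = shared

del shared[:]          # clear in place, equivalent to shared[:] = []
print(alias)           # [] -- the alias sees the change

shared = [4, 5]        # rebinding, by contrast, leaves the alias untouched
print(alias)           # still []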
@@ -223,7 +223,7 @@ def edge_loops_from_tessfaces(mesh, tessfaces=None, seams=()):
else:
other_dir = None

ed_adj[:] = []
del ed_adj[:]

flipped = False

@@ -235,22 +235,22 @@ def edge_loops_from_tessfaces(mesh, tessfaces=None, seams=()):
if other_dir and flipped is False:
flipped = True # only flip the list once
context_loop.reverse()
ed_adj[:] = []
del ed_adj[:]
context_loop.append(other_dir) # save 1 look-up

ed_adj = edges[context_loop[-1]]
if len(ed_adj) != 2:
ed_adj[:] = []
del ed_adj[:]
break
else:
ed_adj[:] = []
del ed_adj[:]
break

i = ed_adj.index(context_loop[-2])
context_loop.append(ed_adj[not i])

# Dont look at this again
ed_adj[:] = []
del ed_adj[:]

return edge_loops

@@ -325,10 +325,12 @@ def ngon_tessellate(from_data, indices, fix_loops=True):
fgon to create from existing verts.

from_data: either a mesh, or a list/tuple of vectors.
indices: a list of indices to use this list is the ordered closed polyline
:arg indices: a list of indices to use this list is the ordered closed polyline
to fill, and can be a subset of the data given.
fix_loops: If this is enabled polylines that use loops to make multiple
:type indices: list
:arg fix_loops: If this is enabled polylines that use loops to make multiple
polylines are delt with correctly.
:type fix_loops: bool
"""

from mathutils.geometry import tessellate_polygon
@@ -436,7 +438,7 @@ def ngon_tessellate(from_data, indices, fix_loops=True):
if s1[0][1] == s1[-1][1]: # remove endpoints double
s1.pop()

s2[:] = [] # Empty this segment s2 so we don't use it again.
del s2[:] # Empty this segment s2 so we don't use it again.
return True

joining_segments = True

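The docstring change moves the parameter descriptions to Sphinx-style field lists (:arg name: / :type name:), the convention the generated Blender API docs can pick up. A short sketch of the same convention applied to an invented helper:

def scaled_points(points, factor=1.0):
    """Return a scaled copy of a point list.

    :arg points: the (x, y) pairs to scale.
    :type points: list
    :arg factor: uniform scale applied to every coordinate.
    :type factor: float
    """
    return [(x * factor, y * factor) for x, y in points]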
@@ -394,7 +394,7 @@ class Mesh(bpy_types.ID):
p.vertices = f
loop_index += loop_len

# if no edges - calculae them
# if no edges - calculate them
if faces and (not edges):
self.update(calc_edges=True)

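This hunk sits in the mesh-from-raw-data path: after the polygons are filled in, update(calc_edges=True) asks Blender to derive the edges that were not supplied. A hedged usage sketch to be run inside Blender's Python console (the object name and sample data are invented; only the calls shown here are assumed from the bpy API of that era):

import bpy

# Build a quad from raw data; pass no edges and let Blender compute them.
verts = [(0, 0, 0), (1, 0, 0), (1, 1, 0), (0, 1, 0)]
faces = [(0, 1, 2, 3)]

mesh = bpy.data.meshes.new("ExampleQuad")
mesh.from_pydata(verts, [], faces)
mesh.update(calc_edges=True)  # derive the four edges from the face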
@@ -178,7 +178,7 @@ def rna2xml(fw=print_ln,
fw("%s</%s>\n" % (ident, value_type_name))

# -------------------------------------------------------------------------
# needs re-workign to be generic
# needs re-working to be generic

if root_node:
fw("%s<%s>\n" % (root_ident, root_node))