""" % (href, title)
def _engine_title(self, engine, variation):
if variation:
return engine.title() + ' ' + variation
else:
return engine.title()
def _engine_path(self, path, variation):
if variation:
return os.path.join(path, variation.lower())
else:
return path
def _navigation_html(self, comparison):
html = """"""
return html
def _write_html(self, comparison=False):
    """Assemble and write the HTML report for all tests.

    When *comparison* is true, write "compare.html" built from the
    per-test compare.data files; otherwise write "report.html" from the
    failed.data/passed.data files and register it with the global report.
    """
    # Gather intermediate data for all tests.
    if comparison:
        failed_data = []
        passed_data = sorted(glob.glob(os.path.join(self.output_dir, "*/compare.data")))
    else:
        failed_data = sorted(glob.glob(os.path.join(self.output_dir, "*/failed.data")))
        passed_data = sorted(glob.glob(os.path.join(self.output_dir, "*/passed.data")))

    failed_tests = ""
    passed_tests = ""

    for filename in failed_data:
        filepath = os.path.join(self.output_dir, filename)
        failed_tests += pathlib.Path(filepath).read_text()
    for filename in passed_data:
        filepath = os.path.join(self.output_dir, filename)
        passed_tests += pathlib.Path(filepath).read_text()

    # Failed tests come first so they show at the top of the page.
    tests_html = failed_tests + passed_tests

    # Write html for all tests.
    if self.pixelated:
        image_rendering = 'pixelated'
    else:
        image_rendering = 'auto'
    # NOTE(review): image_rendering is unused below — presumably consumed
    # by CSS in the (stripped) page template; confirm before removing.

    # Navigation
    menu = self._navigation_html(comparison)

    failed = len(failed_tests) > 0
    if failed:
        message = """
"""
        message += """
Run this command to regenerate reference (ground truth) images:
"""
        message += """
BLENDER_TEST_UPDATE=1 ctest -R %s
""" % self.engine_name
        message += """
This then happens for new and failing tests; reference images of """ \
            """passing test cases will not be updated. Be sure to commit the new reference """ \
            """images to the tests/data git submodule afterwards.
"""
        message += """
"""
    else:
        message = ""

    if comparison:
        title = self.title + " Test Compare"
        engine_self = self.title
        engine_other = self._engine_title(*self.compare_engine)
        # Fixed: the column template must be a (triple-quoted) string
        # literal; the previous plain-quote form did not parse.
        columns_html = """
Name
%s
%s
""" % (engine_self, engine_other)
    else:
        title = self.title + " Test Report"
        columns_html = """
Name
New
Reference
Diff Color
Diff Alpha
"""

    html = f"""
{title}
{title}
{menu}
{message}
{columns_html}
{tests_html}
"""

    filename = "report.html" if not comparison else "compare.html"
    filepath = os.path.join(self.output_dir, filename)
    pathlib.Path(filepath).write_text(html)

    print_message("Report saved to: " + pathlib.Path(filepath).as_uri())

    # Update global report
    if not comparison:
        # `comparison` is known False in this branch, so the previous
        # `failed if not comparison else None` ternary was dead code.
        global_failed = failed
        global_report.add(self.global_dir, "Render", self.title, filepath, global_failed)
def _relative_url(self, filepath):
relpath = os.path.relpath(filepath, self.output_dir)
return pathlib.Path(relpath).as_posix()
def _write_test_html(self, testname, filepath, error):
    """Append the HTML table row for a single test result to self.compare_tests.

    :arg testname: name of the test category/directory for the row.
    :arg filepath: path to the test .blend file.
    :arg error: error string when the test failed, falsy when it passed.
    """
    name = test_get_name(filepath)
    # Underscores become spaces for display.
    name = name.replace('_', ' ')

    old_img, ref_img, new_img, diff_color_img, diff_alpha_img = test_get_images(
        self.output_dir, filepath, self.reference_dir, self.reference_override_dir)

    status = error if error else ""
    # Failed rows get the "table-danger" highlight class.
    tr_style = """ class="table-danger" """ if error else ""

    new_url = self._relative_url(new_img)
    ref_url = self._relative_url(ref_img)
    diff_color_url = self._relative_url(diff_color_img)
    diff_alpha_url = self._relative_url(diff_alpha_img)

    # NOTE(review): the row template appears stripped here — the f-string is
    # empty and diff_color_url/diff_alpha_url are computed but unused in the
    # visible format() call; presumably the full template references them.
    # Confirm against the original template before changing anything.
    test_html = f"""
""" . format(tr_style=tr_style,
             name=name,
             testname=testname,
             status=status,
             new_url=new_url,
             ref_url=ref_url)

    self.compare_tests += test_html
def _diff_output(self, filepath, tmp_filepath):
    """Compare a rendered image against its reference and write diff images.

    :arg filepath: path to the test .blend file.
    :arg tmp_filepath: path to the temporary render result to compare.
    :return: True when the render matches the reference within the
        configured thresholds (or the reference was just updated),
        False otherwise.
    """
    old_img, ref_img, new_img, diff_color_img, diff_alpha_img = test_get_images(
        self.output_dir, filepath, self.reference_dir, self.reference_override_dir)

    # Create reference render directory.
    old_dirpath = os.path.dirname(old_img)
    os.makedirs(old_dirpath, exist_ok=True)

    # Copy temporary to new image.
    if os.path.exists(new_img):
        os.remove(new_img)
    if os.path.exists(tmp_filepath):
        shutil.copy(tmp_filepath, new_img)

    if os.path.exists(ref_img):
        # Diff images test with threshold.
        command = (
            self.oiiotool,
            ref_img,
            tmp_filepath,
            "--fail", str(self.fail_threshold),
            "--failpercent", str(self.fail_percent),
            "--diff",
        )
        try:
            subprocess.check_output(command)
            failed = False
        except subprocess.CalledProcessError as e:
            # oiiotool exits non-zero when the images differ beyond threshold.
            if self.verbose:
                print_message(e.output.decode("utf-8", 'ignore'))
            failed = e.returncode != 0
    else:
        # No reference image yet: fail unless we are allowed to create one.
        if not self.update:
            return False

        failed = True

    if failed and self.update:
        # Update reference image if requested.
        shutil.copy(new_img, ref_img)
        shutil.copy(new_img, old_img)
        failed = False

    # Generate color diff image.
    command = (
        self.oiiotool,
        ref_img,
        "--ch", "R,G,B",
        tmp_filepath,
        "--ch", "R,G,B",
        "--sub",
        "--abs",
        "--mulc", "16",
        "-o", diff_color_img,
    )
    try:
        subprocess.check_output(command, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        # Best effort: failing to write a diff image is not a test failure.
        if self.verbose:
            print_message(e.output.decode("utf-8", 'ignore'))

    # Generate alpha diff image.
    command = (
        self.oiiotool,
        ref_img,
        "--ch", "A",
        tmp_filepath,
        "--ch", "A",
        "--sub",
        "--abs",
        "--mulc", "16",
        "-o", diff_alpha_img,
    )
    try:
        subprocess.check_output(command, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        if self.verbose:
            msg = e.output.decode("utf-8", 'ignore')
            for line in msg.splitlines():
                # Ignore warnings for images without alpha channel.
                if "--ch: Unknown channel name" not in line:
                    print_message(line)

    return not failed
def _get_render_arguments(self, arguments_cb, filepath, base_output_filepath):
# Each render test can override this method to provide extra functionality.
# See Cycles render tests for an example.
# Do not delete.
return arguments_cb(filepath, base_output_filepath)
def _get_arguments_suffix(self):
# Get command line arguments that need to be provided after all file-specific ones.
# For example the Cycles render device argument needs to be added at the end of
# the argument list, otherwise tests can't be batched together.
#
# Each render test is supposed to override this method.
return []
def _run_tests(self, filepaths, blender, arguments_cb, batch):
    """Render the given .blend files and diff the results.

    Run multiple tests in a single Blender process since startup can be
    a significant factor. In case of crashes, re-run the remaining tests.

    :return: list of error codes ("CRASH", "NO OUTPUT", "VERIFY" or None)
        aligned with *filepaths*.
    """
    verbose = os.environ.get("BLENDER_VERBOSE") is not None

    remaining_filepaths = filepaths[:]
    errors = []

    while len(remaining_filepaths) > 0:
        command = [blender]
        output_filepaths = []

        # Construct output filepaths and command to run
        for filepath in remaining_filepaths:
            testname = test_get_name(filepath)
            print_message(testname, 'SUCCESS', 'RUN')

            base_output_filepath = os.path.join(self.output_dir, "tmp_" + testname)
            output_filepath = base_output_filepath + '0001.png'
            output_filepaths.append(output_filepath)

            if os.path.exists(output_filepath):
                os.remove(output_filepath)

            command.extend(self._get_render_arguments(arguments_cb, filepath, base_output_filepath))

            # Only chain multiple commands for batch
            if not batch:
                break

        command.extend(self._get_arguments_suffix())

        # Run process
        crash = False
        output = None
        try:
            completed_process = subprocess.run(command, stdout=subprocess.PIPE)
            if completed_process.returncode != 0:
                crash = True
            output = completed_process.stdout
        except Exception:
            crash = True

        if verbose:
            print(" ".join(command))
        if (verbose or crash) and output:
            print(output.decode("utf-8", 'ignore'))

        # Detect missing filepaths and consider those errors
        for filepath, output_filepath in zip(remaining_filepaths[:], output_filepaths):
            remaining_filepaths.pop(0)

            # Fixed: resolve the name of *this* test before reporting a
            # crash. Previously the crash message reused the stale
            # `testname` left over from the command-construction loop,
            # which names the last batched test rather than the one that
            # actually failed to produce output.
            testname = test_get_name(filepath)

            if crash:
                # In case of crash, stop after missing files and re-render remaining
                if not os.path.exists(output_filepath):
                    errors.append("CRASH")
                    print_message("Crash running Blender")
                    print_message(testname, 'FAILURE', 'FAILED')
                    break

            if not os.path.exists(output_filepath) or os.path.getsize(output_filepath) == 0:
                errors.append("NO OUTPUT")
                print_message("No render result file found")
                print_message(testname, 'FAILURE', 'FAILED')
            elif not self._diff_output(filepath, output_filepath):
                errors.append("VERIFY")
                print_message("Render result is different from reference image")
                print_message(testname, 'FAILURE', 'FAILED')
            else:
                errors.append(None)
                print_message(testname, 'SUCCESS', 'OK')

            if os.path.exists(output_filepath):
                os.remove(output_filepath)

    return errors
def _run_all_tests(self, dirname, dirpath, blender, arguments_cb, batch, fail_silently):
    """Run every .blend test under *dirpath* and report the results.

    :return: False when any (non-silent) test failed or no test files were
        found, True otherwise.
    """
    passed_tests = []
    failed_tests = []
    silently_failed_tests = []
    # Fixed for consistency: the guard below calls blend_list() with
    # (dirpath, device, blocklist); this call previously omitted the
    # device argument.
    all_files = list(blend_list(dirpath, self.device, self.blocklist))
    all_files.sort()

    # Check against an empty blocklist, so a fully blocklisted directory is
    # distinguishable from a genuinely empty one.
    if not list(blend_list(dirpath, self.device, [])):
        print_message("No .blend files found in '{}'!".format(dirpath), 'FAILURE', 'FAILED')
        return False

    print_message("Running {} tests from 1 test case." .
                  format(len(all_files)),
                  'SUCCESS', "==========")

    time_start = time.time()
    errors = self._run_tests(all_files, blender, arguments_cb, batch)
    for filepath, error in zip(all_files, errors):
        testname = test_get_name(filepath)
        if error:
            # NOTE(review): _run_tests() here only emits CRASH/NO OUTPUT/
            # VERIFY; NO_ENGINE and NO_START presumably come from subclass
            # overrides — confirm before removing these early exits.
            if error == "NO_ENGINE":
                return False
            elif error == "NO_START":
                return False

            # Crashes are never silenced; other failures may be when
            # fail_silently is requested.
            if fail_silently and error != 'CRASH':
                silently_failed_tests.append(testname)
            else:
                failed_tests.append(testname)
        else:
            passed_tests.append(testname)
        self._write_test_html(dirname, filepath, error)
    time_end = time.time()
    elapsed_ms = int((time_end - time_start) * 1000)

    print_message("")
    print_message("{} tests from 1 test case ran. ({} ms total)" .
                  format(len(all_files), elapsed_ms),
                  'SUCCESS', "==========")
    print_message("{} tests." .
                  format(len(passed_tests)),
                  'SUCCESS', 'PASSED')

    all_failed_tests = silently_failed_tests + failed_tests
    if all_failed_tests:
        print_message("{} tests, listed below:" .
                      format(len(all_failed_tests)),
                      'FAILURE', 'FAILED')
        all_failed_tests.sort()
        for test in all_failed_tests:
            print_message("{}" . format(test), 'FAILURE', "FAILED")

    # Silently failed tests do not affect the overall result.
    return not bool(failed_tests)