""" % (href, title)
def _engine_title(self, engine, variation):
if variation:
return engine.title() + ' ' + variation
else:
return engine.title()
def _engine_path(self, path, variation):
if variation:
variation = variation.replace(' ', '_')
return os.path.join(path, variation.lower())
else:
return path
def _navigation_html(self, comparison):
html = """"""
return html
def _write_html(self, comparison=False):
# Gather intermediate data for all tests.
if comparison:
failed_data = []
passed_data = sorted(glob.glob(os.path.join(self.output_dir, "*/compare.data")))
else:
failed_data = sorted(glob.glob(os.path.join(self.output_dir, "*/failed.data")))
passed_data = sorted(glob.glob(os.path.join(self.output_dir, "*/passed.data")))
failed_tests = ""
passed_tests = ""
for filename in failed_data:
filepath = os.path.join(self.output_dir, filename)
failed_tests += pathlib.Path(filepath).read_text()
for filename in passed_data:
filepath = os.path.join(self.output_dir, filename)
passed_tests += pathlib.Path(filepath).read_text()
tests_html = failed_tests + passed_tests
# Write html for all tests.
if self.pixelated:
image_rendering = 'pixelated'
else:
image_rendering = 'auto'
# Navigation
menu = self._navigation_html(comparison)
failed = len(failed_tests) > 0
if failed:
message = """
"""
message += """
Run this command to regenerate reference (ground truth) images:
"""
message += """
BLENDER_TEST_UPDATE=1 ctest -R %s
""" % self.engine_name
message += """
This then happens for new and failing tests; reference images of """ \
"""passing test cases will not be updated. Be sure to commit the new reference """ \
"""images to the tests/files git submodule afterwards.
"""
message += """
"""
else:
message = ""
if comparison:
title = self.title + " Test Compare"
engine_self = self.title
engine_other = self._engine_title(*self.compare_engine)
columns_html = "
Name
%s
%s
" % (engine_self, engine_other)
else:
title = self.title + " Test Report"
columns_html = "
Name
New
Reference
Diff Color
Diff Alpha
"
html = f"""
{title}
{title}
{menu}
{message}
{columns_html}
{tests_html}
"""
filename = "report.html" if not comparison else "compare.html"
filepath = os.path.join(self.output_dir, filename)
pathlib.Path(filepath).write_text(html)
print_message("Report saved to: " + pathlib.Path(filepath).as_uri())
# Update global report
if not comparison:
global_failed = failed if not comparison else None
global_report.add(self.global_dir, "Render", self.title, filepath, global_failed)
def _relative_url(self, filepath):
relpath = os.path.relpath(filepath, self.output_dir)
return pathlib.Path(relpath).as_posix()
def _write_test_html(self, test_category, test_result):
name = test_result.name.replace('_', ' ')
status = test_result.error if test_result.error else ""
tr_style = """ class="table-danger" """ if test_result.error else ""
new_url = self._relative_url(test_result.new_img)
ref_url = self._relative_url(test_result.ref_img)
diff_color_url = self._relative_url(test_result.diff_color_img)
diff_alpha_url = self._relative_url(test_result.diff_alpha_img)
test_html = f"""
""" . format(tr_style=tr_style,
name=name,
testname=test_result.name,
status=status,
new_url=new_url,
ref_url=ref_url)
self.compare_tests += test_html
def _get_render_arguments(self, arguments_cb, filepath, base_output_filepath):
# Each render test can override this method to provide extra functionality.
# See Cycles render tests for an example.
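        #
        # A hypothetical override could post-process the callback's arguments,
        # e.g. (illustration only, '--some-engine-flag' is made up):
        #
        #   def _get_render_arguments(self, arguments_cb, filepath, base_output_filepath):
        #       args = arguments_cb(filepath, base_output_filepath)
        #       return ['--some-engine-flag'] + args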
# Do not delete.
return arguments_cb(filepath, base_output_filepath)
def _get_arguments_suffix(self):
# Get command line arguments that need to be provided after all file-specific ones.
# For example the Cycles render device argument needs to be added at the end of
# the argument list, otherwise tests can't be batched together.
#
# Each render test is supposed to override this method.
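        #
        # A hypothetical override (sketch only; the attribute name 'self.device'
        # is assumed here) could return the Cycles device selection, e.g.:
        #
        #   def _get_arguments_suffix(self):
        #       return ['--', '--cycles-device', self.device]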
return []
def _get_filepath_tests(self, filepath):
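        # A .blend file may come with a sibling "<name>_permutations.txt" file
        # listing one test name per line; each listed name becomes its own
        # TestResult for the same .blend file. Hypothetical contents:
        #   my_test_variant_a
        #   my_test_variant_b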
list_filepath = filepath.replace('.blend', '_permutations.txt')
if os.path.exists(list_filepath):
with open(list_filepath, 'r') as file:
return [TestResult(self, filepath, testname.rstrip('\n')) for testname in file]
else:
testname = test_get_name(filepath)
return [TestResult(self, filepath, testname)]
def _run_tests(self, filepaths, blender, arguments_cb, batch):
# Run multiple tests in a single Blender process since startup can be
# a significant factor. In case of crashes, re-run the remaining tests.
verbose = os.environ.get("BLENDER_VERBOSE") is not None
remaining_filepaths = filepaths[:]
test_results = []
while len(remaining_filepaths) > 0:
command = [blender]
running_tests = []
# Construct output filepaths and command to run
for filepath in remaining_filepaths:
running_tests.append(filepath)
testname = test_get_name(filepath)
print_message(testname, 'SUCCESS', 'RUN')
base_output_filepath = os.path.join(self.output_dir, "tmp_" + testname)
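                # Blender appends the frame number to the render output path, so a
                # single-frame render ends up as "<base>0001.png".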
output_filepath = base_output_filepath + '0001.png'
if os.path.exists(output_filepath):
os.remove(output_filepath)
command.extend(self._get_render_arguments(arguments_cb, filepath, base_output_filepath))
# Only chain multiple commands for batch
if not batch:
break
command.extend(self._get_arguments_suffix())
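            # With batching enabled the final command is a single Blender invocation
            # covering many files, roughly:
            #   blender <args for file 1> ... <args for file N> <suffix args>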
# Run process
crash = False
output = None
try:
completed_process = subprocess.run(command, stdout=subprocess.PIPE)
if completed_process.returncode != 0:
crash = True
output = completed_process.stdout
except Exception:
crash = True
if verbose:
print(" ".join(command))
if (verbose or crash) and output:
print(output.decode("utf-8", 'ignore'))
tests_to_check = []
# Detect missing filepaths and consider those errors
for filepath in running_tests:
remaining_filepaths.pop(0)
file_crashed = False
for test in self._get_filepath_tests(filepath):
if not os.path.exists(test.tmp_out_img) or os.path.getsize(test.tmp_out_img) == 0:
if crash:
# In case of crash, stop after missing files and re-render remaining
test.error = "CRASH"
test_results.append(test)
file_crashed = True
break
else:
test.error = "NO OUTPUT"
test_results.append(test)
else:
tests_to_check.append(test)
if file_crashed:
break
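        # Compare the rendered outputs against the reference images in parallel,
        # one worker per CPU core.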
pool = multiprocessing.Pool(multiprocessing.cpu_count())
test_results.extend(pool.starmap(diff_output,
[(test, self.oiiotool, self.fail_threshold, self.fail_percent, self.verbose, self.update)
for test in tests_to_check]))
pool.close()
for test in test_results:
if test.error == "CRASH":
print_message("Crash running Blender")
print_message(test.name, 'FAILURE', 'FAILED')
elif test.error == "NO OUTPUT":
print_message("No render result file found")
print_message(test.tmp_out_img, 'FAILURE', 'FAILED')
elif test.error == "VERIFY":
print_message("Render result is different from reference image")
print_message(test.name, 'FAILURE', 'FAILED')
else:
print_message(test.name, 'SUCCESS', 'OK')
if os.path.exists(test.tmp_out_img):
os.remove(test.tmp_out_img)
return test_results
def _run_all_tests(self, dirname, dirpath, blender, arguments_cb, batch, fail_silently):
passed_tests = []
failed_tests = []
silently_failed_tests = []
all_files = list(blend_list(dirpath, self.blocklist))
all_files.sort()
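        # Check without the blocklist so a directory containing no .blend files at
        # all is reported as an error, even if every file would be blocklisted.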
if not list(blend_list(dirpath, [])):
print_message("No .blend files found in '{}'!".format(dirpath), 'FAILURE', 'FAILED')
return False
print_message("Running {} tests from 1 test case." .
format(len(all_files)),
'SUCCESS', "==========")
time_start = time.time()
test_results = self._run_tests(all_files, blender, arguments_cb, batch)
for test in test_results:
if test.error:
if test.error == "NO_ENGINE":
return False
elif test.error == "NO_START":
return False
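                # With fail_silently, non-crash failures are recorded and listed in the
                # output but do not affect the overall return value of this run.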
if fail_silently and test.error != 'CRASH':
silently_failed_tests.append(test.name)
else:
failed_tests.append(test.name)
else:
passed_tests.append(test.name)
self._write_test_html(dirname, test)
time_end = time.time()
elapsed_ms = int((time_end - time_start) * 1000)
print_message("")
print_message("{} tests from 1 test case ran. ({} ms total)" .
format(len(all_files), elapsed_ms),
'SUCCESS', "==========")
print_message("{} tests." .
format(len(passed_tests)),
'SUCCESS', 'PASSED')
all_failed_tests = silently_failed_tests + failed_tests
if all_failed_tests:
print_message("{} tests, listed below:" .
format(len(all_failed_tests)),
'FAILURE', 'FAILED')
all_failed_tests.sort()
for test in all_failed_tests:
print_message("{}" . format(test), 'FAILURE', "FAILED")
return not bool(failed_tests)