Tests: support generating code coverage report

This only works with GCC and has only been tested on Linux. The main goal is to
automatically generate the code coverage reports on the buildbot and to publish
them. With some luck, this motivates people to increase test coverage in their
respective areas. Nevertheless, it should be easy to generate the reports
locally too (at least on supported software stacks).

Usage:
1. Create a **debug** build using **GCC** with **WITH_COMPILER_CODE_COVERAGE**
   enabled.
2. Run tests. This automatically generates `.gcda` files in the build directory.
3. Run `make/ninja coverage-report` in the build directory.

If everything is successful, this will open a browser with the final report
which is stored in `build-dir/coverage/report/`. For a bit more control one can
also run `coverage.py` script directly. This allows passing in the
`--no-browser` option which may be beneficial when running it on the buildbot.
Running `make/ninja coverage-reset` deletes all `.gcda` files which resets the
line execution counts.

The final report has a main entry point (`index.html`) and a separate `.html`
file for every source code file that coverage data was available for. This also
contains some code that is not in Blender's git repository. We could filter
those out, but it also seems interesting (to me anyway), so I just kept it in.

Doing the analysis and writing the report takes ~1 min. The slow part is running
all tests in a debug build which takes ~12 min for me. Since the coverage data
is fairly large and the report also includes the entire source code, file
compression is used in two places:
* The intermediate analysis results for each file are stored in compressed zip
  files. This data is still independent from the report html and could be used
  to build other tools on top of. I could imagine storing the analysis data for
  each day for example to gather greater insights into how coverage changes over
  time in different parts of the code.
* The analysis data and source code are compressed, base64-encoded, and
  embedded into the `.html` files. This makes them much smaller than embedding
  the data without compression (5-10x).

Pull Request: https://projects.blender.org/blender/blender/pulls/126181
This commit is contained in:
Jacques Lucke
2024-08-15 12:17:55 +02:00
parent ed3c16624b
commit bb8460da9e
9 changed files with 1768 additions and 0 deletions

View File

@@ -965,6 +965,28 @@ Can cause linking issues due to too large binary size."
)
mark_as_advanced(WITH_COMPILER_ASAN_EXTERN)
option(WITH_COMPILER_CODE_COVERAGE "\
Build and link with code coverage support (only for Debug targets)."
  OFF
)
mark_as_advanced(WITH_COMPILER_CODE_COVERAGE)

if(WITH_COMPILER_CODE_COVERAGE)
  # Coverage instrumentation currently relies on GCC's gcov tooling, so the
  # option is forced off for any other compiler.
  if(NOT CMAKE_COMPILER_IS_GNUCC)
    # Use STATUS mode instead of a bare message() so the notice goes to stdout
    # with the other configure output rather than being raised as stderr noise.
    message(STATUS "WITH_COMPILER_CODE_COVERAGE only works with GCC currently.")
    set(WITH_COMPILER_CODE_COVERAGE OFF)
  endif()
endif()

if(WITH_COMPILER_CODE_COVERAGE)
  # Default flags that enable .gcno/.gcda generation; kept in cache variables
  # so they can be tweaked without editing this file.
  set(_code_coverage_defaults "--coverage")
  set(COMPILER_CODE_COVERAGE_CFLAGS ${_code_coverage_defaults} CACHE STRING "C flags for code coverage")
  mark_as_advanced(COMPILER_CODE_COVERAGE_CFLAGS)
  set(COMPILER_CODE_COVERAGE_CXXFLAGS ${_code_coverage_defaults} CACHE STRING "C++ flags for code coverage")
  mark_as_advanced(COMPILER_CODE_COVERAGE_CXXFLAGS)
  unset(_code_coverage_defaults)
endif()
if(CMAKE_COMPILER_IS_GNUCC OR CMAKE_C_COMPILER_ID MATCHES "Clang")
if(WITH_COMPILER_ASAN)
set(_asan_defaults "\
@@ -1360,6 +1382,11 @@ set(PLATFORM_LINKFLAGS_DEBUG "")
set(PLATFORM_LINKFLAGS_RELEASE "")
set(PLATFORM_LINKFLAGS_EXECUTABLE "")
# Inject the coverage flags into the Debug configuration only, matching the
# "only for Debug targets" note on the WITH_COMPILER_CODE_COVERAGE option.
if(WITH_COMPILER_CODE_COVERAGE)
string(APPEND CMAKE_C_FLAGS_DEBUG " ${COMPILER_CODE_COVERAGE_CFLAGS}")
string(APPEND CMAKE_CXX_FLAGS_DEBUG " ${COMPILER_CODE_COVERAGE_CXXFLAGS}")
endif()
if(NOT CMAKE_BUILD_TYPE MATCHES "Release")
if(WITH_COMPILER_ASAN)
if(NOT APPLE)

View File

@@ -69,3 +69,17 @@ endif()
# GTest
add_subdirectory(gtests)
if(WITH_COMPILER_CODE_COVERAGE)
  set(COVERAGE_SCRIPT_PATH ${CMAKE_CURRENT_SOURCE_DIR}/coverage/coverage.py)
  # Analyse the .gcda files produced by test runs and generate the html report.
  # Use the explicit COMMAND keyword (the keyword-less form is legacy) and
  # VERBATIM so argument escaping is platform-independent.
  add_custom_target(coverage-report
    COMMAND ${PYTHON_EXECUTABLE} ${COVERAGE_SCRIPT_PATH} report --build-directory ${CMAKE_BINARY_DIR}
    USES_TERMINAL
    VERBATIM
  )
  # Delete all .gcda files in the build directory to reset execution counts.
  add_custom_target(coverage-reset
    COMMAND ${PYTHON_EXECUTABLE} ${COVERAGE_SCRIPT_PATH} reset --build-directory ${CMAKE_BINARY_DIR}
    USES_TERMINAL
    VERBATIM
  )
endif()

97
tests/coverage/coverage.py Executable file
View File

@@ -0,0 +1,97 @@
#!/usr/bin/env python3
# SPDX-FileCopyrightText: 2024 Blender Authors
#
# SPDX-License-Identifier: GPL-2.0-or-later
import argparse
import os
import shutil
import sys
import textwrap
import webbrowser
from coverage_report import parse, report_as_html
from coverage_report.util import print_updateable_line
from pathlib import Path
usage = textwrap.dedent(
"""\
coverage.py <command> [<args>]
Commands:
report Analyse coverage data and generate html report.
[--build-directory] Blender build directory. This will be scanned for .gcda files.
[--no-browser] Don't open the browser in the end.
reset [--build-directory] Delete .gcda files.
help Show this help.
"""
)
def main():
    """Dispatch to the sub-command named by the first CLI argument."""
    parser = argparse.ArgumentParser(description="Blender test coverage", usage=usage)
    parser.add_argument("command", nargs="?", default="help")
    parsed = parser.parse_args(sys.argv[1:2])
    # Everything after the command is forwarded to the sub-command's own parser.
    remaining_argv = sys.argv[2:]
    if parsed.command == "help":
        print(usage)
        return
    handlers = {"report": run_report, "reset": run_reset}
    handler = handlers.get(parsed.command)
    if handler is None:
        print("Unknown command: {}".format(parsed.command))
        sys.exit(1)
    handler(remaining_argv)
def run_report(argv):
    """Analyse the coverage data of a build directory and generate the html report.

    Exits with status 1 when the given directory is not a Blender build directory.
    """
    parser = argparse.ArgumentParser(usage=usage)
    parser.add_argument("--build-directory", type=str, default=".")
    parser.add_argument("--no-browser", action="store_true", default=False)
    options = parser.parse_args(argv)
    build_dir = Path(options.build_directory).absolute()
    if not is_blender_build_directory(build_dir):
        print("Directory does not seem to be a Blender build directory.")
        sys.exit(1)
    # Intermediate analysis data and the final report live next to each other
    # inside the build directory.
    coverage_dir = build_dir / "coverage"
    analysis_dir = coverage_dir / "analysis"
    report_dir = coverage_dir / "report"
    parse(build_dir, analysis_dir)
    report_as_html(analysis_dir, report_dir)
    if not options.no_browser:
        webbrowser.open("file://" + str(report_dir / "index.html"))
def run_reset(argv):
    """Delete all .gcda files in the build directory, resetting line execution counts.

    Exits with status 1 when the given directory is not a Blender build directory.
    """
    parser = argparse.ArgumentParser(usage=usage)
    parser.add_argument("--build-directory", type=str, default=".")
    options = parser.parse_args(argv)
    build_dir = Path(options.build_directory).absolute()
    if not is_blender_build_directory(build_dir):
        print("Directory does not seem to be a Blender build directory.")
        sys.exit(1)
    print("Remove .gcda files...")
    gcda_files = list(build_dir.glob("**/*.gcda"))
    total = len(gcda_files)
    for index, gcda_path in enumerate(gcda_files, start=1):
        print_updateable_line("[{}/{}] Remove: {}".format(index, total, gcda_path))
        os.remove(gcda_path)
    print()
def is_blender_build_directory(build_dir):
    """A directory counts as a build directory if it contains a CMakeCache.txt."""
    cache_file = Path(build_dir) / "CMakeCache.txt"
    return cache_file.exists()
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,6 @@
# SPDX-FileCopyrightText: 2024 Blender Authors
#
# SPDX-License-Identifier: GPL-2.0-or-later
from .build_report import report_as_html
from .parse import parse

View File

@@ -0,0 +1,108 @@
# SPDX-FileCopyrightText: 2024 Blender Authors
#
# SPDX-License-Identifier: GPL-2.0-or-later
import base64
import gzip
import html
import json
import shutil
import textwrap
import zipfile
from .util import print_updateable_line
from collections import defaultdict
from pathlib import Path
from pprint import pprint
index_template_path = Path(__file__).parent / "index_template.html"
single_file_template_path = Path(__file__).parent / "single_file_template.html"
def report_as_html(analysis_dir, report_dir):
    """Build the html coverage report from previously generated analysis data.

    analysis_dir: Directory with `summary.json.zip` and per-file analysis files.
    report_dir: Output directory; a previous report in it is removed first.

    Raises RuntimeError when the analysis directory does not exist.
    """
    analysis_dir = Path(analysis_dir).absolute()
    report_dir = Path(report_dir).absolute()
    if not analysis_dir.exists():
        raise RuntimeError("Missing analysis at: {}".format(analysis_dir))
    # Remove any previous report. `ignore_errors` covers the common case of the
    # directory not existing yet, without swallowing KeyboardInterrupt/SystemExit
    # the way the previous bare `except: pass` did.
    shutil.rmtree(report_dir, ignore_errors=True)
    build_summary(analysis_dir, report_dir)
    build_file_pages(analysis_dir, report_dir)
    print("Report written to {}.".format(report_dir / "index.html"))
def build_summary(analysis_dir, report_dir):
    """Write the report's index.html from the compressed summary analysis data."""
    print("Write index...")
    with open(index_template_path) as f:
        template = f.read()
    # The summary data is embedded into the page as compressed base64 text.
    summary_b64 = zip_file_to_compressed_base64(analysis_dir / "summary.json.zip")
    page = template.replace("ANALYSIS_DATA", summary_b64)
    index_path = report_dir / "index.html"
    index_path.parent.mkdir(parents=True, exist_ok=True)
    with open(index_path, "w") as f:
        f.write(page)
def build_file_pages(analysis_dir, report_dir):
    """Generate one html report page per analysed source file."""
    with open(single_file_template_path) as f:
        template = f.read()
    analysis_files_dir = analysis_dir / "files"
    analysis_paths = list(analysis_files_dir.glob("**/*.json.zip"))
    print("Write report pages...")
    total = len(analysis_paths)
    for index, analysis_path in enumerate(analysis_paths, start=1):
        relative_path = analysis_path.relative_to(analysis_files_dir)
        # Strip the ".json.zip" suffix to recover the original source file path.
        relative_path = Path(str(relative_path)[: -len(".json.zip")])
        source_path = "/" / relative_path
        report_path = Path(str(report_dir / "files" / relative_path) + ".html")
        # Relative link from this page back to the index at the report root.
        index_page_link = "../" * len(relative_path.parents) + "index.html"
        build_report_for_source_file(template, source_path, analysis_path, report_path, index_page_link)
        print_updateable_line("[{}/{}] written: {}".format(index, total, report_path))
    print()
def build_report_for_source_file(template_str, source_path, analysis_path, report_path, index_page_link):
    """Fill the single-file template's placeholders and write it to report_path."""
    replacements = {
        "TITLE": source_path.name,
        "INDEX_PAGE_LINK": index_page_link,
        "SOURCE_FILE_PATH": str(source_path),
        # Source code and analysis data are embedded as compressed base64 so
        # the generated pages stay reasonably small.
        "SOURCE_CODE": file_to_compressed_base64(source_path),
        "ANALYSIS_DATA": zip_file_to_compressed_base64(analysis_path),
    }
    page = template_str
    for placeholder, value in replacements.items():
        page = page.replace(placeholder, value)
    report_path.parent.mkdir(parents=True, exist_ok=True)
    with open(report_path, "w") as f:
        f.write(page)
def file_to_compressed_base64(file_path):
    """Read a file and return its content gzip-compressed and base64-encoded."""
    content = Path(file_path).read_bytes()
    return bytes_to_compressed_base64(content)
def zip_file_to_compressed_base64(zip_file_path):
    """Re-encode the single member of a `<name>.json.zip` archive as gzip+base64."""
    # The archive member is named like the zip file without its final suffix.
    member_name = zip_file_path.with_suffix("").name
    with zipfile.ZipFile(zip_file_path) as archive:
        data = archive.read(member_name)
    return bytes_to_compressed_base64(data)
def bytes_to_compressed_base64(data):
    """Gzip-compress the given bytes and return them as a base64 text string."""
    return base64.b64encode(gzip.compress(data)).decode("utf-8")

View File

@@ -0,0 +1,670 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Blender Code Coverage</title>
<!-- Libraries for tooltips. -->
<script
src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/2.11.8/umd/popper.min.js"
integrity="sha512-TPh2Oxlg1zp+kz3nFA0C5vVC6leG/6mm1z9+mA81MI5eaUVqasPLO8Cuk4gMF4gUfP5etR73rgU/8PNMsSesoQ=="
crossorigin="anonymous"
referrerpolicy="no-referrer"
></script>
<link
rel="stylesheet"
href="https://cdnjs.cloudflare.com/ajax/libs/tippy.js/6.3.7/tippy.min.css"
integrity="sha512-HbPh+j4V7pXprvQMt2dtmK/zCEsUeZWYXRln4sOwmoyHPQAPqy/k9lIquKUyKNpNbDGAY06UdiDHcEkBc72yCQ=="
crossorigin="anonymous"
referrerpolicy="no-referrer"
/>
<script
src="https://cdnjs.cloudflare.com/ajax/libs/tippy.js/6.3.7/tippy.umd.min.js"
integrity="sha512-2TtfktSlvvPzopzBA49C+MX6sdc7ykHGbBQUTH8Vk78YpkXVD5r6vrNU+nOmhhl1MyTWdVfxXdZfyFsvBvOllw=="
crossorigin="anonymous"
referrerpolicy="no-referrer"
></script>
<style>
* {
padding: 0;
margin: 0;
border: 0;
font-family: monospace;
overflow: visible;
}
body {
background: #2b2b2b;
}
#summary {
color: #e1dfcc;
border-bottom: #535353 1px solid;
padding: 0.5em;
font-size: large;
font-family: monospace;
}
a {
color: #e1dfcc;
text-decoration: none;
}
a:hover {
color: #89dbdb;
}
.row {
padding-bottom: 0.25em;
padding-top: 0.25em;
min-width: 1000px;
width: 100%;
}
.row-label {
margin-left: 1em;
cursor: pointer;
display: inline-block;
}
.open-dir-label::before {
content: "";
display: inline-block;
width: 0.5em;
height: 0.5em;
background-color: #e1dfcc;
clip-path: polygon(0% 0%, 100% 0%, 50% 100%);
margin-right: 0.5em;
}
.closed-dir-label::before {
content: "";
display: inline-block;
width: 0.5em;
height: 0.5em;
background-color: #e1dfcc;
clip-path: polygon(0% 0%, 100% 50%, 0% 100%);
margin-right: 0.5em;
}
#files-tree-view {
color: #d4d0ab;
width: 100%;
}
.row-stats {
display: inline-block;
margin-left: 1em;
}
.lines-percent-row {
display: inline-block;
text-align: right;
width: 3em;
}
.lines-total-row {
display: inline-block;
text-align: right;
font-size: smaller;
color: #7d7d7d;
margin-left: 1em;
width: 7em;
}
.odd {
background-color: rgba(0, 0, 0, 0.15);
}
.tippy-box {
background-color: #18110d;
border-radius: 3px;
}
.tippy-arrow {
color: #18110d;
}
.tippy-content {
padding: 0.3em;
}
</style>
</head>
<body>
<div id="summary">
<p>Code Coverage Report</p>
<br />
<p>Files: <span id="coverage-files"></span></p>
<p>Functions: <span id="coverage-functions"></span></p>
<p>Lines: <span id="coverage-lines"></span></p>
</div>
<div id="files-tree-view"></div>
<div id="file-row-tooltip-template" style="display: none">
<p>Functions: FUNCTIONS</p>
<p>Lines: LINES</p>
</div>
<div id="directory-row-tooltip-template" style="display: none">
<p>Functions: FUNCTIONS</p>
<p>Lines: LINES</p>
<br />
<p><a href="FILTER_LINK">FILTER_TEXT</a></p>
</div>
<script>
// Entry point: decompress the embedded analysis data, build the row tree, and
// restore previously-open directories and the saved scroll position.
window.addEventListener("DOMContentLoaded", async () => {
  analysis_data = JSON.parse(await str_from_gzip_base64(analysis_data_compressed_base64));
  // Apply the ?filter= directory filters from the URL, if any.
  filter_analysis_data();
  const consolidated_tree = build_consolidated_tree();
  root_row = build_row_data(consolidated_tree);
  initialize_coverage_counts();
  initialize_global_overview();
  if (root_row.children_map.size == 0) {
    return;
  }
  let paths_to_open = get_startup_paths_to_open();
  for (const path of paths_to_open) {
    const row = row_by_path.get(path);
    if (row) {
      ensure_dom_for_row(row);
      open_directory(row);
    }
  }
  update_odd_even_rows();
  // Restore the scroll position saved in the beforeunload handler below.
  const scroll_position = localStorage.getItem("scroll_position");
  if (scroll_position) {
    window.scrollTo(0, scroll_position);
  }
});
// Builds a nested-Map tree of all file paths in the analysis data, then
// collapses chains of single-child directories into one node (e.g. "a/b/c").
function build_consolidated_tree() {
  // Build a tree where each directory is still separate.
  const root_children = new Map();
  for (const file_path of Object.keys(analysis_data.files)) {
    let current = root_children;
    for (const part of file_path.split("/").slice(1)) {
      if (!current.has(part)) {
        current.set(part, new Map());
      }
      current = current.get(part);
    }
  }
  // Based on the tree above, build a new tree that has multiple directory levels
  // joined together if there are directories with only one child.
  function consolidate_recursive(name, children) {
    if (children.size === 0) {
      // Leaf node, i.e. a source file.
      return { name: name };
    }
    const new_children = new Map();
    for (const [child_name, child_children] of children.entries()) {
      const new_child = consolidate_recursive(child_name, child_children);
      new_children.set(new_child.name, new_child);
    }
    if (new_children.size >= 2) {
      return { name: name, children: new_children };
    }
    // Exactly one child: merge it into this node by joining the names.
    const single_child = new_children.entries().next().value[1];
    const joined_name = (name ? name + "/" : "") + single_child.name;
    if (!single_child.children) {
      return { name: joined_name };
    }
    return {
      name: joined_name,
      children: single_child.children,
    };
  }
  const consolidated_root = consolidate_recursive("", root_children);
  if (!consolidated_root.name.startsWith("/")) {
    consolidated_root.name = "/" + consolidated_root.name;
  }
  return consolidated_root;
}
// Builds a tree whereby each node corresponds to a row.
function build_row_data(consolidated_tree) {
  // Creates the plain data object backing one row and registers it in the
  // global `row_by_path` map and in its parent's children map.
  function create_row_data(parent, data) {
    const name = data.name;
    let path;
    if (parent) {
      if (parent.path == "/") {
        path = `/${name}`;
      } else {
        path = `${parent.path}/${name}`;
      }
    } else {
      path = `${name}`;
    }
    const row = {
      // Coverage counters are filled in later by initialize_coverage_counts().
      num_lines_run: 0,
      num_lines: 0,
      num_functions_run: 0,
      num_functions: 0,
      path: path,
      parent: parent,
      name: name,
      depth: parent ? parent.depth + 1 : 0,
      children_map: new Map(),
      sorted_children: [],
      is_file: !data.children,
      // Consolidated directory chains carry "/" in their display name.
      has_directory_separator: name.includes("/"),
      // DOM elements are created lazily by ensure_dom_for_row().
      dom_elem: null,
      dom_children_elem: null,
      dom_label_elem: null,
    };
    row_by_path.set(row.path, row);
    if (parent) {
      parent.children_map.set(name, row);
    }
    return row;
  }
  function build_rows_recursive(data, parent_row) {
    const row = create_row_data(parent_row, data);
    if (data.children) {
      for (const child of data.children.values()) {
        build_rows_recursive(child, row);
      }
      // Sort children so that directories come first.
      const directory_children = [];
      const file_children = [];
      for (const child_row of row.children_map.values()) {
        if (child_row.has_directory_separator || !child_row.is_file) {
          directory_children.push(child_row);
        } else {
          file_children.push(child_row);
        }
      }
      directory_children.sort((a, b) => a.name.localeCompare(b.name));
      file_children.sort((a, b) => a.name.localeCompare(b.name));
      row.sorted_children = [...directory_children, ...file_children];
    }
    return row;
  }
  return build_rows_recursive(consolidated_tree, null);
}
// Fills in the line/function counters of every row: leaf rows take their
// values from the analysis data, directory rows get the sum of their children.
function initialize_coverage_counts() {
  // Initialize the counts at the leaf rows, i.e. the source files.
  for (const [file_path, file_data] of Object.entries(analysis_data.files)) {
    const row = row_by_path.get(file_path);
    row.num_lines = file_data.num_lines;
    row.num_lines_run = file_data.num_lines_run;
    row.num_functions = file_data.num_functions;
    row.num_functions_run = file_data.num_functions_run;
  }
  // Recursively propagate the counts up until the root directory.
  function count_directory_file_lines_recursive(row) {
    if (row.is_file) {
      return;
    }
    for (const child_row of row.children_map.values()) {
      count_directory_file_lines_recursive(child_row);
      row.num_lines += child_row.num_lines;
      row.num_lines_run += child_row.num_lines_run;
      row.num_functions += child_row.num_functions;
      row.num_functions_run += child_row.num_functions_run;
    }
  }
  count_directory_file_lines_recursive(root_row);
}
// Decides which directories should be expanded on page load: URL filters,
// then previously-open directories, then a fallback default.
function get_startup_paths_to_open() {
  const paths = [];
  if (custom_root_paths) {
    paths.push(...custom_root_paths.values());
  }
  if (previous_open_paths) {
    paths.push(...previous_open_paths);
  }
  if (paths.length == 0) {
    paths.push(get_fallback_open_path());
  }
  return paths;
}
// Used to find which path should be opened by default if there is no other
// information available.
function get_fallback_open_path() {
  for (const [file_path, file_data] of Object.entries(analysis_data.files)) {
    const default_path_index = file_path.indexOf(fallback_default_path_segment);
    if (default_path_index != -1) {
      // Open up to and including the well-known segment, not the full file path.
      return file_path.substr(0, default_path_index + fallback_default_path_segment.length);
    }
  }
  return root_row.name;
}
// Fills the summary header with the file count and the line/function totals
// accumulated on the root row.
function initialize_global_overview() {
  document.getElementById("coverage-files").innerText = Object.keys(
    analysis_data.files
  ).length.toLocaleString();
  document.getElementById(
    "coverage-lines"
  ).innerText = `${root_row.num_lines_run.toLocaleString()} / ${root_row.num_lines.toLocaleString()}`;
  document.getElementById(
    "coverage-functions"
  ).innerText = `${root_row.num_functions_run.toLocaleString()} / ${root_row.num_functions.toLocaleString()}`;
}
// Makes sure that the html elements for a specific row (and all its parents) have been created.
// This data is generated lazily to improve start-up time.
function ensure_dom_for_row(row) {
  if (row.dom_elem) {
    return;
  }
  const parent = row.parent;
  if (parent) {
    ensure_dom_for_row(parent);
    // Create the DOM for all siblings at once so they appear in sorted order.
    for (const child_row of parent.sorted_children) {
      create_row_dom_elements(child_row);
      parent.dom_children_elem.appendChild(child_row.dom_elem);
      if (child_row.dom_children_elem) {
        parent.dom_children_elem.appendChild(child_row.dom_children_elem);
      }
    }
  } else {
    // The root row attaches directly to the tree-view container.
    create_row_dom_elements(row);
    const tree_view = document.getElementById("files-tree-view");
    tree_view.appendChild(row.dom_elem);
    tree_view.appendChild(row.dom_children_elem);
  }
}
// Creates the DOM for a single row: a stats span, a label span, and (for
// directories) a lazily-filled children container with a click toggle.
function create_row_dom_elements(row) {
  const name = row.name;
  const row_elem = document.createElement("div");
  row_elem.classList.add("row");
  row.dom_elem = row_elem;
  row_elem.row_data = row;
  const stats_elem = document.createElement("span");
  const label_elem = document.createElement("span");
  row_elem.appendChild(stats_elem);
  row_elem.appendChild(label_elem);
  row.dom_stats_elem = stats_elem;
  row.dom_label_elem = label_elem;
  label_elem.className = "row-label";
  let left_padding = row.depth;
  if (row.is_file && row.has_directory_separator) {
    // Add padding because this element does not have the open-directory icon.
    left_padding += 1;
  }
  label_elem.style.paddingLeft = `${left_padding}em`;
  add_row_tooltip(row);
  stats_elem.className = "row-stats";
  {
    // Percentage of executed lines, colored from red (low) to green (high).
    const lines_percent_elem = document.createElement("span");
    stats_elem.appendChild(lines_percent_elem);
    lines_percent_elem.className = "lines-percent-row";
    if (row.num_lines == 0) {
      lines_percent_elem.style.color = "rgb(137 137 137)";
      lines_percent_elem.innerText = "-";
    } else {
      const lines_percent = ratio_to_percent(row.num_lines_run, row.num_lines);
      lines_percent_elem.style.color = `color-mix(in hsl, rgb(240, 50, 50), rgb(50, 240, 50) ${lines_percent}%)`;
      lines_percent_elem.innerText = `${lines_percent}%`;
    }
    const total_lines_elem = document.createElement("span");
    total_lines_elem.className = "lines-total-row";
    total_lines_elem.innerText = `${row.num_lines.toLocaleString()}`;
    stats_elem.appendChild(total_lines_elem);
  }
  if (row.is_file) {
    // File rows link to the per-file report page.
    const link_elem = document.createElement("a");
    link_elem.href = "./files" + row.path + ".html";
    link_elem.innerText = name;
    label_elem.appendChild(link_elem);
  } else {
    // Directory rows toggle their children on click and persist that state.
    label_elem.innerText = name;
    const children_container = document.createElement("div");
    children_container.className = "children-container";
    row.dom_children_elem = children_container;
    label_elem.classList.add("closed-dir-label");
    children_container.style.display = "none";
    label_elem.addEventListener("click", () => {
      if (row.dom_children_elem.style.display === "none") {
        open_directory(row);
      } else {
        close_directory(row);
      }
      localStorage.setItem(
        open_paths_storage_key,
        JSON.stringify(Array.from(current_open_paths))
      );
      update_odd_even_rows();
    });
  }
}
// Expands a directory row, together with all of its ancestors.
function open_directory(directory_row) {
  if (directory_row.parent) {
    open_directory(directory_row.parent);
  }
  // Creating the DOM for the first child also creates all of its siblings
  // (see ensure_dom_for_row), so one call suffices here.
  if (directory_row.sorted_children.length > 0) {
    ensure_dom_for_row(directory_row.sorted_children[0]);
  }
  directory_row.dom_children_elem.style.display = "block";
  current_open_paths.add(directory_row.path);
  directory_row.dom_label_elem.classList.remove("closed-dir-label");
  directory_row.dom_label_elem.classList.add("open-dir-label");
}
// Collapses a directory row in the tree view.
function close_directory(directory_row) {
  const label = directory_row.dom_label_elem;
  directory_row.dom_children_elem.style.display = "none";
  current_open_paths.delete(directory_row.path);
  label.classList.remove("open-dir-label");
  label.classList.add("closed-dir-label");
}
// Re-assigns the alternating "odd" background class to all visible rows in
// display order; must be called whenever rows are opened or closed.
function update_odd_even_rows() {
  let index = 0;
  function update_odd_even_rows_recursive(row) {
    if (index % 2) {
      row.dom_elem.classList.add("odd");
    } else {
      row.dom_elem.classList.remove("odd");
    }
    index++;
    if (!row.is_file) {
      // Only recurse into directories that are currently expanded.
      if (row.dom_children_elem.style.display !== "none") {
        for (const child_row of row.sorted_children) {
          update_odd_even_rows_recursive(child_row);
        }
      }
    }
  }
  update_odd_even_rows_recursive(root_row);
}
// Attaches a tippy tooltip with coverage details to a row's stats (and,
// except on mobile, its label).
function add_row_tooltip(row) {
  const elems = [row.dom_stats_elem];
  // It's annoying if the tooltip shows up on mobile devices when toggling a directory.
  if (!mobileAndTabletCheck()) {
    elems.push(row.dom_label_elem);
  }
  tippy(elems, {
    content: "Loading...",
    onShow(instance) {
      // The tooltip content is generated lazily, on the first show only.
      if (!instance.tooltip_generated) {
        instance.setContent(generate_row_label_tooltip(row));
        instance.tooltip_generated = true;
        instance.show();
      }
    },
    placement: "top",
    arrow: false,
    interactive: true,
    followCursor: "initial",
    maxWidth: "none",
    delay: [400, 0],
  });
}
// Builds the tooltip DOM for a row from the hidden templates in the page,
// substituting the FUNCTIONS/LINES (and for directories FILTER_*) placeholders.
function generate_row_label_tooltip(row) {
  const template_id = row.is_file
    ? "file-row-tooltip-template"
    : "directory-row-tooltip-template";
  let template = document.getElementById(template_id).innerHTML;
  template = template.replace(
    "FUNCTIONS",
    `${row.num_functions_run.toLocaleString()} / ${row.num_functions.toLocaleString()}`
  );
  template = template.replace(
    "LINES",
    `${row.num_lines_run.toLocaleString()} / ${row.num_lines.toLocaleString()}`
  );
  if (!row.is_file) {
    // Directories additionally get a link that filters the report to this
    // directory, or removes the filter if it is already active.
    let filter_text;
    let filter_link;
    if (custom_root_paths.includes(row.path)) {
      filter_text = "Remove Filter";
      filter_link = `./index.html`;
    } else {
      filter_text = "Filter Directory";
      filter_link = `./index.html?filter=${encodeURIComponent(row.path)}`;
    }
    template = template.replace("FILTER_LINK", filter_link);
    template = template.replace("FILTER_TEXT", filter_text);
  }
  const container_elem = document.createElement("div");
  container_elem.innerHTML = template;
  return container_elem;
}
// Convenience wrapper: ratio -> fraction -> clamped integer percent.
function ratio_to_percent(numerator, denominator) {
  const fraction = ratio_to_fraction(numerator, denominator);
  return fraction_to_percent(fraction);
}
// Converts a numerator/denominator pair to a fraction; an empty denominator
// is treated as fully covered to avoid division by zero.
function ratio_to_fraction(numerator, denominator) {
  return denominator == 0 ? 1 : numerator / denominator;
}
// Converts a fraction to an integer percent, clamped so that values close to
// the edges never display as exactly 0% or 100%.
function fraction_to_percent(f) {
  if (f >= 1) return 100;
  // Avoid showing 100% if there is still something missing.
  if (f >= 0.99) return 99;
  if (f <= 0) return 0;
  // Avoid showing 0% if there is some coverage already.
  if (f <= 0.01) return 1;
  return Math.round(f * 100);
}
// Restricts analysis_data.files to the paths below the ?filter= directories.
// Without filters the files are copied unchanged into a fresh object.
function filter_analysis_data() {
  const new_analysis_files = {};
  const new_analysis_data = { files: new_analysis_files };
  if (custom_root_paths.length > 0) {
    for (const [path, fdata] of Object.entries(analysis_data.files)) {
      for (const filter_path of custom_root_paths) {
        if (path.startsWith(filter_path)) {
          new_analysis_files[path] = fdata;
        }
      }
    }
  } else {
    Object.assign(new_analysis_files, analysis_data.files);
  }
  analysis_data = new_analysis_data;
}
// Decodes a base64 string and gzip-decompresses it into text using the
// browser's built-in DecompressionStream (no external library needed).
async function str_from_gzip_base64(data_compressed_base64) {
  const compressed = atob(data_compressed_base64);
  // atob() yields a binary string; convert it byte-wise into a typed array.
  const compressed_bytes = new Uint8Array(compressed.length);
  for (let i = 0; i < compressed.length; i++) {
    compressed_bytes[i] = compressed.charCodeAt(i);
  }
  const compressed_blob = new Blob([compressed_bytes]);
  const stream = new Response(compressed_blob).body.pipeThrough(
    new DecompressionStream("gzip")
  );
  const result = await new Response(stream).text();
  return result;
}
// prettier-ignore
// Heuristic user-agent sniffing (the well-known detectmobilebrowsers.com
// snippet). Only used to decide whether label tooltips should be attached;
// not security relevant and intentionally kept verbatim.
window.mobileAndTabletCheck = function() {
let check = false;
(function(a){if(/(android|bb\d+|meego).+mobile|avantgo|bada\/|blackberry|blazer|compal|elaine|fennec|hiptop|iemobile|ip(hone|od)|iris|kindle|lge |maemo|midp|mmp|mobile.+firefox|netfront|opera m(ob|in)i|palm( os)?|phone|p(ixi|re)\/|plucker|pocket|psp|series(4|6)0|symbian|treo|up\.(browser|link)|vodafone|wap|windows ce|xda|xiino|android|ipad|playbook|silk/i.test(a)||/1207|6310|6590|3gso|4thp|50[1-6]i|770s|802s|a wa|abac|ac(er|oo|s\-)|ai(ko|rn)|al(av|ca|co)|amoi|an(ex|ny|yw)|aptu|ar(ch|go)|as(te|us)|attw|au(di|\-m|r |s )|avan|be(ck|ll|nq)|bi(lb|rd)|bl(ac|az)|br(e|v)w|bumb|bw\-(n|u)|c55\/|capi|ccwa|cdm\-|cell|chtm|cldc|cmd\-|co(mp|nd)|craw|da(it|ll|ng)|dbte|dc\-s|devi|dica|dmob|do(c|p)o|ds(12|\-d)|el(49|ai)|em(l2|ul)|er(ic|k0)|esl8|ez([4-7]0|os|wa|ze)|fetc|fly(\-|_)|g1 u|g560|gene|gf\-5|g\-mo|go(\.w|od)|gr(ad|un)|haie|hcit|hd\-(m|p|t)|hei\-|hi(pt|ta)|hp( i|ip)|hs\-c|ht(c(\-| |_|a|g|p|s|t)|tp)|hu(aw|tc)|i\-(20|go|ma)|i230|iac( |\-|\/)|ibro|idea|ig01|ikom|im1k|inno|ipaq|iris|ja(t|v)a|jbro|jemu|jigs|kddi|keji|kgt( |\/)|klon|kpt |kwc\-|kyo(c|k)|le(no|xi)|lg( g|\/(k|l|u)|50|54|\-[a-w])|libw|lynx|m1\-w|m3ga|m50\/|ma(te|ui|xo)|mc(01|21|ca)|m\-cr|me(rc|ri)|mi(o8|oa|ts)|mmef|mo(01|02|bi|de|do|t(\-| |o|v)|zz)|mt(50|p1|v )|mwbp|mywa|n10[0-2]|n20[2-3]|n30(0|2)|n50(0|2|5)|n7(0(0|1)|10)|ne((c|m)\-|on|tf|wf|wg|wt)|nok(6|i)|nzph|o2im|op(ti|wv)|oran|owg1|p800|pan(a|d|t)|pdxg|pg(13|\-([1-8]|c))|phil|pire|pl(ay|uc)|pn\-2|po(ck|rt|se)|prox|psio|pt\-g|qa\-a|qc(07|12|21|32|60|\-[2-7]|i\-)|qtek|r380|r600|raks|rim9|ro(ve|zo)|s55\/|sa(ge|ma|mm|ms|ny|va)|sc(01|h\-|oo|p\-)|sdk\/|se(c(\-|0|1)|47|mc|nd|ri)|sgh\-|shar|sie(\-|m)|sk\-0|sl(45|id)|sm(al|ar|b3|it|t5)|so(ft|ny)|sp(01|h\-|v\-|v )|sy(01|mb)|t2(18|50)|t6(00|10|18)|ta(gt|lk)|tcl\-|tdg\-|tel(i|m)|tim\-|t\-mo|to(pl|sh)|ts(70|m\-|m3|m5)|tx\-9|up(\.b|g1|si)|utst|v400|v750|veri|vi(rg|te)|vk(40|5[0-3]|\-v)|vm40|voda|vulc|vx(52|53|60|61|70|80|81|83|85|98)|w3c(\-| )|webc|whit|wi(g |nc|nw)|wmlb|wonu|x700|yas\-|your|zeto|zte\-/i.test(a.substr(0,4))) 
check = true;})(navigator.userAgent||navigator.vendor||window.opera);
return check;
};
// Persist the scroll position so it can be restored after a reload.
window.addEventListener("beforeunload", () => {
  localStorage.setItem("scroll_position", window.scrollY);
});
// Maps from a path to the corresponding row data which contains information
// like the children, corresponding DOM elements, etc.
const row_by_path = new Map();
let root_row = null;
// Used to find which path should be opened by default if there is no other information available.
const fallback_default_path_segment = "/blender/source/blender";
// Get filters from URL.
let custom_root_paths = [];
const search_params = new URLSearchParams(location.search);
for (const [key, value] of search_params.entries()) {
  if (key == "filter") {
    custom_root_paths.push(value);
  }
}
// Retrieve directories that have been open before to improve persistency
// when e.g. reloading the page.
const open_paths_storage_key = "open_paths";
let previous_open_paths = localStorage.getItem(open_paths_storage_key);
if (previous_open_paths && previous_open_paths.length > 0) {
  previous_open_paths = new Set(JSON.parse(previous_open_paths));
} else {
  previous_open_paths = undefined;
}
// Paths of the directories that are currently expanded in the tree view.
const current_open_paths = new Set();
// This data will be replaced by the script that builds the report. It still has to
// be uncompressed.
const analysis_data_compressed_base64 = "ANALYSIS_DATA";
// Uncompressed analysis data. Uncompressing is done a bit later in an async context.
let analysis_data = undefined;
</script>
</body>
</html>

View File

@@ -0,0 +1,315 @@
# SPDX-FileCopyrightText: 2024 Blender Authors
#
# SPDX-License-Identifier: GPL-2.0-or-later
import concurrent.futures
import json
import multiprocessing
import os
import random
import shutil
import subprocess
import sys
import textwrap
import time
import zipfile
from collections import defaultdict
from pathlib import Path
from pprint import pprint
from .util import print_updateable_line
def parse(build_dir, analysis_dir, gcov_binary="gcov"):
    """
    Parses coverage data generated in the given directory, merges it, and stores
    result in the analysis directory.

    build_dir: Blender build directory; it is scanned recursively for .gcda files.
    analysis_dir: Output directory for the analysis data; old results are cleared.
    gcov_binary: Path to, or PATH-name of, the gcov executable.

    Raises RuntimeError when gcov cannot be found or no coverage data exists.
    """
    build_dir = Path(build_dir).absolute()
    analysis_dir = Path(analysis_dir).absolute()
    gcov_path = get_gcov_path(gcov_binary)
    if gcov_path is None or not gcov_path.exists():
        raise RuntimeError("Gcov not found.")
    gcda_paths = gather_gcda_files(build_dir)
    if len(gcda_paths) == 0:
        raise RuntimeError(
            textwrap.dedent(
                """\
                No .gcda files found. Make sure to run the tests in a debug build that has
                been compiled with GCC with --coverage.
                """
            )
        )
    # Invoke gcov many times in parallel to get the data in json format.
    gcov_outputs = parse_gcda_files_with_gcov(gcda_paths, gcov_path)
    gcov_by_source_file = collect_data_per_file(gcov_outputs)
    if len(gcov_by_source_file) == 0:
        raise RuntimeError("No coverage data found.")
    # Sort files to make the progress report more useful.
    source_file_order = list(sorted(list(gcov_by_source_file.keys())))
    # Many object files may have collected data from the same source files.
    data_by_source_file = merge_coverage_data(gcov_by_source_file, source_file_order)
    # Generate summary for each file.
    summary = compute_summary(data_by_source_file, source_file_order)
    clear_old_analysis_on_disk(analysis_dir)
    write_analysis_to_disk(analysis_dir, summary, data_by_source_file, source_file_order)
def get_gcov_path(gcov_binary):
    """Resolve the gcov executable.

    Accepts either a direct file path or a name that can be found on the PATH.
    Returns the resolved Path, or None when neither works.
    """
    candidate = Path(gcov_binary)
    if candidate.is_file():
        return candidate.absolute()
    located = shutil.which(gcov_binary)
    if located is None:
        return None
    return Path(located)
def gather_gcda_files(build_dir):
    """Recursively collect all .gcda coverage data files below the build directory,
    printing a progress line while scanning."""
    print("Gather .gcda files...")
    gcda_paths = []
    for found_path in build_dir.glob("**/*.gcda"):
        gcda_paths.append(found_path)
        print_updateable_line("[{}]: {}".format(len(gcda_paths), found_path))
    print()
    return gcda_paths
def parse_gcda_files_with_gcov(gcda_paths, gcov_path):
    """Run gcov over all `.gcda` files and return the parsed json records.

    :param gcda_paths: Paths of the `.gcda` files to process. The list is not
        modified.
    :param gcov_path: Path to the gcov executable.
    :return: A list of dictionaries, one per gcov json output line.
    """
    # Shuffle to make chunks more similar in size. Shuffle a copy so that the
    # caller's list is not reordered as a side effect.
    shuffled_paths = list(gcda_paths)
    random.shuffle(shuffled_paths)
    # Gcov can process multiple files in a single invocation. So split all the tasks into chunks
    # to reduce the total number of required gcov invocations. The chunks should not be too large
    # because then multi-threading is less useful.
    chunk_size = 10
    gcda_path_chunks = [
        shuffled_paths[i: i + chunk_size] for i in range(0, len(shuffled_paths), chunk_size)
    ]

    def parse_with_gcov(file_paths):
        # `--stdout` avoids writing `.gcov` files, `--json-format` makes the
        # output machine readable (one json document per processed file).
        return subprocess.check_output([gcov_path, "--stdout", "--json-format", *file_paths])

    print("Parse files...")
    print_updateable_line("[0/{}] parsed.".format(len(gcda_paths)))
    gcov_outputs = []
    # Use multi-threading instead of multi-processing here because the actual work is done
    # in separate gcov processes which run in parallel. Every gcov process is managed by a
    # separate thread. This does not seem strictly necessary, but was good enough and easy.
    # NOTE: `os.cpu_count()` may return None, so fall back to a small default then.
    max_workers = (os.cpu_count() or 4) * 2
    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
        futures = {
            executor.submit(parse_with_gcov, file_paths): file_paths
            for file_paths in gcda_path_chunks
        }
        done_count = 0
        for future in concurrent.futures.as_completed(futures):
            file_paths = futures[future]
            done_count += len(file_paths)
            try:
                # Gcov outputs a line for each file that it processed.
                for line in future.result().splitlines():
                    gcov_outputs.append(json.loads(line))
            except Exception as e:
                # Best effort: a failing chunk should not abort the whole run.
                print("Error:", e)
            print_updateable_line("[{}/{}] parsed.".format(done_count, len(gcda_paths)))
    print()
    return gcov_outputs
def collect_data_per_file(gcov_outputs):
    """Group the per-file records from all gcov invocations by source file path."""
    grouped = defaultdict(list)
    for output in gcov_outputs:
        for file_data in output["files"]:
            grouped[file_data["file"]].append(file_data)
    return grouped
def merge_coverage_data(gcov_by_source_file, source_file_order):
    """Merge the gcov records gathered from multiple object files per source file.

    :param gcov_by_source_file: Maps a source file path to all gcov file
        records that reference it.
    :param source_file_order: Order in which files are processed (for progress
        reporting only).
    :return: Maps each source file path to a dictionary with the merged
        per-function ("functions") and unattributed per-line ("loose_lines")
        execution counts.
    """
    print("Merge coverage data...")
    data_by_source_file = {}
    for i, file_path in enumerate(source_file_order):
        print_updateable_line("[{}/{}] merged: {}".format(i + 1, len(gcov_by_source_file), file_path))
        # For templated code, many functions may be generated for the same function in the source code.
        # Often we want to merge data from these individual instantiations together though. It's hard
        # to find the functions that belong together based on the name. However, we can use the source
        # code location as a value that's common for all instantiations of the same function.
        function_by_location = {}
        # Sometimes lines don't have function information. Not sure what the exact rules are here.
        # I found that this is sometimes the case for inline functions.
        loose_lines = defaultdict(int)
        # Maps a name of a specific function instantiation to its source code location.
        location_key_by_mangled_name = {}
        # See the `--json-format` documentation to understand the input data:
        # https://gcc.gnu.org/onlinedocs/gcc/Invoking-Gcov.html
        for gcov_data in gcov_by_source_file[file_path]:
            for gcov_function in gcov_data["functions"]:
                start_line = gcov_line_number_to_index(gcov_function["start_line"])
                end_line = gcov_line_number_to_index(gcov_function["end_line"])
                start_column = gcov_function["start_column"]
                end_column = gcov_function["end_column"]
                # Build an identifier for the function that is common among all template instantiations.
                location_key = "{}:{}-{}:{}".format(start_line, start_column, end_line, end_column)
                if location_key not in function_by_location:
                    function_by_location[location_key] = {
                        "start_line": start_line,
                        "end_line": end_line,
                        "start_column": start_column,
                        "end_column": end_column,
                        "execution_count": 0,
                        "instantiations": {},
                        "lines": defaultdict(int),
                    }
                mangled_name = gcov_function["name"]
                demangled_name = gcov_function["demangled_name"]
                execution_count = gcov_function["execution_count"]
                location_key_by_mangled_name[mangled_name] = location_key
                function = function_by_location[location_key]
                # Accumulate counts over all records and all instantiations at this location.
                function["execution_count"] += execution_count
                if mangled_name not in function["instantiations"]:
                    function["instantiations"][mangled_name] = {
                        "demangled": demangled_name,
                        "execution_count": 0,
                        "lines": defaultdict(int),
                    }
                function["instantiations"][mangled_name]["execution_count"] += execution_count
            for gcov_line in gcov_data["lines"]:
                line_index = gcov_line_number_to_index(gcov_line["line_number"])
                count = gcov_line["count"]
                mangled_name = gcov_line.get("function_name")
                if mangled_name is None:
                    # No owning function known; keep the count separately.
                    loose_lines[line_index] += gcov_line["count"]
                else:
                    # Attribute the line both to the merged function and to the
                    # specific instantiation it came from.
                    location_key = location_key_by_mangled_name[mangled_name]
                    function = function_by_location[location_key]
                    function["lines"][line_index] += count
                    instantiation = function["instantiations"][mangled_name]
                    instantiation["lines"][line_index] += count
        data_by_source_file[file_path] = {
            "file": file_path,
            "functions": function_by_location,
            "loose_lines": loose_lines,
        }
    print()
    return data_by_source_file
def compute_summary(data_by_source_file, source_file_order):
    """Compute per-file coverage counters from the merged coverage data.

    Two kinds of counters are computed for both lines and functions:
    * "instantiated" counters count every template instantiation separately,
    * plain counters count each source line/function once, no matter how many
      instantiations exist (sets are used for deduplication).

    :return: A dictionary with a "files" entry mapping each source file path
        to its counters.
    """
    print("Compute summaries...")
    summary_by_source_file = {}
    for i, file_path in enumerate(source_file_order):
        data = data_by_source_file[file_path]
        # Progress message fixed: this step summarizes, nothing is written yet.
        print_updateable_line("[{}/{}] summarized: {}".format(i + 1, len(data_by_source_file), file_path))
        num_instantiated_lines = 0
        num_instantiated_lines_run = 0
        num_instantiated_functions = 0
        num_instantiated_functions_run = 0
        all_lines = set()
        run_lines = set()
        all_function_keys = set()
        run_function_keys = set()
        for function_key, fdata in data["functions"].items():
            all_function_keys.add(function_key)
            if fdata["execution_count"] > 0:
                run_function_keys.add(function_key)
            for line_index, execution_count in fdata["lines"].items():
                all_lines.add(line_index)
                if execution_count > 0:
                    run_lines.add(line_index)
            for function_name, instantiation_fdata in fdata["instantiations"].items():
                num_instantiated_functions += 1
                if instantiation_fdata["execution_count"] > 0:
                    num_instantiated_functions_run += 1
                for line_index, execution_count in instantiation_fdata["lines"].items():
                    num_instantiated_lines += 1
                    if execution_count > 0:
                        num_instantiated_lines_run += 1
        # Lines that gcov did not attribute to a function still count towards
        # line coverage.
        for line_index, execution_count in data["loose_lines"].items():
            num_instantiated_lines += 1
            all_lines.add(line_index)
            if execution_count > 0:
                num_instantiated_lines_run += 1
                run_lines.add(line_index)
        summary_by_source_file[file_path] = {
            "num_instantiated_lines": num_instantiated_lines,
            "num_instantiated_lines_run": num_instantiated_lines_run,
            "num_instantiated_functions": num_instantiated_functions,
            "num_instantiated_functions_run": num_instantiated_functions_run,
            "num_lines": len(all_lines),
            "num_lines_run": len(run_lines),
            "num_functions": len(all_function_keys),
            "num_functions_run": len(run_function_keys),
        }
    print()
    summary = {
        "files": summary_by_source_file,
    }
    return summary
def clear_old_analysis_on_disk(analysis_dir):
    """Remove previously generated analysis output, if any.

    Removal errors (e.g. the directory does not exist) are ignored on
    purpose because a fresh analysis is written right afterwards anyway.
    """
    print("Clear old analysis...")
    # `ignore_errors` replaces the previous bare `except: pass`, which also
    # swallowed KeyboardInterrupt and SystemExit.
    shutil.rmtree(analysis_dir, ignore_errors=True)
def write_analysis_to_disk(analysis_dir, summary, data_by_source_file, source_file_order):
    """Store the summary and the per-file analysis as compressed json files."""
    print("Write summary...")
    write_dict_to_zip_file(analysis_dir / "summary.json.zip", summary)
    print("Write per file analysis...")
    total = len(data_by_source_file)
    for i, file_path in enumerate(source_file_order):
        # Mirror the absolute source path below `analysis_dir / "files"`.
        out_path = "{}.json.zip".format(analysis_dir / "files" / Path(file_path).relative_to("/"))
        print_updateable_line("[{}/{}] written: {}".format(i + 1, total, out_path))
        write_dict_to_zip_file(out_path, data_by_source_file[file_path])
    print()
    print("Parsed data written to {}.".format(analysis_dir))
def gcov_line_number_to_index(line_number):
    """Convert a 1-based gcov line number to a 0-based line index."""
    # Gcov starts counting lines at 1.
    index = line_number - 1
    return index
def write_dict_to_zip_file(zip_file_path, data):
    """Serialize `data` as json into a single-entry deflate-compressed zip file.

    The entry is named like the zip file without its final suffix
    (e.g. `summary.json.zip` contains `summary.json`). Parent directories are
    created as needed.
    """
    path = Path(zip_file_path)
    path.parent.mkdir(parents=True, exist_ok=True)
    # Serializing up front was much faster than streaming with json.dump.
    serialized = json.dumps(data)
    entry_name = path.with_suffix("").name
    with zipfile.ZipFile(path, "w", compression=zipfile.ZIP_DEFLATED) as archive:
        archive.writestr(entry_name, serialized)

View File

@@ -0,0 +1,518 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>TITLE Coverage</title>
<!-- Libraries for code highlighting. -->
<link
rel="stylesheet"
href="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/11.10.0/styles/default.min.css"
integrity="sha512-hasIneQUHlh06VNBe7f6ZcHmeRTLIaQWFd43YriJ0UND19bvYRauxthDg8E4eVNPm9bRUhr5JGeqH7FRFXQu5g=="
crossorigin="anonymous"
referrerpolicy="no-referrer"
/>
<link
rel="stylesheet"
href="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/11.10.0/styles/a11y-dark.min.css"
integrity="sha512-Vj6gPCk8EZlqnoveEyuGyYaWZ1+jyjMPg8g4shwyyNlRQl6d3L9At02ZHQr5K6s5duZl/+YKMnM3/8pDhoUphg=="
crossorigin="anonymous"
referrerpolicy="no-referrer"
/>
<script
src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/11.10.0/highlight.min.js"
integrity="sha512-6yoqbrcLAHDWAdQmiRlHG4+m0g/CT/V9AGyxabG8j7Jk8j3r3K6due7oqpiRMZqcYe9WM2gPcaNNxnl2ux+3tA=="
crossorigin="anonymous"
referrerpolicy="no-referrer"
></script>
<script
src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/11.10.0/languages/cpp.min.js"
integrity="sha512-GvXk866wSSbg5H/SHQ41QgYaKtp98r/+3vEa6vz3vB1q6Jpt53hGeWUDHjFyflWk/vv1EcrdNY2ZNz/gWxFIsg=="
crossorigin="anonymous"
referrerpolicy="no-referrer"
></script>
<script
src="https://cdnjs.cloudflare.com/ajax/libs/highlightjs-line-numbers.js/2.6.0/highlightjs-line-numbers.min.js"
integrity="sha512-nkjLcPbHjdAof51b8uUd+6q4YH7YrMwh+kfTwSBrg5T/yMKrz8GUxM4uJJ1xAL7Q1lfAMIEowDsTzfWskZ5RcQ=="
crossorigin="anonymous"
referrerpolicy="no-referrer"
></script>
<script
src="https://cdn.jsdelivr.net/gh/TRSasasusu/highlightjs-highlight-lines.js@1.2.0/highlightjs-highlight-lines.min.js"
integrity="sha512-6wevP4KzPut+rTlItH5T2H7vOiy/E/GJIK7SDCiGoxO2gdqpYjRv0MhFbk72HuRbbexVZ6vqV++w82DwRdiGPw=="
crossorigin="anonymous"
referrerpolicy="no-referrer"
></script>
<!-- Libraries for tooltips. -->
<script
src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/2.11.8/umd/popper.min.js"
integrity="sha512-TPh2Oxlg1zp+kz3nFA0C5vVC6leG/6mm1z9+mA81MI5eaUVqasPLO8Cuk4gMF4gUfP5etR73rgU/8PNMsSesoQ=="
crossorigin="anonymous"
referrerpolicy="no-referrer"
></script>
<link
rel="stylesheet"
href="https://cdnjs.cloudflare.com/ajax/libs/tippy.js/6.3.7/tippy.min.css"
integrity="sha512-HbPh+j4V7pXprvQMt2dtmK/zCEsUeZWYXRln4sOwmoyHPQAPqy/k9lIquKUyKNpNbDGAY06UdiDHcEkBc72yCQ=="
crossorigin="anonymous"
referrerpolicy="no-referrer"
/>
<script
src="https://cdnjs.cloudflare.com/ajax/libs/tippy.js/6.3.7/tippy.umd.min.js"
integrity="sha512-2TtfktSlvvPzopzBA49C+MX6sdc7ykHGbBQUTH8Vk78YpkXVD5r6vrNU+nOmhhl1MyTWdVfxXdZfyFsvBvOllw=="
crossorigin="anonymous"
referrerpolicy="no-referrer"
></script>
<style>
* {
margin: 0;
padding: 0;
border: 0;
}
body {
background: #2b2b2b;
color: #e1dfcc;
}
#summary {
padding: 0.5em;
font-size: large;
font-family: monospace;
}
a {
color: #e1dfcc;
}
a:hover {
color: #89dbdb;
}
/* For block of numbers. */
.hljs-ln-numbers {
user-select: none;
text-align: right;
color: #ccc;
border-right: 1px solid #ccc;
vertical-align: top;
padding-right: 0.5em !important;
}
/* For block of code. */
.hljs-ln-code {
padding-left: 0.5em !important;
}
.line-call-count {
color: #f7f2f2;
font-size: 80%;
user-select: none;
}
.tippy-box {
background-color: #18110d;
border-radius: 3px;
}
.tippy-arrow {
color: #18110d;
}
.tippy-content {
max-width: calc(100vw - 20px);
overflow: auto;
padding: 0.3em;
}
table {
color: #e1dfcc;
font-family: monospace;
}
td {
padding: 0.2em;
padding-left: 1em;
}
hr {
border-top: #535353 1px solid;
}
.function-name-in-table {
cursor: pointer;
}
.function-name-in-table:hover {
color: #89dbdb;
}
.lines-in-function {
font-size: smaller;
color: #7d7d7d;
}
</style>
</head>
<body>
<div id="summary">
<p>File: SOURCE_FILE_PATH</p>
<p><a href="INDEX_PAGE_LINK">Back to Overview</a></p>
<br />
<p>Lines: <span id="coverage-lines"></span></p>
<p>Functions: <span id="coverage-functions"></span></p>
</div>
<hr />
<table id="functions-overview"></table>
<hr />
<pre><code class="language-cpp" id="source-code"></code></pre>
<!-- Support scrolling past the end. -->
<div style="height: 90vh"></div>
<script>
// Entry point: decompress the embedded payloads, render the report, and then
// finish the parts that depend on the highlighter having produced line markup.
window.addEventListener("DOMContentLoaded", async () => {
  // The generator replaced SOURCE_CODE/ANALYSIS_DATA with gzip+base64 payloads.
  source_code = await str_from_gzip_base64(source_code_compressed_base64);
  analysis_data = JSON.parse(await str_from_gzip_base64(analysis_data_compressed_base64));
  document.getElementById("source-code").innerHTML = escapeHtml(source_code);
  prepare_analysis_data();
  generate_functions_table();
  hljs.highlightAll();
  hljs.initLineNumbersOnLoad();
  set_line_background_colors();
  // Need to delay here in case the line background colors are not set immediately.
  // Poll until the line-number plugin has created the per-line elements.
  const interval = window.setInterval(() => {
    if (document.querySelector(".hljs-ln-code .hljs-ln-line")) {
      clearInterval(interval);
      create_overview();
      augment_lines();
      scroll_to_line_from_url();
    }
  }, 10);
});
// Escape HTML metacharacters so arbitrary source code renders as plain text.
function escapeHtml(unsafe) {
  const replacements = {
    "&": "&amp;",
    "<": "&lt;",
    ">": "&gt;",
    '"': "&quot;",
    "'": "&#039;",
  };
  return unsafe.replace(/[&<>"']/g, (c) => replacements[c]);
}
// Aggregate the decompressed analysis data into the global counters and build
// the per-line execution count map (`count_by_line`) used for rendering.
function prepare_analysis_data() {
  for (const [function_key, fdata] of Object.entries(analysis_data.functions)) {
    num_functions++;
    if (fdata.execution_count > 0) {
      num_run_functions++;
    }
    for (const [line_index_str, count] of Object.entries(fdata.lines)) {
      // JSON object keys are strings; line indices are numeric.
      const line_index = parseInt(line_index_str);
      count_by_line.set(line_index, count);
      num_executable_lines++;
      if (count > 0) {
        num_run_lines++;
      }
    }
  }
  // Lines that gcov did not attribute to any function still count.
  for (const [line_index_str, count] of Object.entries(analysis_data.loose_lines)) {
    const line_index = parseInt(line_index_str);
    count_by_line.set(line_index, count);
    num_executable_lines++;
    if (count > 0) {
      num_run_lines++;
    }
  }
}
// Tint every executable line: reddish when never run, greenish otherwise.
function set_line_background_colors() {
  const ranges = Array.from(count_by_line.entries(), ([line_index, hit_count]) => ({
    // The analysis uses 0-based indices, the highlighter expects 1-based lines.
    start: line_index + 1,
    end: line_index + 1,
    color: hit_count_to_color(hit_count),
  }));
  // highlightLinesAll takes one array of ranges per code block; there is one block.
  hljs.highlightLinesAll([ranges]);
}
// Background color for a line: reddish when never executed, greenish otherwise.
function hit_count_to_color(hit_count) {
  return hit_count == 0 ? "rgb(65 48 48)" : "rgb(42 53 42)";
}
// Give every rendered source line a stable element id (for deep links) and
// append an execution count label to every line that ran at least once.
function augment_lines() {
  code_line_elements = Array.from(document.querySelectorAll(".hljs-ln-code .hljs-ln-line"));
  for (let i = 0; i < code_line_elements.length; i++) {
    const line_element = code_line_elements[i];
    // The id goes on the table row so that scrolling targets the whole line.
    line_element.parentElement.parentElement.id = `source-line-${i + 1}`;
  }
  for (let i = 0; i < code_line_elements.length; i++) {
    const line_element = code_line_elements[i];
    const hit_count = count_by_line.get(i);
    // Skips both unknown lines (undefined) and lines that never ran (0).
    if (!hit_count) {
      continue;
    }
    const call_count_element = document.createElement("span");
    call_count_element.className = "line-call-count";
    call_count_element.innerText = ` ${hit_count.toLocaleString()}x`;
    line_element.appendChild(call_count_element);
    add_executed_line_tooltip(call_count_element, i);
  }
}
// Attach a tooltip that lists the template instantiations which executed the
// given line. The content is generated lazily on first show because building
// it requires scanning the whole analysis data.
function add_executed_line_tooltip(elem, line_index) {
  tippy(elem, {
    content: "Loading...",
    onShow(instance) {
      if (!instance.tooltip_generated) {
        instance.setContent(generate_executed_line_tooltip(line_index));
        instance.tooltip_generated = true;
        // Re-show so the freshly set content is displayed immediately.
        instance.show();
      }
    },
    placement: "bottom",
    arrow: true,
    interactive: true,
    maxWidth: "none",
    delay: [400, 0],
  });
}
// Build the tooltip content for a line: all function instantiations that
// executed it, sorted by execution count.
function generate_executed_line_tooltip(query_line_index) {
  const instantiations = [];
  for (const [function_key, fdata] of Object.entries(analysis_data.functions)) {
    // Skip functions whose source range does not contain the queried line.
    if (query_line_index < fdata.start_line || fdata.end_line < query_line_index) {
      continue;
    }
    for (const [instantiation_name, idata] of Object.entries(fdata.instantiations)) {
      for (const [line_index_str, count] of Object.entries(idata.lines)) {
        if (count > 0) {
          if (parseInt(line_index_str) == query_line_index) {
            instantiations.push({ name: idata.demangled, count: count });
          }
        }
      }
    }
  }
  if (instantiations.length === 0) {
    return "No used instantiations";
  }
  const container = document.createElement("div");
  const header = document.createElement("h4");
  header.innerText = "Used Instantiations";
  container.appendChild(header);
  // Most executed instantiations first.
  instantiations.sort((a, b) => b.count - a.count);
  // Pad all counts to the width of the largest one so they line up.
  const max_count = instantiations[0].count;
  const num_count_chars = max_count.toLocaleString().length;
  for (const { name, count } of instantiations) {
    if (count == 0) {
      continue;
    }
    const elem = document.createElement("pre");
    const count_label = count.toLocaleString().padStart(num_count_chars, " ");
    const escaped_name = escapeHtml(name);
    elem.innerHTML = `${count_label}x ${escaped_name}`;
    container.appendChild(elem);
  }
  return container;
}
// Fill in the line/function coverage summary shown at the top of the page.
function create_overview() {
  const lines_percent = ratio_to_percent(num_run_lines, num_executable_lines);
  const lines_label = `${num_run_lines} / ${num_executable_lines} (${lines_percent}%)`;
  document.getElementById("coverage-lines").innerText = lines_label;
  const functions_percent = ratio_to_percent(num_run_functions, num_functions);
  const functions_label = `${num_run_functions} / ${num_functions} (${functions_percent}%)`;
  document.getElementById("coverage-functions").innerText = functions_label;
}
// Build the per-function overview table shown above the source code. Columns:
// line coverage percentage, number of executable lines, execution count, name.
function generate_functions_table() {
  const table = document.getElementById("functions-overview");
  // Most executed functions first.
  const fdata_array = Array.from(Object.values(analysis_data.functions));
  fdata_array.sort((a, b) => b.execution_count - a.execution_count);
  for (const fdata of fdata_array) {
    const row_elem = document.createElement("tr");
    table.appendChild(row_elem);
    const percentage_elem = document.createElement("td");
    percentage_elem.style.textAlign = "right";
    const lines_elem = document.createElement("td");
    lines_elem.style.textAlign = "right";
    const count_elem = document.createElement("td");
    count_elem.style.textAlign = "right";
    const name_elem = document.createElement("td");
    row_elem.appendChild(percentage_elem);
    row_elem.appendChild(lines_elem);
    row_elem.appendChild(count_elem);
    row_elem.appendChild(name_elem);
    // Line coverage within this function (all instantiations merged).
    const execution_counts_array = Array.from(Object.values(fdata.lines));
    const num_lines = execution_counts_array.length;
    const num_lines_run = execution_counts_array.filter((x) => x > 0).length;
    const lines_percent = ratio_to_percent(num_lines_run, num_lines);
    count_elem.innerText = `${fdata.execution_count.toLocaleString()}x`;
    percentage_elem.innerText = `${lines_percent}%`;
    // Interpolate the percentage color from red (0%) to green (100%).
    percentage_elem.style.color = `color-mix(in hsl, rgb(240, 50, 50), rgb(50, 240, 50) ${lines_percent}%)`;
    lines_elem.innerText = `${num_lines}`;
    lines_elem.className = "lines-in-function";
    name_elem.classList.add("function-name-in-table");
    // Derive a readable base name from the first instantiation's demangled name.
    const first_instantiation_name = Array.from(Object.values(fdata.instantiations))[0]
      .demangled;
    const base_name = extract_base_function_name(first_instantiation_name);
    name_elem.innerText = base_name;
    add_executed_line_tooltip(count_elem, fdata.start_line);
    add_simple_tooltip(percentage_elem, "Line Coverage");
    add_simple_tooltip(lines_elem, "Number of Lines");
    // Clicking a function name scrolls to its first line in the source view.
    name_elem.addEventListener("click", () => {
      const line_number = fdata.start_line + 1;
      const id = `source-line-${line_number}`;
      // Push state so the browser back button returns to the previous position.
      history.pushState({ scroll_from: window.scrollY }, "");
      document.getElementById(id).scrollIntoView({
        behavior: "smooth",
      });
    });
  }
}
// Attach a small delayed tooltip with a fixed text message to an element.
function add_simple_tooltip(element, message) {
  const pre = document.createElement("pre");
  pre.innerText = message;
  tippy(element, { content: pre, delay: 400 });
}
// Reduce a demangled instantiation name to a short display name by collapsing
// template arguments to "<>" and removing the parameter list.
function extract_base_function_name(instantiation_name) {
  let result = "";
  let template_depth = 0;
  let brace_depth = 0;
  let args_begin = -1;
  for (const ch of instantiation_name) {
    if (ch === "{") {
      brace_depth++;
    } else if (ch === "}") {
      brace_depth--;
    }
    // Remember where the top-level argument list starts in the output string.
    if (ch === "(" && template_depth === 0 && brace_depth === 0 && args_begin === -1) {
      args_begin = result.length;
    }
    // Angle brackets open/close template argument lists, except when they are
    // part of an operator name like operator< or operator<<.
    if (ch === "<" && !(result.endsWith("::operator") || result.endsWith("::operator<"))) {
      template_depth++;
    } else if (ch === ">" && !(result.endsWith("::operator") || result.endsWith("::operator>"))) {
      template_depth--;
      if (template_depth === 0) {
        result += "<>";
      }
    } else if (template_depth === 0) {
      result += ch;
    }
  }
  // Remove the argument list that was recorded above.
  if (args_begin >= 0) {
    const args_end = result.lastIndexOf(")");
    result = result.slice(0, args_begin) + result.slice(args_end + 1);
  }
  // Special case: the "()" of operator() was stripped as an argument list.
  if (result.endsWith("::operator")) {
    result += "()";
  }
  return result;
}
// If the URL fragment names a source line (e.g. #source-line-12), jump to it.
function scroll_to_line_from_url() {
  const hash = window.location.hash;
  if (!hash.startsWith("#source-line-")) {
    return;
  }
  const target = document.getElementById(hash.slice(1));
  target.scrollIntoView();
}
// Convert a numerator/denominator pair to a display percentage.
function ratio_to_percent(numerator, denominator) {
  return fraction_to_percent(ratio_to_fraction(numerator, denominator));
}
// An empty denominator counts as full coverage (nothing was expected).
function ratio_to_fraction(numerator, denominator) {
  return denominator == 0 ? 1 : numerator / denominator;
}
// Clamp the percentage so that "almost done" never shows 100% and
// "barely started" never shows 0%.
function fraction_to_percent(f) {
  if (f <= 0) {
    return 0;
  }
  if (f >= 1) {
    return 100;
  }
  if (f <= 0.01) {
    // Avoid showing 0% if there is some coverage already.
    return 1;
  }
  if (f >= 0.99) {
    // Avoid showing 100% if there is still something missing.
    return 99;
  }
  return Math.round(f * 100);
}
// Decode a base64-encoded, gzip-compressed payload back into a string using
// the browser's built-in DecompressionStream.
async function str_from_gzip_base64(data_compressed_base64) {
  // atob yields a "binary string" with one byte per character.
  const compressed = atob(data_compressed_base64);
  const compressed_bytes = new Uint8Array(compressed.length);
  for (let i = 0; i < compressed.length; i++) {
    compressed_bytes[i] = compressed.charCodeAt(i);
  }
  // Route the bytes through a gzip decompression stream via Blob/Response.
  const compressed_blob = new Blob([compressed_bytes]);
  const stream = new Response(compressed_blob).body.pipeThrough(
    new DecompressionStream("gzip")
  );
  const result = await new Response(stream).text();
  return result;
}
// Execution count per 0-based source line index, filled by prepare_analysis_data().
const count_by_line = new Map();
// Aggregated counters for the summary at the top of the page.
let num_executable_lines = 0;
let num_run_lines = 0;
let num_functions = 0;
let num_run_functions = 0;
// Placeholders; the report generator substitutes gzip+base64 payloads here.
const source_code_compressed_base64 = "SOURCE_CODE";
const analysis_data_compressed_base64 = "ANALYSIS_DATA";
// Will be decompressed a bit later.
let source_code = undefined;
let analysis_data = undefined;
// Initialized when line counts have been added.
let code_line_elements = undefined;
</script>
</body>
</html>

View File

@@ -0,0 +1,13 @@
# SPDX-FileCopyrightText: 2024 Blender Authors
#
# SPDX-License-Identifier: GPL-2.0-or-later
# Length of the most recently printed line, used to blank it out before the
# next update.
last_line_length = 0


def print_updateable_line(data):
    """Print `data` on the current terminal line, overwriting the previous output.

    Uses carriage returns to rewrite a single line for progress reporting. The
    old content is blanked with spaces first so a shorter new line does not
    leave stale characters behind.
    """
    global last_line_length
    print(" " * last_line_length, end="\r")
    # Flush explicitly: no newline is ever printed, so line-buffered output
    # would otherwise not show the progress until much later.
    print(data, end="\r", flush=True)
    last_line_length = len(data)