diff --git a/CMakeLists.txt b/CMakeLists.txt
index 68e01b75bbc..27de11815ab 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -965,6 +965,28 @@ Can cause linking issues due to too large binary size."
 )
 mark_as_advanced(WITH_COMPILER_ASAN_EXTERN)
 
+option(WITH_COMPILER_CODE_COVERAGE "\
+Build and link with code coverage support (only for Debug targets)."
+OFF
+)
+mark_as_advanced(WITH_COMPILER_CODE_COVERAGE)
+
+if(WITH_COMPILER_CODE_COVERAGE)
+  if(NOT CMAKE_COMPILER_IS_GNUCC)
+    message("WITH_COMPILER_CODE_COVERAGE only works with GCC currently.")
+    set(WITH_COMPILER_CODE_COVERAGE OFF)
+  endif()
+endif()
+
+if(WITH_COMPILER_CODE_COVERAGE)
+  set(_code_coverage_defaults "--coverage")
+  set(COMPILER_CODE_COVERAGE_CFLAGS ${_code_coverage_defaults} CACHE STRING "C flags for code coverage")
+  mark_as_advanced(COMPILER_CODE_COVERAGE_CFLAGS)
+  set(COMPILER_CODE_COVERAGE_CXXFLAGS ${_code_coverage_defaults} CACHE STRING "C++ flags for code coverage")
+  mark_as_advanced(COMPILER_CODE_COVERAGE_CXXFLAGS)
+  unset(_code_coverage_defaults)
+endif()
+
 if(CMAKE_COMPILER_IS_GNUCC OR CMAKE_C_COMPILER_ID MATCHES "Clang")
   if(WITH_COMPILER_ASAN)
     set(_asan_defaults "\
@@ -1360,6 +1382,11 @@ set(PLATFORM_LINKFLAGS_DEBUG "")
 set(PLATFORM_LINKFLAGS_RELEASE "")
 set(PLATFORM_LINKFLAGS_EXECUTABLE "")
 
+if(WITH_COMPILER_CODE_COVERAGE)
+  string(APPEND CMAKE_C_FLAGS_DEBUG " ${COMPILER_CODE_COVERAGE_CFLAGS}")
+  string(APPEND CMAKE_CXX_FLAGS_DEBUG " ${COMPILER_CODE_COVERAGE_CXXFLAGS}")
+endif()
+
 if(NOT CMAKE_BUILD_TYPE MATCHES "Release")
   if(WITH_COMPILER_ASAN)
     if(NOT APPLE)
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 00c96eeda54..dce54ffdcf5 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -69,3 +69,17 @@ endif()
 
 # GTest
 add_subdirectory(gtests)
+
+if(WITH_COMPILER_CODE_COVERAGE)
+  set(COVERAGE_SCRIPT_PATH ${CMAKE_CURRENT_SOURCE_DIR}/coverage/coverage.py)
+
+  add_custom_target(coverage-report
+    ${PYTHON_EXECUTABLE} ${COVERAGE_SCRIPT_PATH} report --build-directory ${CMAKE_BINARY_DIR}
+    USES_TERMINAL
+  )
+
+  add_custom_target(coverage-reset
+    ${PYTHON_EXECUTABLE} ${COVERAGE_SCRIPT_PATH} reset --build-directory ${CMAKE_BINARY_DIR}
+    USES_TERMINAL
+  )
+endif()
diff --git a/tests/coverage/coverage.py b/tests/coverage/coverage.py
new file mode 100755
index 00000000000..f1491b4a9a8
--- /dev/null
+++ b/tests/coverage/coverage.py
@@ -0,0 +1,97 @@
+#!/usr/bin/env python3
+
+# SPDX-FileCopyrightText: 2024 Blender Authors
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+import argparse
+import os
+import shutil
+import sys
+import textwrap
+import webbrowser
+
+from coverage_report import parse, report_as_html
+from coverage_report.util import print_updateable_line
+from pathlib import Path
+
+usage = textwrap.dedent(
+    """\
+    coverage.py <command> [<args>]
+
+    Commands:
+      report                  Analyse coverage data and generate an HTML report.
+        [--build-directory]   Blender build directory. This will be scanned for .gcda files.
+        [--no-browser]        Don't open the browser at the end.
+      reset [--build-directory]   Delete .gcda files.
+      help                    Show this help.
+    """
+)
+
+
+def main():
+    parser = argparse.ArgumentParser(description="Blender test coverage", usage=usage)
+
+    parser.add_argument("command", nargs="?", default="help")
+    args = parser.parse_args(sys.argv[1:2])
+    command = args.command
+
+    argv = sys.argv[2:]
+
+    if command == "report":
+        run_report(argv)
+    elif command == "reset":
+        run_reset(argv)
+    elif command == "help":
+        print(usage)
+    else:
+        print("Unknown command: {}".format(command))
+        sys.exit(1)
+
+
+def run_report(argv):
+    parser = argparse.ArgumentParser(usage=usage)
+    parser.add_argument("--build-directory", type=str, default=".")
+    parser.add_argument("--no-browser", action="store_true", default=False)
+    args = parser.parse_args(argv)
+
+    build_dir = Path(args.build_directory).absolute()
+    if not is_blender_build_directory(build_dir):
+        print("Directory does not seem to be a Blender build directory.")
+        sys.exit(1)
+
+    coverage_dir = build_dir / "coverage"
+    analysis_dir = coverage_dir / "analysis"
+    report_dir = coverage_dir / "report"
+
+    parse(build_dir, analysis_dir)
+    report_as_html(analysis_dir, report_dir)
+
+    if not args.no_browser:
+        webbrowser.open("file://" + str(report_dir / "index.html"))
+
+
+def run_reset(argv):
+    parser = argparse.ArgumentParser(usage=usage)
+    parser.add_argument("--build-directory", type=str, default=".")
+    args = parser.parse_args(argv)
+
+    build_dir = Path(args.build_directory).absolute()
+    if not is_blender_build_directory(build_dir):
+        print("Directory does not seem to be a Blender build directory.")
+        sys.exit(1)
+
+    print("Remove .gcda files...")
+    gcda_files = list(build_dir.glob("**/*.gcda"))
+    for i, path in enumerate(gcda_files):
+        print_updateable_line("[{}/{}] Remove: {}".format(i + 1, len(gcda_files), path))
+        os.remove(path)
+    print()
+
+
+def is_blender_build_directory(build_dir):
+    return (Path(build_dir) / "CMakeCache.txt").exists()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/tests/coverage/coverage_report/__init__.py b/tests/coverage/coverage_report/__init__.py
new file mode 100644
index 00000000000..f49a7eed245
--- /dev/null
+++ b/tests/coverage/coverage_report/__init__.py
@@ -0,0 +1,6 @@
+# SPDX-FileCopyrightText: 2024 Blender Authors
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from .build_report import report_as_html
+from .parse import parse
diff --git a/tests/coverage/coverage_report/build_report.py b/tests/coverage/coverage_report/build_report.py
new file mode 100644
index 00000000000..ccf67227e59
--- /dev/null
+++ b/tests/coverage/coverage_report/build_report.py
@@ -0,0 +1,108 @@
+# SPDX-FileCopyrightText: 2024 Blender Authors
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+import base64
+import gzip
+import html
+import json
+import shutil
+import textwrap
+import zipfile
+
+from .util import print_updateable_line
+from collections import defaultdict
+from pathlib import Path
+from pprint import pprint
+
+index_template_path = Path(__file__).parent / "index_template.html"
+single_file_template_path = Path(__file__).parent / "single_file_template.html"
+
+
+def report_as_html(analysis_dir, report_dir):
+    analysis_dir = Path(analysis_dir).absolute()
+    report_dir = Path(report_dir).absolute()
+
+    if not analysis_dir.exists():
+        raise RuntimeError("Missing analysis at: {}".format(analysis_dir))
+
+    # Remove any previous report; ignore the error if none exists yet.
+    shutil.rmtree(report_dir, ignore_errors=True)
+
+    build_summary(analysis_dir, report_dir)
+    build_file_pages(analysis_dir, report_dir)
+    print("Report written to {}.".format(report_dir / "index.html"))
+
+
+def build_summary(analysis_dir, report_dir):
+    print("Write index...")
+    with open(index_template_path) as f:
+        template = f.read()
+
+    result = template
+    result = result.replace(
+        "ANALYSIS_DATA",
+        zip_file_to_compressed_base64(analysis_dir / "summary.json.zip"),
+    )
+
+    report_summary_path = report_dir / "index.html"
+    report_summary_path.parent.mkdir(parents=True, exist_ok=True)
+    with open(report_summary_path, "w") as f:
+        f.write(result)
+
+
+def build_file_pages(analysis_dir, report_dir):
+    with open(single_file_template_path) as f:
+        template = f.read()
+
+    analysis_files_dir = analysis_dir / "files"
+    analysis_paths = list(analysis_files_dir.glob("**/*.json.zip"))
+
+    print("Write report pages...")
+    for i, analysis_path in enumerate(analysis_paths):
+        # Strip the ".json.zip" suffix to recover the original source file path.
+        relative_path = analysis_path.relative_to(analysis_files_dir)
+        relative_path = Path(str(relative_path)[: -len(".json.zip")])
+        # Analysis files mirror absolute source paths, so prepend the filesystem root.
+        source_path = "/" / relative_path
+        report_path = Path(str(report_dir / "files" / relative_path) + ".html")
+        index_page_link = "../" * len(relative_path.parents) + "index.html"
+
+        build_report_for_source_file(template, source_path, analysis_path, report_path, index_page_link)
+
+        print_updateable_line("[{}/{}] written: {}".format(i + 1, len(analysis_paths), report_path))
+    print()
+
+
+def build_report_for_source_file(template_str, source_path, analysis_path, report_path, index_page_link):
+    result = template_str
+    result = result.replace("TITLE", source_path.name)
+    result = result.replace("INDEX_PAGE_LINK", index_page_link)
+    result = result.replace("SOURCE_FILE_PATH", str(source_path))
+    result = result.replace("SOURCE_CODE", file_to_compressed_base64(source_path))
+    result = result.replace("ANALYSIS_DATA", zip_file_to_compressed_base64(analysis_path))
+
+    report_path.parent.mkdir(parents=True, exist_ok=True)
+    with open(report_path, "w") as f:
+        f.write(result)
+
+
+def file_to_compressed_base64(file_path):
+    with open(file_path, "rb") as f:
+        text = f.read()
+    return bytes_to_compressed_base64(text)
+
+
+def zip_file_to_compressed_base64(zip_file_path):
+    file_name = zip_file_path.with_suffix("").name
+    with zipfile.ZipFile(zip_file_path) as zip_file:
+        with zip_file.open(file_name) as f:
+            data = f.read()
+    return bytes_to_compressed_base64(data)
+
+
+def bytes_to_compressed_base64(data):
+    data = gzip.compress(data)
+    data = base64.b64encode(data)
+    data = data.decode("utf-8")
+    return data
diff --git a/tests/coverage/coverage_report/index_template.html b/tests/coverage/coverage_report/index_template.html
new file mode 100644
index 00000000000..e2496c64b34
--- /dev/null
+++ b/tests/coverage/coverage_report/index_template.html
@@ -0,0 +1,670 @@
[index_template.html: a 670-line standalone HTML page whose markup, CSS, and JavaScript did not survive extraction. Recoverable content: a "Code Coverage Report" heading and summary rows labeled "Files:", "Functions:", and "Lines:". Its ANALYSIS_DATA placeholder is filled with the gzip-compressed, base64-encoded summary JSON by build_summary() above.]
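A note on the index_page_link computation in build_file_pages() above: Path.parents includes the final "." entry, and that extra entry exactly compensates for the additional "files/" directory level under the report root. A small worked example (the path is hypothetical):

import webbrowser  # unused here; shown only to match the surrounding module style
from pathlib import Path

# Hypothetical path of one analysis file, relative to <analysis_dir>/files/.
relative_path = Path("source/blender/blenlib/intern/string.c")

# parents == [.../intern, .../blenlib, source/blender, source, "."]  ->  5 entries.
# The report page is written to <report_dir>/files/source/blender/blenlib/intern/string.c.html,
# which sits 5 directories below <report_dir>, so 5 "../" steps reach index.html.
index_page_link = "../" * len(relative_path.parents) + "index.html"
assert index_page_link == "../../../../../index.html"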
diff --git a/tests/coverage/coverage_report/parse.py b/tests/coverage/coverage_report/parse.py
new file mode 100644
index 00000000000..c699a7d4dc1
--- /dev/null
+++ b/tests/coverage/coverage_report/parse.py
@@ -0,0 +1,315 @@
+# SPDX-FileCopyrightText: 2024 Blender Authors
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+import concurrent.futures
+import json
+import multiprocessing
+import os
+import random
+import shutil
+import subprocess
+import sys
+import textwrap
+import time
+import zipfile
+
+from collections import defaultdict
+from pathlib import Path
+from pprint import pprint
+
+from .util import print_updateable_line
+
+
+def parse(build_dir, analysis_dir, gcov_binary="gcov"):
+    """
+    Parses the coverage data generated in the given build directory, merges it, and stores
+    the result in the analysis directory.
+    """
+
+    build_dir = Path(build_dir).absolute()
+    analysis_dir = Path(analysis_dir).absolute()
+    gcov_path = get_gcov_path(gcov_binary)
+
+    if gcov_path is None or not gcov_path.exists():
+        raise RuntimeError("Gcov not found.")
+
+    gcda_paths = gather_gcda_files(build_dir)
+    if len(gcda_paths) == 0:
+        raise RuntimeError(
+            textwrap.dedent(
+                """\
+                No .gcda files found. Make sure to run the tests in a debug build that has
+                been compiled with GCC with --coverage.
+                """
+            )
+        )
+
+    # Invoke gcov many times in parallel to get the data in JSON format.
+    gcov_outputs = parse_gcda_files_with_gcov(gcda_paths, gcov_path)
+
+    gcov_by_source_file = collect_data_per_file(gcov_outputs)
+    if len(gcov_by_source_file) == 0:
+        raise RuntimeError("No coverage data found.")
+
+    # Sort files to make the progress report more useful.
+    source_file_order = list(sorted(list(gcov_by_source_file.keys())))
+
+    # Many object files may have collected data for the same source files.
+    data_by_source_file = merge_coverage_data(gcov_by_source_file, source_file_order)
+
+    # Generate a summary for each file.
+    summary = compute_summary(data_by_source_file, source_file_order)
+
+    clear_old_analysis_on_disk(analysis_dir)
+    write_analysis_to_disk(analysis_dir, summary, data_by_source_file, source_file_order)
+
+
+def get_gcov_path(gcov_binary):
+    if not Path(gcov_binary).is_file():
+        if gcov_path := shutil.which(gcov_binary):
+            return Path(gcov_path)
+        return None
+    return Path(gcov_binary).absolute()
+
+
+def gather_gcda_files(build_dir):
+    print("Gather .gcda files...")
+    gcda_paths = []
+    for gcda_path in build_dir.glob("**/*.gcda"):
+        gcda_paths.append(gcda_path)
+        print_updateable_line("[{}]: {}".format(len(gcda_paths), gcda_path))
+    print()
+    return gcda_paths
+
+
+def parse_gcda_files_with_gcov(gcda_paths, gcov_path):
+    # Shuffle to make chunks more similar in size.
+    random.shuffle(gcda_paths)
+
+    # Gcov can process multiple files in a single invocation, so split the tasks into chunks
+    # to reduce the total number of gcov invocations. The chunks should not be too large,
+    # because that would make multi-threading less effective.
+    chunk_size = 10
+    gcda_path_chunks = [gcda_paths[i: i + chunk_size] for i in range(0, len(gcda_paths), chunk_size)]
+
+    def parse_with_gcov(file_paths):
+        return subprocess.check_output([gcov_path, "--stdout", "--json-format", *file_paths])
+
+    print("Parse files...")
+    print_updateable_line("[0/{}] parsed.".format(len(gcda_paths)))
+    gcov_outputs = []
+
+    # Use multi-threading instead of multi-processing here because the actual work is done
+    # by the separate gcov child processes, which run in parallel; each thread merely waits
+    # for one gcov process.
+    # This is not strictly necessary, but it is simple and works well enough.
+    # Note: os.cpu_count() can return None, hence the fallback.
+    with concurrent.futures.ThreadPoolExecutor(max_workers=(os.cpu_count() or 4) * 2) as executor:
+        futures = {executor.submit(parse_with_gcov, file_paths): file_paths for file_paths in gcda_path_chunks}
+
+        done_count = 0
+        for future in concurrent.futures.as_completed(futures):
+            file_paths = futures[future]
+            done_count += len(file_paths)
+            try:
+                # Gcov outputs a line for each file that it processed.
+                for line in future.result().splitlines():
+                    gcov_outputs.append(json.loads(line))
+            except Exception as e:
+                print("Error:", e)
+            print_updateable_line("[{}/{}] parsed.".format(done_count, len(gcda_paths)))
+    print()
+
+    return gcov_outputs
+
+
+def collect_data_per_file(gcov_outputs):
+    gcov_by_source_file = defaultdict(list)
+    for data in gcov_outputs:
+        for file_data in data["files"]:
+            gcov_by_source_file[file_data["file"]].append(file_data)
+    return gcov_by_source_file
+
+
+def merge_coverage_data(gcov_by_source_file, source_file_order):
+    print("Merge coverage data...")
+
+    data_by_source_file = {}
+    for i, file_path in enumerate(source_file_order):
+        print_updateable_line("[{}/{}] merged: {}".format(i + 1, len(gcov_by_source_file), file_path))
+
+        # For templated code, many functions may be generated from the same function in the
+        # source code. Often we want to merge the data from these individual instantiations
+        # together though. It's hard to find the functions that belong together based on the
+        # name alone; however, the source code location is a value that is common to all
+        # instantiations of the same function.
+        function_by_location = {}
+
+        # Sometimes lines don't have function information. The exact rules for this are
+        # unclear; it seems to happen for inline functions sometimes.
+        loose_lines = defaultdict(int)
+
+        # Maps the name of a specific function instantiation to its source code location.
+        location_key_by_mangled_name = {}
+
+        # See the `--json-format` documentation to understand the input data:
+        # https://gcc.gnu.org/onlinedocs/gcc/Invoking-Gcov.html
+        for gcov_data in gcov_by_source_file[file_path]:
+            for gcov_function in gcov_data["functions"]:
+                start_line = gcov_line_number_to_index(gcov_function["start_line"])
+                end_line = gcov_line_number_to_index(gcov_function["end_line"])
+                start_column = gcov_function["start_column"]
+                end_column = gcov_function["end_column"]
+
+                # Build an identifier for the function that is common among all template
+                # instantiations.
+                location_key = "{}:{}-{}:{}".format(start_line, start_column, end_line, end_column)
+
+                if location_key not in function_by_location:
+                    function_by_location[location_key] = {
+                        "start_line": start_line,
+                        "end_line": end_line,
+                        "start_column": start_column,
+                        "end_column": end_column,
+                        "execution_count": 0,
+                        "instantiations": {},
+                        "lines": defaultdict(int),
+                    }
+
+                mangled_name = gcov_function["name"]
+                demangled_name = gcov_function["demangled_name"]
+                execution_count = gcov_function["execution_count"]
+
+                location_key_by_mangled_name[mangled_name] = location_key
+
+                function = function_by_location[location_key]
+                function["execution_count"] += execution_count
+                if mangled_name not in function["instantiations"]:
+                    function["instantiations"][mangled_name] = {
+                        "demangled": demangled_name,
+                        "execution_count": 0,
+                        "lines": defaultdict(int),
+                    }
+                function["instantiations"][mangled_name]["execution_count"] += execution_count
+
+            for gcov_line in gcov_data["lines"]:
+                line_index = gcov_line_number_to_index(gcov_line["line_number"])
+                count = gcov_line["count"]
+                mangled_name = gcov_line.get("function_name")
+                if mangled_name is None:
+                    loose_lines[line_index] += count
+                else:
+                    location_key = location_key_by_mangled_name[mangled_name]
+                    function = function_by_location[location_key]
+                    function["lines"][line_index] += count
+                    instantiation = function["instantiations"][mangled_name]
+                    instantiation["lines"][line_index] += count
+
+        data_by_source_file[file_path] = {
+            "file": file_path,
+            "functions": function_by_location,
+            "loose_lines": loose_lines,
+        }
+    print()
+
+    return data_by_source_file
+
+
+def compute_summary(data_by_source_file, source_file_order):
+    print("Compute summaries...")
+    summary_by_source_file = {}
+    for i, file_path in enumerate(source_file_order):
+        data = data_by_source_file[file_path]
+        print_updateable_line("[{}/{}] summarized: {}".format(i + 1, len(data_by_source_file), file_path))
+
+        num_instantiated_lines = 0
+        num_instantiated_lines_run = 0
+        num_instantiated_functions = 0
+        num_instantiated_functions_run = 0
+
+        all_lines = set()
+        run_lines = set()
+        all_function_keys = set()
+        run_function_keys = set()
+
+        for function_key, fdata in data["functions"].items():
+            all_function_keys.add(function_key)
+            if fdata["execution_count"] > 0:
+                run_function_keys.add(function_key)
+
+            for line_index, execution_count in fdata["lines"].items():
+                all_lines.add(line_index)
+                if execution_count > 0:
+                    run_lines.add(line_index)
+
+            for function_name, instantiation_fdata in fdata["instantiations"].items():
+                num_instantiated_functions += 1
+                if instantiation_fdata["execution_count"] > 0:
+                    num_instantiated_functions_run += 1
+                for line_index, execution_count in instantiation_fdata["lines"].items():
+                    num_instantiated_lines += 1
+                    if execution_count > 0:
+                        num_instantiated_lines_run += 1
+
+        for line_index, execution_count in data["loose_lines"].items():
+            num_instantiated_lines += 1
+            all_lines.add(line_index)
+            if execution_count > 0:
+                num_instantiated_lines_run += 1
+                run_lines.add(line_index)
+
+        summary_by_source_file[file_path] = {
+            "num_instantiated_lines": num_instantiated_lines,
+            "num_instantiated_lines_run": num_instantiated_lines_run,
+            "num_instantiated_functions": num_instantiated_functions,
+            "num_instantiated_functions_run": num_instantiated_functions_run,
+            "num_lines": len(all_lines),
+            "num_lines_run": len(run_lines),
+            "num_functions": len(all_function_keys),
+            "num_functions_run": len(run_function_keys),
+        }
+
+    print()
+
+    summary = {
+        "files": summary_by_source_file,
+    }
+
+    return summary
+
+
+def clear_old_analysis_on_disk(analysis_dir):
+    print("Clear old analysis...")
+    # Ignore the error if no old analysis exists yet.
+    shutil.rmtree(analysis_dir, ignore_errors=True)
+
+
+def write_analysis_to_disk(analysis_dir, summary, data_by_source_file, source_file_order):
+    print("Write summary...")
+    write_dict_to_zip_file(analysis_dir / "summary.json.zip", summary)
+
+    print("Write per file analysis...")
+    for i, file_path in enumerate(source_file_order):
+        analysis_file_path = analysis_dir / "files" / Path(file_path).relative_to("/")
+        analysis_file_path = str(analysis_file_path) + ".json.zip"
+
+        data = data_by_source_file[file_path]
+        print_updateable_line("[{}/{}] written: {}".format(i + 1, len(data_by_source_file), analysis_file_path))
+        write_dict_to_zip_file(analysis_file_path, data)
+    print()
+    print("Parsed data written to {}.".format(analysis_dir))
+
+
+def gcov_line_number_to_index(line_number):
+    # Gcov starts counting lines at 1.
+    return line_number - 1
+
+
+def write_dict_to_zip_file(zip_file_path, data):
+    zip_file_path = Path(zip_file_path)
+    zip_file_path.parent.mkdir(parents=True, exist_ok=True)
+    # Serializing to a string first is much faster than streaming with json.dump.
+    data_str = json.dumps(data)
+
+    name = zip_file_path.with_suffix("").name
+    with zipfile.ZipFile(zip_file_path, "w", compression=zipfile.ZIP_DEFLATED) as f:
+        f.writestr(name, data_str)
diff --git a/tests/coverage/coverage_report/single_file_template.html b/tests/coverage/coverage_report/single_file_template.html
new file mode 100644
index 00000000000..e0775c56b2c
--- /dev/null
+++ b/tests/coverage/coverage_report/single_file_template.html
@@ -0,0 +1,518 @@
[single_file_template.html: a 518-line standalone HTML page whose markup, CSS, and JavaScript did not survive extraction. Recoverable content: a "File: SOURCE_FILE_PATH" heading, a "Back to Overview" link (INDEX_PAGE_LINK), and per-file "Lines:" and "Functions:" statistics. Its SOURCE_CODE and ANALYSIS_DATA placeholders are filled with gzip-compressed, base64-encoded payloads by build_report_for_source_file() above.]
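Both templates receive their payloads as gzip-compressed, base64-encoded strings produced by bytes_to_compressed_base64() in build_report.py. The decoding is presumably done by the pages' JavaScript, which was stripped above; for reference, a minimal sketch of the inverse transform in Python, with a made-up function name:

import base64
import gzip
import json

def compressed_base64_to_dict(encoded):
    # Inverse of bytes_to_compressed_base64(): base64-decode, gunzip, then parse JSON.
    raw = base64.b64decode(encoded)
    return json.loads(gzip.decompress(raw).decode("utf-8"))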
diff --git a/tests/coverage/coverage_report/util.py b/tests/coverage/coverage_report/util.py
new file mode 100644
index 00000000000..f6882298554
--- /dev/null
+++ b/tests/coverage/coverage_report/util.py
@@ -0,0 +1,13 @@
+# SPDX-FileCopyrightText: 2024 Blender Authors
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+
+last_line_length = 0
+
+
+def print_updateable_line(data):
+    # Overwrite the previously printed line in place: blank it out with spaces,
+    # then print the new text, leaving the cursor at the start of the line.
+    global last_line_length
+    print(" " * last_line_length, end="\r")
+    print(data, end="\r")
+    last_line_length = len(data)
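Typical workflow: configure with -DWITH_COMPILER_CODE_COVERAGE=ON and -DCMAKE_BUILD_TYPE=Debug using GCC, build, run the test suite so that .gcda files are produced, then build the coverage-report target (which runs coverage.py report --build-directory on the build directory); coverage-reset deletes the accumulated .gcda files. The same steps can also be driven directly from Python. A sketch mirroring run_report() above (the build path is hypothetical, and tests/coverage/ must be on sys.path so that the coverage_report package can be imported):

import webbrowser
from pathlib import Path

from coverage_report import parse, report_as_html

# Hypothetical build directory, configured with WITH_COMPILER_CODE_COVERAGE=ON,
# built with GCC as a Debug build, and with the tests already run.
build_dir = Path("/home/user/blender-build")

analysis_dir = build_dir / "coverage" / "analysis"
report_dir = build_dir / "coverage" / "report"

parse(build_dir, analysis_dir)            # .gcda files -> merged JSON analysis
report_as_html(analysis_dir, report_dir)  # analysis -> static HTML report
webbrowser.open("file://" + str(report_dir / "index.html"))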