# SPDX-FileCopyrightText: 2024 Blender Authors
#
# SPDX-License-Identifier: GPL-2.0-or-later

"""
Tests: support generating a code coverage report.

This only works with GCC and has only been tested on Linux. The main goal is to
generate the code coverage reports automatically on the buildbot and to publish
them. With some luck, this motivates people to increase test coverage in their
respective areas. Nevertheless, it should also be easy to generate the reports
locally (at least on supported software stacks).

Usage:

1. Create a **debug** build using **GCC** with `WITH_COMPILER_CODE_COVERAGE` enabled.
2. Run tests. This automatically generates `.gcda` files in the build directory.
3. Run `make/ninja coverage-report` in the build directory.

If everything is successful, this opens a browser with the final report, which is
stored in `build-dir/coverage/report/`. For a bit more control, one can also run
the `coverage.py` script directly. This allows passing the `--no-browser` option,
which may be beneficial when running on the buildbot. (A usage sketch for calling
this module directly follows `report_as_html` below.) Running
`make/ninja coverage-reset` deletes all `.gcda` files, which resets the line
execution counts.

The final report has a main entry point (`index.html`) and a separate `.html` file
for every source code file that coverage data was available for. The report also
contains some code that is not in Blender's git repository. We could filter that
out, but it also seems interesting (to me anyway), so I just kept it in. Doing the
analysis and writing the report takes ~1 min. The slow part is running all tests
in a debug build, which takes ~12 min for me.

Since the coverage data is fairly large and the report also includes the entire
source code, file compression is used in two places:

* The intermediate analysis results for each file are stored in compressed zip
  files. This data is independent of the report html and could be used to build
  other tools on top of. For example, one could store the analysis data for each
  day to gain greater insight into how coverage changes over time in different
  parts of the code.
* The analysis data and source code are compressed, base64 encoded, and embedded
  into the `.html` files. This makes them much smaller (5-10x) than embedding the
  data without compression. (A sketch of the matching decode step is at the end
  of this file.)

Pull Request: https://projects.blender.org/blender/blender/pulls/126181
"""
import base64
import gzip
import html
import json
import shutil
import textwrap
import zipfile
from collections import defaultdict
from pathlib import Path
from pprint import pprint

from .util import print_updateable_line
index_template_path = Path(__file__).parent / "index_template.html"
single_file_template_path = Path(__file__).parent / "single_file_template.html"


def report_as_html(analysis_dir, report_dir, *, reference_dir=None):
    analysis_dir = Path(analysis_dir).absolute()
    report_dir = Path(report_dir).absolute()
    if reference_dir is not None:
        reference_dir = Path(reference_dir).absolute()
    if not analysis_dir.exists():
        raise RuntimeError("Missing analysis at: {}".format(analysis_dir))
    # Remove any previous report; ignore errors if none exists yet.
    shutil.rmtree(report_dir, ignore_errors=True)
    build_summary(analysis_dir, report_dir, reference_dir)
    build_file_pages(analysis_dir, report_dir)
    print("Report written to {}.".format(report_dir / "index.html"))


def build_summary(analysis_dir, report_dir, reference_dir):
print("Write index...")
with open(index_template_path) as f:
template = f.read()
result = template
result = result.replace(
"ANALYSIS_DATA",
zip_file_to_compressed_base64(analysis_dir / "summary.json.zip"),
)
reference_data_str = ""
if reference_dir is not None:
reference_summary_path = reference_dir / "summary.json.zip"
if reference_summary_path.exists():
reference_data_str = zip_file_to_compressed_base64(reference_summary_path)
result = result.replace("REFERENCE_DATA", reference_data_str)
    report_summary_path = report_dir / "index.html"
    report_summary_path.parent.mkdir(parents=True, exist_ok=True)
    with open(report_summary_path, "w") as f:
        f.write(result)


def build_file_pages(analysis_dir, report_dir):
    with open(single_file_template_path) as f:
        template = f.read()
    analysis_files_dir = analysis_dir / "files"
    analysis_paths = list(analysis_files_dir.glob("**/*.json.zip"))
    print("Write report pages...")
    for i, analysis_path in enumerate(analysis_paths):
        relative_path = analysis_path.relative_to(analysis_files_dir)
        # Strip the ".json.zip" suffix to recover the original source file path.
        relative_path = Path(str(relative_path)[: -len(".json.zip")])
        source_path = Path("/") / relative_path
        report_path = Path(str(report_dir / "files" / relative_path) + ".html")
        index_page_link = "../" * len(relative_path.parents) + "index.html"
        build_report_for_source_file(template, source_path, analysis_path, report_path, index_page_link)
        print_updateable_line("[{}/{}] written: {}".format(i + 1, len(analysis_paths), report_path))
    print()


def build_report_for_source_file(template_str, source_path, analysis_path, report_path, index_page_link):
    result = template_str
    result = result.replace("TITLE", source_path.name)
    result = result.replace("INDEX_PAGE_LINK", index_page_link)
    result = result.replace("SOURCE_FILE_PATH", str(source_path))
    result = result.replace("SOURCE_CODE", file_to_compressed_base64(source_path))
    result = result.replace("ANALYSIS_DATA", zip_file_to_compressed_base64(analysis_path))
    report_path.parent.mkdir(parents=True, exist_ok=True)
    with open(report_path, "w") as f:
        f.write(result)


def file_to_compressed_base64(file_path):
    # Read the file as raw bytes; it is compressed before being embedded.
    with open(file_path, "rb") as f:
        data = f.read()
    return bytes_to_compressed_base64(data)


def zip_file_to_compressed_base64(zip_file_path):
    # By convention, "<name>.zip" contains a single member named "<name>"
    # (e.g. "summary.json.zip" contains "summary.json").
    file_name = zip_file_path.with_suffix("").name
    with zipfile.ZipFile(zip_file_path) as zip_file:
        with zip_file.open(file_name) as f:
            data = f.read()
    return bytes_to_compressed_base64(data)


def bytes_to_compressed_base64(data):
    data = gzip.compress(data)
    data = base64.b64encode(data)
    data = data.decode("utf-8")
    return data
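

# Illustrative sketch, not used by the report pipeline itself: the inverse of
# bytes_to_compressed_base64(), handy for inspecting the payloads embedded in
# the generated `.html` files (the page templates decode the same data in the
# browser). The function name is hypothetical.
def compressed_base64_to_bytes(data_str):
    # Reverse the two encoding steps: base64-decode first, then gunzip.
    return gzip.decompress(base64.b64decode(data_str))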