Compare revisions
Commits on Source (1726)
Showing with 2927 additions and 69 deletions
docs/contributing/architecture/decisions
# To ignore the commits specified in this file:
# git blame --ignore-revs-file .git-blame-ignore-revs <FILEPATH>
# To always ignore commits specified in this file:
# git config --global blame.ignoreRevsFile .git-blame-ignore-revs
# style: ensure that files terminate with empty new line
58c48bd339485010df0662da04eb3fd6c4de85c3
# style: remove trailing whitespaces
7eda1a6ff8f517825b9bc84f25eb5638cacbe3ea
# style: fix Python code formatting using `black` and `isort`
bf674c190caba5ff47e7a255c80fa940381e5723
@@ -19,9 +19,6 @@
.dylib
.dll
# Fortran module files
*.mod
*.smod
# Compiled Static libraries
.lai
@@ -34,6 +31,9 @@
*.out
*.app
### VSCode
.vscode/
### CLion+all ###
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
@@ -57,8 +57,7 @@
.idea/**/libraries
# CMake
cmake-build-debug/
cmake-build-release/
cmake-build-*/
# Mongo Explorer plugin:
.idea/**/mongoSettings.xml
@@ -107,20 +106,21 @@ compile_commands.json
CTestTestfile.cmake
build
### CCache ###
.ccache/
# End of https://www.gitignore.io/api/c++,cmake,clion+all
#Astyle
*.orig
### Doxygen ###
doxygen
### eclipse ###
.cproject
.project
.settings
#GO
.go/
broker/pkg
discovery/pkg
common/go/pkg
@@ -128,16 +128,34 @@ authorizer/pkg
asapo_tools/pkg
# Python
.venv/
venv/
__pycache__/
#
*.rpm
linux_packages/
#version files
common/cpp/include/common/version.h
common/cpp/include/asapo/common/internal/version.h
common/go/src/asapo_common/version/version_lib.go
!sphinx/*
!docs/sphinx/*
.terraform
terraform.tfstate*
#helm chart
deploy/asapo_helm_chart/asapo/Chart.lock
deploy/asapo_helm_chart/asapo/charts/*.tgz
# Files downloaded for Mongo DB
mongo-c-driver-*
# GitLab Pages
/public/
---
workflow:
rules:
- if: $CI_COMMIT_BRANCH
- if: $CI_COMMIT_TAG
variables:
GO_VERSION_FILE: $CI_PROJECT_DIR/common/go/src/asapo_common/version/version_lib.go
# So that containers are automatically cleaned up, PIPELINE_TAG must be consistent
# with the containers cleanup rules defined on
# https://gitlab.desy.de/asapo/asapo/-/settings/packages_and_registries/cleanup_image_tags
PIPELINE_TAG: "dev-${CI_COMMIT_SHORT_SHA}"
STABLE_VERSION_REGEX: "^[0-9.]+$"
#---------------------------------------------------------------------------------------
# .pre
#---------------------------------------------------------------------------------------
# Template CI job to create Docker images that are used in other CI jobs for building
# and testing `ASAP::O` (server and/or client) on different platforms.
#
# CI jobs that extend this template CI job `.docker-create-build-env` must provide the
# following variables:
# - DOCKER_IMAGE_NAME: suffix of the name given to the Docker image, the full image name
# being "$CI_REGISTRY_IMAGE/$DOCKER_IMAGE_NAME"
# - DOCKER_DIR: directory that contains the Dockerfile and possible additional resources
# used to create the Docker image.
# - DOCKER_FILENAME (optional): name of the Dockerfile, `Dockerfile` by default.
#
# Note: unfortunately, I could not make a generic `rules` section based on `DOCKER_DIR`
# work so that CI jobs that extend `.docker-create-build-env` only run on the `develop`
# branch and if the Dockerfile has changed.
.docker-create-build-env:
stage: .pre
image:
name: gcr.io/kaniko-project/executor:debug
entrypoint: [""]
variables:
DOCKER_FILENAME: Dockerfile
script:
- full_docker_image_name="$CI_REGISTRY_IMAGE/$DOCKER_IMAGE_NAME"
- echo "full_docker_image_name=$full_docker_image_name"
- dockerfile_path="$DOCKER_DIR/$DOCKER_FILENAME"
- echo "dockerfile_path='$dockerfile_path'"
- cat "$dockerfile_path"
- echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"}}}" > /kaniko/.docker/config.json
- /kaniko/executor --cache=false --context $DOCKER_DIR --dockerfile $dockerfile_path --destination $full_docker_image_name:${CI_COMMIT_SHORT_SHA} --destination $full_docker_image_name:latest
tags:
- kubernetes-executor
docker-create-asapo-services-linux-build-env:
extends: .docker-create-build-env
variables:
DOCKER_DIR: $CI_PROJECT_DIR/deploy/build_env/services-linux
DOCKER_IMAGE_NAME: asapo-services-linux-build-env
rules:
- if: $CI_COMMIT_REF_NAME == "develop"
changes:
paths:
- deploy/build_env/services-linux/Dockerfile
docker-create-asapo-packages-ubuntu1804-build-env:
extends: .docker-create-build-env
variables:
DOCKER_DIR: $CI_PROJECT_DIR/deploy/build_env/ubuntu
DOCKER_FILENAME: Dockerfile.gitlab.18.04
DOCKER_IMAGE_NAME: asapo-packages-ubuntu1804-build-env
rules:
- if: $CI_COMMIT_REF_NAME == "develop"
changes:
paths:
- deploy/build_env/ubuntu/Dockerfile.gitlab.18.04
docker-create-asapo-packages-ubuntu2004-build-env:
extends: .docker-create-build-env
variables:
DOCKER_DIR: $CI_PROJECT_DIR/deploy/build_env/ubuntu
DOCKER_FILENAME: Dockerfile.gitlab.20.04
DOCKER_IMAGE_NAME: asapo-packages-ubuntu2004-build-env
rules:
- if: $CI_COMMIT_REF_NAME == "develop"
changes:
paths:
- deploy/build_env/ubuntu/Dockerfile.gitlab.20.04
docker-create-asapo-packages-debian9-build-env:
extends: .docker-create-build-env
variables:
DOCKER_DIR: $CI_PROJECT_DIR/deploy/build_env/debians
DOCKER_FILENAME: Dockerfile.gitlab.9
DOCKER_IMAGE_NAME: asapo-packages-debian9-build-env
rules:
- if: $CI_COMMIT_REF_NAME == "develop"
changes:
paths:
- deploy/build_env/debians/Dockerfile.gitlab.9
docker-create-asapo-packages-debian10-build-env:
extends: .docker-create-build-env
variables:
DOCKER_DIR: $CI_PROJECT_DIR/deploy/build_env/debians
DOCKER_FILENAME: Dockerfile.gitlab.10
DOCKER_IMAGE_NAME: asapo-packages-debian10-build-env
rules:
- if: $CI_COMMIT_REF_NAME == "develop"
changes:
paths:
- deploy/build_env/debians/Dockerfile.gitlab.10
docker-create-asapo-packages-debian11-build-env:
extends: .docker-create-build-env
variables:
DOCKER_DIR: $CI_PROJECT_DIR/deploy/build_env/debians
DOCKER_FILENAME: Dockerfile.gitlab.11
DOCKER_IMAGE_NAME: asapo-packages-debian11-build-env
rules:
- if: $CI_COMMIT_REF_NAME == "develop"
changes:
paths:
- deploy/build_env/debians/Dockerfile.gitlab.11
docker-create-asapo-packages-debian12-build-env:
extends: .docker-create-build-env
variables:
DOCKER_DIR: $CI_PROJECT_DIR/deploy/build_env/debians
DOCKER_FILENAME: Dockerfile.gitlab.12
DOCKER_IMAGE_NAME: asapo-packages-debian12-build-env
rules:
- if: $CI_COMMIT_REF_NAME == "develop"
changes:
paths:
- deploy/build_env/debians/Dockerfile.gitlab.12
docker-create-asapo-packages-centos7-build-env:
extends: .docker-create-build-env
variables:
DOCKER_DIR: $CI_PROJECT_DIR/deploy/build_env/centos
DOCKER_FILENAME: Dockerfile.gitlab.7
DOCKER_IMAGE_NAME: asapo-packages-centos7-build-env
rules:
- if: $CI_COMMIT_REF_NAME == "develop"
changes:
paths:
- deploy/build_env/centos/Dockerfile.gitlab.7
docker-create-asapo-packages-centos8-build-env:
extends: .docker-create-build-env
variables:
DOCKER_DIR: $CI_PROJECT_DIR/deploy/build_env/centos
DOCKER_FILENAME: Dockerfile.gitlab.8
DOCKER_IMAGE_NAME: asapo-packages-centos8-build-env
rules:
- if: $CI_COMMIT_REF_NAME == "develop"
changes:
paths:
- deploy/build_env/centos/Dockerfile.gitlab.8
docker-create-asapo-site-build-env:
extends: .docker-create-build-env
variables:
DOCKER_DIR: $CI_PROJECT_DIR/deploy/build_env/site
DOCKER_IMAGE_NAME: asapo-site-build-env
rules:
- if: $CI_COMMIT_REF_NAME == "develop"
changes:
paths:
- deploy/build_env/site/Dockerfile
docker-create-asapo-secondary-fluentd-elasticsearch:
extends: .docker-create-build-env
variables:
DOCKER_DIR: $CI_PROJECT_DIR/deploy/secondary_services/fluentd_elastic
DOCKER_IMAGE_NAME: asapo-secondary-fluentd-elasticsearch
rules:
- if: $CI_COMMIT_REF_NAME == "develop"
changes:
paths:
- deploy/secondary_services/fluentd_elastic/Dockerfile
docker-create-asapo-services-windows-build-env:
extends: .docker-create-build-env
variables:
DOCKER_DIR: $CI_PROJECT_DIR/deploy/build_env/services-windows
script:
- echo Just a placeholder task, windows runner required
rules:
- if: $CI_COMMIT_REF_NAME == "develop"
changes:
paths:
- deploy/build_env/services-windows/Dockerfile
#---------------------------------------------------------------------------------------
.go-cache:
variables:
GOPATH: $CI_PROJECT_DIR/.go
before_script:
- mkdir -p .go
cache:
paths:
- .go/pkg/mod/
.client-packages:
stage: build
script:
- mkdir $CI_PROJECT_DIR/build
- cd $CI_PROJECT_DIR/build
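# Bash case conversion below: ${PACKAGE_TYPE^^} upper-cases the value (e.g. "deb" -> "DEB"), ${PACKAGE_TYPE,,} lower-cases it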
- PACKAGE_UP=${PACKAGE_TYPE^^}
- PACKAGE_LOW=${PACKAGE_TYPE,,}
- >
if [ -z "$WITH_LIBFABRIC" ]; then
ENABLE_LIBFABRIC="OFF"
else
ENABLE_LIBFABRIC="ON"
fi
- cmake -DCMAKE_BUILD_TYPE="Release" -DENABLE_LIBFABRIC=$ENABLE_LIBFABRIC -DINSTALL_EXAMPLES=ON -DBUILD_CLIENTS_ONLY=ON -DPACKAGE_RELEASE_SUFFIX=$OS_SUFFIX -DBUILD_PYTHON=OFF -DCPACK_PACKAGE_NAME="asapo-devel" -DCPACK_GENERATOR=$PACKAGE_UP ..
- make -j 4
- make package
- rm -f CMakeCache.txt
- cmake -DCMAKE_BUILD_TYPE="Release" -DENABLE_LIBFABRIC=$ENABLE_LIBFABRIC -DBUILD_CLIENTS_ONLY=ON -DBUILD_PYTHON=ON -DPACKAGE_RELEASE_SUFFIX=$OS_SUFFIX -DBUILD_PYTHON_PACKAGES="source;${PACKAGE_LOW}" -DBUILD_PYTHON_DOCS=OFF ..
- make -j 1
- PACKAGES_PATH="$CI_PROJECT_DIR/linux_packages/$PACKAGE_FOLDER"
- mkdir -p "$PACKAGES_PATH"
- cp $CI_PROJECT_DIR/build/asapo-devel* "$PACKAGES_PATH/"
- cp $CI_PROJECT_DIR/build/*/api/python/dist/* "$PACKAGES_PATH/"
- ls -ls "$PACKAGES_PATH"
# Test installation
- (! python3 -c "import asapo_consumer")
- (! python3 -c "import asapo_producer")
- if [ "$PACKAGE_LOW" == "rpm" ]; then
- rpm --install "$PACKAGES_PATH"/python3-asapo_{consumer,producer}*.$PACKAGE_LOW
- rpm --query --list python3-asapo_{consumer,producer}
- elif [ "$PACKAGE_LOW" == "deb" ]; then
- dpkg --install "$PACKAGES_PATH"/python3-asapo-{consumer,producer}*.$PACKAGE_LOW
- dpkg-query --listfiles python3-asapo-{consumer,producer}
- else
- exit 1
- fi
- python3 -m pip freeze
- python3 -c "import asapo_consumer; import asapo_producer"
tags:
- kubernetes-executor
rules:
- when: on_success
artifacts:
paths:
- linux_packages
expire_in: 3 hrs
build-python-manylinux2014:
image:
name: quay.io/pypa/manylinux2014_x86_64:latest
stage: build
variables:
BUILD_DIR: $CI_PROJECT_DIR/build
before_script:
- yum install -y libcurl-devel libfabric-devel
script:
- bash deploy/build_env/manylinux2014/build.sh ${CLIENT_TYPE}
parallel:
matrix:
- CLIENT_TYPE: ["consumer", "producer"]
tags:
- kubernetes-executor
rules:
- when: on_success
artifacts:
paths:
- wheel_packages
expire_in: 3 hrs
pre-commit:
stage: build
tags:
- kubernetes-executor
image:
name: $CI_REGISTRY_IMAGE/asapo-services-linux-build-env:latest
entrypoint: [""]
before_script:
- pip install pre-commit
script:
- pre-commit run -a
after_script:
- git --no-pager diff
build-services-linux-debug:
extends: .go-cache
image:
name: $CI_REGISTRY_IMAGE/asapo-services-linux-build-env:latest
entrypoint: [""]
stage: build
script:
- mkdir $CI_PROJECT_DIR/build
- cd $CI_PROJECT_DIR/build
- cmake -DCMAKE_BUILD_TYPE=Debug -DENABLE_COVERAGE=ON -DBUILD_TESTS=ON -DBUILD_INTEGRATION_TESTS=ON -DBUILD_EXAMPLES=ON -DBUILD_CONSUMER_TOOLS=ON -DENABLE_LIBFABRIC=ON -DENABLE_LIBFABRIC_LOCALHOST=ON -DBUILD_EVENT_MONITOR_PRODUCER=ON -DSTOP_BUILD_ON_WARNINGS=ON -DENABLE_NEW_RECEIVER_MONITORING=ON ..
- make -j 4
# Ensures that the file defining the versions of the Go packages is present and will be uploaded.
- cat "$GO_VERSION_FILE"
tags:
- kubernetes-executor
rules:
- when: always
artifacts:
paths:
- build
- "$GO_VERSION_FILE"
exclude:
- build/**/CMakeFiles/**/*.o
- build/**/CMakeFiles/**/*.objlib
- build/**/CMakeFiles/**/*.includecache
- build/**/*.a
expire_in: 3 hrs
test-services-linux-debug:
extends: .go-cache
image:
name: $CI_REGISTRY_IMAGE/asapo-services-linux-build-env:latest
stage: test
variables:
BUILD_DIR: $CI_PROJECT_DIR/build
script:
- bash $CI_PROJECT_DIR/deploy/build_env/services-linux/run_asapo.sh "$BUILD_DIR"
- cmake --build "$BUILD_DIR" -- coverage_reset_counters
- ctest --test-dir "$BUILD_DIR" --no-compress-output -T Test
--output-on-failure --output-junit "$BUILD_DIR"/testResult.xml
-E "full_chain_monitoring|noaccess|restart|logger_fluentd|coverage"
after_script:
- cmake --build "$BUILD_DIR" -- coverage_generate_report
- mkdir -p "$BUILD_DIR"/reports/logs && cp /tmp/*.log "$BUILD_DIR"/reports/logs/
tags:
- kubernetes-executor
rules:
- when: on_success
dependencies:
- build-services-linux-debug
coverage: /^\s*lines\.*:\s*\d+.\d+\%/
artifacts:
when: always
paths:
# Contains the HTML coverage report. Hard-coded in the CMake target
# `coverage_generate_report`
- build/reports
reports:
junit: build/testResult.xml
coverage_report:
coverage_format: cobertura
path: build/reports/coverage/coverage.xml
test-cpp-doc-snippets:
services:
- name: gitlab.desy.de:5555/asapo/asapo/asapo-standalone-dev:${PIPELINE_TAG}
stage: test
image: debian:10-slim
variables:
example_dir: "$CI_PROJECT_DIR/docs/site/examples/cpp/"
example_build_dir: "$example_dir/build"
before_script:
- apt-get update
- apt-get install -y --no-install-recommends build-essential cmake
- apt-get install -y --no-install-recommends $CI_PROJECT_DIR/linux_packages/debian10.*/asapo*.deb
script:
- cmake -B "$example_build_dir" -S "$example_dir"
- cmake --build "$example_build_dir"
- cd "$example_build_dir" && ctest --verbose
dependencies:
- build-packages-debian10
- build-asapo-standalone-dev-docker-image
tags:
- kubernetes-executor
test-c-doc-snippets:
services:
- name: gitlab.desy.de:5555/asapo/asapo/asapo-standalone-dev:${PIPELINE_TAG}
stage: test
image: debian:10-slim
before_script:
- apt-get update
- apt-get install -y --no-install-recommends build-essential pkg-config
- apt-get install -y --no-install-recommends $CI_PROJECT_DIR/linux_packages/debian10.*/asapo*.deb
script:
- "$CI_PROJECT_DIR/docs/site/examples/c/run-code-snippets.sh"
dependencies:
- build-packages-debian10
- build-asapo-standalone-dev-docker-image
tags:
- kubernetes-executor
test-python-wheels-22.03:
services:
- name: gitlab.desy.de:5555/asapo/asapo/asapo-standalone-dev:${PIPELINE_TAG}
stage: test
image: python:${PY_VERSION}-slim
before_script:
- python3 --version
- python3 -m pip install pytest "numpy<2"
script:
- python3 -m pip install --trusted-host nims.desy.de --find-links=http://nims.desy.de/extra/asapo/linux_wheels
asapo_consumer==22.03 asapo_producer==22.03
- pytests_dir="$CI_PROJECT_DIR"/tests/automatic/pytests
- python3 -m pytest "$pytests_dir" --token-path "$pytests_dir/standalone_token.txt"
-vv -o log_cli=true --log-cli-level=DEBUG -m compatible
dependencies:
- build-asapo-standalone-dev-docker-image
tags:
- DESY-intern
parallel:
matrix:
- PY_VERSION: ["3.6", "3.7", "3.8", "3.9"]
test-python-asapo_standalone:
image:
name: gitlab.desy.de:5555/asapo/asapo/asapo-standalone-dev:${PIPELINE_TAG}
entrypoint: [""]
stage: test
script:
- apt-get update && apt-get install -y python3-pip
- pip install pytest asapo_consumer asapo_producer
- cd /
- bash $CI_PROJECT_DIR/deploy/asapo_services_light/start_asapo.sh
- ls /shared
- python3 -m pytest -vv -o log_cli=true --log-cli-level=DEBUG $CI_PROJECT_DIR/tests/automatic/standalone
dependencies:
- build-asapo-standalone-dev-docker-image
- build-python-manylinux2014
test-python-wheels-22.10:
services:
- name: gitlab.desy.de:5555/asapo/asapo/asapo-standalone-dev:${PIPELINE_TAG}
stage: test
image: python:${PY_VERSION}-slim
before_script:
# Importing this old version of `asapo_producer` raises the following error:
# ```
# ImportError: libcurl.so.4: cannot open shared object file: No such file or directory
# ```
# Installing `libcurl4` fixes it.
- apt-get update && apt-get install -y libcurl4
- python3 -m pip install pytest "numpy<2"
script:
- python3 -m pip install --trusted-host nims.desy.de --find-links=http://nims.desy.de/extra/asapo/linux_wheels
asapo_consumer==22.10 asapo_producer==22.10
- pytests_dir="$CI_PROJECT_DIR"/tests/automatic/pytests
- python3 -m pytest "$pytests_dir" --token-path "$pytests_dir/standalone_token.txt"
-vv -o log_cli=true --log-cli-level=DEBUG -m compatible
dependencies:
- build-asapo-standalone-dev-docker-image
tags:
- DESY-intern
parallel:
matrix:
- PY_VERSION: ["3.10"]
test-python-wheels:
services:
- name: gitlab.desy.de:5555/asapo/asapo/asapo-standalone-dev:${PIPELINE_TAG}
stage: test
image: python:${PY_VERSION}-slim
before_script:
- pip install pytest
script:
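# ${PY_VERSION//./} strips the dot from the Python version (e.g. "3.11" -> "311") to match the wheel directory and CPython ABI tag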
- pip install wheel_packages/python${PY_VERSION//./}/asapo*cp${PY_VERSION//./}*.whl
- pytests_dir="$CI_PROJECT_DIR"/tests/automatic/pytests
- python3 -m pytest "$pytests_dir" --token-path "$pytests_dir/standalone_token.txt" -vv -o log_cli=true --log-cli-level=DEBUG
parallel:
matrix:
- PY_VERSION: ["3.6", "3.7", "3.8", "3.9", "3.10", "3.11", "3.12"]
dependencies:
- build-asapo-standalone-dev-docker-image
- build-python-manylinux2014
test_performance_stress_test:
services:
- name: gitlab.desy.de:5555/asapo/asapo/asapo-standalone-dev:${PIPELINE_TAG}
image:
name: python:3.9-slim
stage: test
script:
- pip install pytest wheel_packages/python39/asapo_*cp39*.whl
# This checks only that the script works at all. The CI runner is likely not
# powerful enough to really stress-test performance.
- python3 -m pytest tests/performance/test_stress_test.py
dependencies:
- build-asapo-standalone-dev-docker-image
- build-python-manylinux2014
build-services-linux-release:
extends: .go-cache
image:
name: $CI_REGISTRY_IMAGE/asapo-services-linux-build-env:latest
entrypoint: [""]
variables:
CI_REGISTRY_IMAGE_USERNAME: "asapo-dev-registry"
CI_REGISTRY_IMAGE_PASSWORD: ${asapo_dev_registry_password}
stage: build
script:
- mkdir $CI_PROJECT_DIR/build
- cd $CI_PROJECT_DIR/build
- cmake -DCMAKE_BUILD_TYPE=Release -DBUILD_EXAMPLES=ON -DBUILD_CONSUMER_TOOLS=ON -DBUILD_BROKER=ON -DBUILD_INTEGRATION_TESTS=ON -DENABLE_LIBFABRIC=ON -DBUILD_EVENT_MONITOR_PRODUCER=ON -DSTOP_BUILD_ON_WARNINGS=ON -DENABLE_NEW_RECEIVER_MONITORING=ON ..
- make -j 4
tags:
- kubernetes-executor
rules:
- when: on_success
artifacts:
paths:
- build
exclude:
- build/**/CMakeFiles/**/*
- build/**/*.a
expire_in: 3 hrs
build-packages-centos7:
extends: .client-packages
image:
name: $CI_REGISTRY_IMAGE/asapo-packages-centos7-build-env:latest
entrypoint: [""]
variables:
PACKAGE_TYPE: "RPM"
WITH_LIBFABRIC: "YES"
OS_SUFFIX: "1.el7"
PACKAGE_FOLDER: "centos7.9.2009"
# For reasons I don't understand, the `python3 setup.py` command run by
# `python3 setup.py --bdist_rpm` does not have `"/usr/local/lib64/python3.6/site-packages"`
# in `sys.path`, so it cannot find `Cython` and fails.
PYTHONPATH: "/usr/local/lib64/python3.6/site-packages"
build-packages-centos8:
extends: .client-packages
image:
name: $CI_REGISTRY_IMAGE/asapo-packages-centos8-build-env:latest
entrypoint: [""]
variables:
PACKAGE_TYPE: "RPM"
WITH_LIBFABRIC: "YES"
OS_SUFFIX: "1.el8"
PACKAGE_FOLDER: "centos8.3.2011"
# For reasons I don't understand, the `python3 setup.py` command run by
# `python3 setup.py --bdist_rpm` does not have `"/usr/local/lib64/python3.6/site-packages"`
# in `sys.path`, so it cannot find `Cython` and fails.
PYTHONPATH: "/usr/local/lib64/python3.6/site-packages"
build-packages-debian9:
extends: .client-packages
image:
name: $CI_REGISTRY_IMAGE/asapo-packages-debian9-build-env:latest
entrypoint: [""]
variables:
PACKAGE_TYPE: "DEB"
OS_SUFFIX: "debian9.13"
PACKAGE_FOLDER: "debian9.13"
build-packages-debian10:
extends: .client-packages
image:
name: $CI_REGISTRY_IMAGE/asapo-packages-debian10-build-env:latest
entrypoint: [""]
variables:
PACKAGE_TYPE: "DEB"
WITH_LIBFABRIC: "YES"
OS_SUFFIX: "debian10.11"
PACKAGE_FOLDER: "debian10.11"
build-packages-debian11:
extends: .client-packages
image:
name: $CI_REGISTRY_IMAGE/asapo-packages-debian11-build-env:latest
entrypoint: [""]
variables:
PACKAGE_TYPE: "DEB"
WITH_LIBFABRIC: "YES"
OS_SUFFIX: "debian11.3"
PACKAGE_FOLDER: "debian11.3"
build-packages-debian12:
extends: .client-packages
image:
name: $CI_REGISTRY_IMAGE/asapo-packages-debian12-build-env:latest
entrypoint: [""]
variables:
PACKAGE_TYPE: "DEB"
WITH_LIBFABRIC: "YES"
OS_SUFFIX: "debian12.2"
PACKAGE_FOLDER: "debian12.2"
build-packages-ubuntu1804:
extends: .client-packages
image:
name: $CI_REGISTRY_IMAGE/asapo-packages-ubuntu1804-build-env:latest
entrypoint: [""]
variables:
PACKAGE_TYPE: "DEB"
WITH_LIBFABRIC: "YES"
OS_SUFFIX: "ubuntu18.04"
PACKAGE_FOLDER: "ubuntu18.04"
# The CMake script searches for the correct version of Python by using the
# `FindPython.cmake` provided by CMake. However, CMake 3.10 installed on Ubuntu 18.04
# does not contain this module (this module was introduced in CMake 3.12, see
# https://cmake.org/cmake/help/git-stage/release/3.12.html#modules).
# As a workaround, the CI job `build-packages-ubuntu1804` defines the environment
# variable `Python_EXECUTABLE` that contains the command to execute the Python
# interpreter so that CMake does not have to search for it (using the non-existing
# `FindPython.cmake` module).
Python_EXECUTABLE: "python3"
build-packages-ubuntu2004:
extends: .client-packages
image:
name: $CI_REGISTRY_IMAGE/asapo-packages-ubuntu2004-build-env:latest
entrypoint: [""]
variables:
PACKAGE_TYPE: "DEB"
WITH_LIBFABRIC: "YES"
OS_SUFFIX: "ubuntu20.04"
PACKAGE_FOLDER: "ubuntu20.04"
build-docs-api:
image:
name: $CI_REGISTRY_IMAGE/asapo-services-linux-build-env:latest
entrypoint: [""]
stage: build
script:
- mkdir $CI_PROJECT_DIR/build
- cd $CI_PROJECT_DIR/build
- cmake -DCMAKE_BUILD_TYPE=Release -DBUILD_CPP_DOCS=ON -DBUILD_PYTHON_DOCS=ON ..
- cd $CI_PROJECT_DIR/build/docs/doxygen
- make -j 4
- cd $CI_PROJECT_DIR/build/docs/sphinx
- make -j 4
- mkdir -p $CI_PROJECT_DIR/public
- mv $CI_PROJECT_DIR/build/docs/doxygen/html $CI_PROJECT_DIR/public/cpp
- mv $CI_PROJECT_DIR/build/docs/sphinx/sphinx/build/html $CI_PROJECT_DIR/public/python
tags:
- kubernetes-executor
rules:
- when: on_success
artifacts:
paths:
- public
expire_in: 3 hrs
build-docs-site:
stage: build
image: $CI_REGISTRY_IMAGE/asapo-site-build-env:latest
script:
# See https://docusaurus.io/docs/cli for the documentation about Docusaurus CLI.
- npm --prefix "$CI_PROJECT_DIR/docs/site/" install
- npm --prefix "$CI_PROJECT_DIR/docs/site/" run build -- --out-dir "$CI_PROJECT_DIR/public"
artifacts:
# You can download the artifacts and serve the site locally by executing
# `npm --prefix "$CI_PROJECT_DIR/docs/site/" install &&
# npm --prefix "$CI_PROJECT_DIR/docs/site/" run serve -- --dir "$download_path"`
# where:
# - `CI_PROJECT_DIR` has to be set to the root directory of this project
# - `download_path` is the path of the unzipped artifact
paths:
- public/
build-front:
image:
name: node:16
entrypoint: [""]
stage: build
script:
- mkdir -p $CI_PROJECT_DIR/build/monitoring_ui
- cd $CI_PROJECT_DIR/build/monitoring_ui
- cp -r $CI_PROJECT_DIR/monitoring/monitoring_ui/* .
- npm install
- ./generate-proto.sh
- npm run build
tags:
- kubernetes-executor
rules:
- when: on_success
artifacts:
paths:
- build
expire_in: 3 hrs
build-services-docker-images:
image:
name: gcr.io/kaniko-project/executor:debug
entrypoint: [""]
stage: deploy
script:
- echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"}}}" > /kaniko/.docker/config.json
- docker_context="$CI_PROJECT_DIR/$docker_context_prefix/${service//"-"/"_"}"
- docker_image="$CI_REGISTRY_IMAGE/asapo-$service"
- docker_destinations="--destination $docker_image:$CI_COMMIT_TAG"
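# Tags matching STABLE_VERSION_REGEX (stable releases) are additionally published as :latest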
- if [[ "$CI_COMMIT_TAG" =~ $STABLE_VERSION_REGEX ]]; then
- docker_destinations="$docker_destinations --destination $docker_image:latest"
- fi
- /kaniko/executor --cleanup --cache=false --context "$docker_context" --dockerfile "$docker_context"/Dockerfile $docker_destinations
parallel:
matrix:
- service: ["broker", "authorizer", "discovery", "receiver", "file-transfer", "monitoring-ui"]
docker_context_prefix: "build"
- service: "monitoring-server"
docker_context_prefix: "build/monitoring"
tags:
- kubernetes-executor
dependencies:
- build-services-linux-release
- build-front
rules:
- if: $CI_COMMIT_TAG
build-asapo-cluster-docker-image:
image:
name: gcr.io/kaniko-project/executor:debug
entrypoint: [""]
stage: deploy
script:
- echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"}}}" > /kaniko/.docker/config.json
- docker_context="$CI_PROJECT_DIR/build/deploy/asapo_services"
- docker_image="$CI_REGISTRY_IMAGE/asapo-cluster"
- docker_destinations="--destination $docker_image:$CI_COMMIT_TAG"
- if [[ "$CI_COMMIT_TAG" =~ $STABLE_VERSION_REGEX ]]; then
- docker_destinations="$docker_destinations --destination $docker_image:latest"
- fi
- /kaniko/executor --cleanup --cache=false --context "$docker_context" --dockerfile "$docker_context"/Dockerfile $docker_destinations
tags:
- kubernetes-executor
dependencies:
- build-services-linux-release
- build-front
rules:
- if: $CI_COMMIT_TAG
build-asapo-standalone-dev-docker-image:
image:
name: gcr.io/kaniko-project/executor:debug
entrypoint: [""]
stage: build
script:
- echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"}}}" > /kaniko/.docker/config.json
- cd $CI_PROJECT_DIR/build/deploy/asapo_services_light
- ./prepare-docker.sh
- /kaniko/executor --cache=false --single-snapshot --context $CI_PROJECT_DIR/build/deploy/asapo_services_light
--dockerfile $CI_PROJECT_DIR/build/deploy/asapo_services_light/Dockerfile
--destination $CI_REGISTRY_IMAGE/asapo-standalone-dev:${PIPELINE_TAG}
tags:
- kubernetes-executor
needs:
- build-services-linux-release
- build-front
# Inspired by https://github.com/GoogleContainerTools/kaniko/issues/676#issuecomment-597504724
deploy-asapo-standalone-docker-image:
image:
name: gcr.io/go-containerregistry/crane:debug
entrypoint: [""]
stage: deploy
variables:
GIT_STRATEGY: none
script:
- crane auth login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
- if [ -n "$CI_COMMIT_TAG" ]; then
- crane cp $CI_REGISTRY_IMAGE/asapo-standalone-dev:${PIPELINE_TAG} $CI_REGISTRY_IMAGE/asapo-standalone:${CI_COMMIT_TAG}
- if [[ "$CI_COMMIT_TAG" =~ $STABLE_VERSION_REGEX ]]; then
- crane tag $CI_REGISTRY_IMAGE/asapo-standalone:${CI_COMMIT_TAG} latest
- fi
- else
- crane tag $CI_REGISTRY_IMAGE/asapo-standalone-dev:${PIPELINE_TAG} latest
- fi
tags:
- kubernetes-executor
dependencies: []
rules:
- if: $CI_COMMIT_REF_NAME == "develop"
- if: $CI_COMMIT_TAG
.upload-client-packages-template:
image:
name: $CI_REGISTRY_IMAGE/asapo-services-linux-build-env:latest
entrypoint: [""]
stage: deploy
variables:
GIT_STRATEGY: none
script:
- mkdir -p ~/.ssh
- echo "$NIMS_SSH_KNOWN_HOST" >> ~/.ssh/known_hosts
- chmod 600 $NIMS_SSH_PRIVATE_KEY
- >
for dist in $(ls $CI_PROJECT_DIR/linux_packages); do
files="asapo-devel python-asapo python3-asapo"
for file in $files; do
if ls $CI_PROJECT_DIR/linux_packages/${dist}/${file}* 1> /dev/null 2>&1; then
PACKAGE_ID=$(curl -s -G --header "JOB-TOKEN: $CI_JOB_TOKEN" ${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages -d package_name=$file-$dist -d package_type=generic | jq -c ".[] | select(.pipeline != null) | select(.pipeline.ref | contains(\"$CI_COMMIT_REF_NAME\")) | (.id)")
if ! [ -z "$PACKAGE_ID" ]; then
echo Removing Package ${PACKAGE_ID}
curl --request DELETE --header "JOB-TOKEN: $CI_JOB_TOKEN" "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/${PACKAGE_ID}"
fi
for distfile in $(ls $CI_PROJECT_DIR/linux_packages/$dist/${file}*); do
DIST_FILE=${distfile//"~"/"_"}
curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file $distfile "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/$file-$dist/$DIST_VERSION/$(basename $DIST_FILE)"
done
echo "put $CI_PROJECT_DIR/linux_packages/$dist/${file}* asapo/linux_packages/$dist/" | sftp -v -i $NIMS_SSH_PRIVATE_KEY -P 2022 asapo@it-fs7.desy.de
fi
done
done
tags:
- DESY-intern
dependencies:
- build-packages-centos7
- build-packages-centos8
- build-packages-debian9
- build-packages-debian10
- build-packages-debian11
- build-packages-debian12
- build-packages-ubuntu1804
- build-packages-ubuntu2004
upload-client-packages-dev:
extends: .upload-client-packages-template
rules:
- if: $CI_COMMIT_REF_NAME == "develop"
when: on_success
- when: never
variables:
DIST_VERSION: "100.0.dev"
upload-client-packages-release:
extends: .upload-client-packages-template
only:
- tags
variables:
DIST_VERSION: ${CI_COMMIT_TAG}
upload-python-packages:
image:
name: python:latest
entrypoint: [""]
stage: deploy
variables:
GIT_STRATEGY: none
script:
- mkdir -p ~/.ssh
- echo "$NIMS_SSH_KNOWN_HOST" >> ~/.ssh/known_hosts
- chmod 600 $NIMS_SSH_PRIVATE_KEY
- pip install twine
- >
for pac_id in $(curl -s -G --header "JOB-TOKEN: $CI_JOB_TOKEN" ${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages -d package_type=pypi | python -c "import sys, json; [print(x['id']) for x in json.load(sys.stdin) if ('pipeline' in x) and (x['pipeline'] is not None) and ('ref' in x['pipeline']) and (x['pipeline']['ref'] == '$CI_COMMIT_REF_NAME')]"); do
echo Removing package ${pac_id}
curl --request DELETE --header "JOB-TOKEN: $CI_JOB_TOKEN" "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/${pac_id}"
done
- >
for dist in $(ls $CI_PROJECT_DIR/wheel_packages); do
TWINE_PASSWORD=${CI_JOB_TOKEN} TWINE_USERNAME=gitlab-ci-token python -m twine upload --repository-url ${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/pypi $CI_PROJECT_DIR/wheel_packages/${dist}/*
TWINE_PASSWORD=${PIPY_TOKEN} TWINE_USERNAME=__token__ python -m twine upload --skip-existing $CI_PROJECT_DIR/wheel_packages/${dist}/*
echo "put $CI_PROJECT_DIR/wheel_packages/${dist}/* asapo/linux_wheels/" | sftp -v -i $NIMS_SSH_PRIVATE_KEY -P 2022 asapo@it-fs7.desy.de
done
- wget -q -O- http://it-fs7.desy.de/cgi-bin/asapoSync.cgi
tags:
- DESY-intern
rules:
- if: $CI_COMMIT_TAG
when: on_success
- when: never
dependencies:
- build-python-manylinux2014
pages:
stage: deploy
image: $CI_REGISTRY_IMAGE/asapo-site-build-env:latest
script:
- find public
artifacts:
paths:
- public
rules:
- if: $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH
dependencies:
- build-docs-site
- build-docs-api
[submodule "deploy/asapo-worker-node-template"]
path = deploy/asapo_worker_node_template
url = ssh://yakubov@stash.desy.de:7999/asapo/asapo-worker-node-template.git
[submodule "deploy/deploy/asapo_worker_node_template"]
path = deploy/deploy/asapo_worker_node_template
url = ssh://git@stash.desy.de:7999/asapo/asapo-worker-node-template.git
[submodule "deploy/asapo_worker_node_template"]
path = deploy/asapo_worker_node_template
url = ssh://git@stash.desy.de:7999/asapo/asapo-worker-node-template.git
[*.py]
profile = black
line-length = 88
exclude: (3d_party/|(version-))
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v5.0.0
hooks:
- id: trailing-whitespace
- id: end-of-file-fixer
- repo: https://github.com/psf/black
rev: 24.10.0
hooks:
- id: black
- repo: https://github.com/PyCQA/isort
rev: 5.13.2
hooks:
- id: isort
- repo: local
hooks:
- id: gofmt
name: gofmt
language: golang
entry: gofmt
args: ["-w"]
types: ["go"]
- id: goimports
name: goimports
language: golang
entry: goimports
types: ["go"]
args: ["-w"]
# v0.16.0 is the latest version I found that supports golang 1.17
additional_dependencies: ["golang.org/x/tools/cmd/goimports@v0.16.0"]
#!/usr/bin/env python
# coding: utf-8
# originally from:
# http://www.warp1337.com/content/how-use-ctest-jenkins-xunit-or-junit-plugin
# improved by:
# Jorge Araya Navarro <elcorreo@deshackra.com>
# Veni, Sancte Spiritus.
from lxml import etree
import argparse
from os.path import expanduser
from os.path import join
import logging
# configure logging
logging.basicConfig(format="%(levelname)s: %(message)s",
level=logging.ERROR)
desc = ("Converts ctest XML file to xUnit/JUnit XML "
"compatible file to use with Jenkins-CI. "
"Did you found any bug? please report it on: "
"https://bitbucket.org/shackra/ctest-jenkins/issues")
# configure argument parser.
parser = argparse.ArgumentParser(description=desc)
parser.add_argument("-x", "--xslt", help="the XSLT file to use", required=True)
parser.add_argument("-t", "--tag", help=("the directory where 'Testing/TAG'"
"file is. Remember to call ctest with"
" '-T test' option to generate it"),
required=True)
parser = argparse.ArgumentParser()
parser.add_argument("-x",dest='xsl', required=True)
parser.add_argument("-t", dest='tag',required=True)
parsed = parser.parse_args()
# expanding user symbol "~"
parsed.xsl = expanduser(parsed.xslt)
parsed.tag = expanduser(parsed.tag)
# opening the TAG file
directory = None
try:
with open(join(parsed.tag, "Testing", "TAG")) as tagfile:
directory = tagfile.readline().strip()
except NotADirectoryError:
logging.error(
"'Testing/TAG' wasn't found on directory '{}'.".format(parsed.tag))
exit(1)
except FileNotFoundError:
logging.error(
"File '{}' not found.".format(join(parsed.tag, "Testing", "TAG")))
exit(1)
xmldoc = None
transform = None
try:
with open(join(parsed.tag, "Testing", directory, "Test.xml"))\
as testxmlfile:
xmldoc = etree.parse(testxmlfile)
with open(join(parsed.tag, "Testing", "TAG")) as tagfile:
directory = tagfile.readline().strip()
except FileNotFoundError:
logging.error("File {} not found. Was it deleted or moved?".format(
join(parsed.tag, "Testing", directory, "Test.xml")))
exit(1)
with open(join(parsed.tag, "Testing", directory, "Test.xml")) as testxmlfile:
xmldoc = etree.parse(testxmlfile)
try:
with open(parsed.xslt) as xsltfile:
xslt_root = etree.XML(xsltfile.read())
transform = etree.XSLT(xslt_root)
except FileNotFoundError:
logging.error("File {} not found.".format(parsed.xslt))
exit(1)
with open(parsed.xsl) as xslfile:
xsl_root = etree.XML(xslfile.read())
transform = etree.XSLT(xsl_root)
result_tree = transform(xmldoc)
print(result_tree)
The MIT License
Copyright (c) 2018-present, Bryan Gillespie
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
[![Build Status](https://travis-ci.com/RPGillespie6/fastcov.svg?branch=master)](https://travis-ci.com/RPGillespie6/fastcov)
[![Code Coverage](https://img.shields.io/codecov/c/github/rpgillespie6/fastcov.svg)](https://codecov.io/gh/RPGillespie6/fastcov)
[![PyPI Version](https://img.shields.io/pypi/v/fastcov.svg)](https://pypi.org/project/fastcov/)
<!-- # SPDX-License-Identifier: MIT -->
# fastcov
A parallelized gcov wrapper for generating intermediate coverage formats *fast*
The goal of fastcov is to generate code coverage intermediate formats *as fast as possible*, even for large projects with hundreds of gcda objects. The intermediate formats may then be consumed by a report generator such as lcov's genhtml, or a dedicated front end such as coveralls, codecov, etc. fastcov was originally designed to be a drop-in replacement for lcov (application coverage only, not kernel coverage).
Currently the only coverage formats supported by fastcov are:
- fastcov json format
- lcov info format
- sonarqube xml format (via [utility](utils/) script)
Note that cobertura xml is not currently supported by fastcov, but can still be achieved by converting lcov info format using [lcov_cobertura.py](https://github.com/eriwen/lcov-to-cobertura-xml).
A few prerequisites apply before you can run fastcov:
1. GCC version >= 9.0.0
These versions of GCOV have support for JSON intermediate format as well as streaming report data straight to stdout. This second feature (the ability for gcov to stream report data to stdout) is critical - without it, fastcov cannot run multiple instances of gcov in parallel without loss of correctness.
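As a quick sanity check (a sketch, assuming `gcov-9` is on your `PATH` and your build directory contains at least one `.gcda` file), you can confirm both the version and the ability to stream JSON to stdout:
```bash
$ gcov-9 --version                                    # should report >= 9.0.0
$ gcov-9 --json-format --stdout some_file.gcda | head # prints JSON records instead of writing .gcov.json.gz files
```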
If your linux distribution doesn't ship with GCC 9, the current easiest way (in my opinion) to try out fastcov is to use the fastcov docker image, which has GCC 9 compilers (`gcc-9` and `g++-9`), Python3, and CMake inside:
```bash
docker pull rpgillespie6/fastcov:latest
```
If you need other dependencies, just modify the Dockerfile and rebuild.
2. Object files must be built either:
- Using absolute paths for all `-I` flags passed to the compiler
or
- Invoking the compiler from the same root directory
If you use CMake, you are almost certainly satisfying this second constraint (unless you care about `ExternalProject` coverage).
## Quick Start
Assuming you have docker, fastcov is easy to use:
```bash
$ docker pull rpgillespie6/fastcov
$ docker run -it --rm -v ${PWD}:/mnt/workspace -w /mnt/workspace -u $(id -u ${USER}):$(id -g ${USER}) rpgillespie6/fastcov
$ <build project> # Make sure to compile with gcc-9 or g++-9 and to pass "-g -O0 -fprofile-arcs -ftest-coverage" to all gcc/g++ statements
$ <run unit tests>
$ fastcov.py --gcov gcov-9 --exclude /usr/include --lcov -o report.info
$ genhtml -o code_coverage report.info
$ firefox code_coverage/index.html
```
See the [example](example/) directory for a working CMake example.
## Installation
A minimum of Python 3.5 is currently required (due to recursive `glob` usage).
Fastcov is a single source python tool. That means you can simply copy `fastcov.py` from this repository and run it directly with no other hassle.
However, fastcov is also available as a Python3 package that can be installed via pip.
Install newest stable fastcov release from PyPI:
```bash
$ pip3 install fastcov
```
Or install the bleeding edge version from GitHub:
```bash
$ pip3 install git+https://github.com/rpgillespie6/fastcov.git
```
## Filtering Options
Fastcov uses *substring matching* (not regex) for all of its filtering options. Furthermore, all filtering options take a list of parameters as arguments.
Here are some common filtering combinations you may find useful:
```bash
$ fastcov.py --exclude /usr/include test/ # Exclude system header files and test files from final report
$ fastcov.py --include src/ # Only include files with "src/" in its path in the final report
$ fastcov.py --source-files ../src/source1.cpp ../src/source2.cpp # Only include exactly ../src/source1.cpp and ../src/source2.cpp in the final report
$ fastcov.py --branch-coverage # Only include most useful branches (discards exceptional branches and initializer list branches)
$ fastcov.py --exceptional-branch-coverage # Include ALL branches in coverage report
```
It's possible to include *both* `--include` and `--exclude`. In this case, `--exclude` always takes priority. This could be used, for example, to include files that are in `src/` but not in `src/test/` by passing `--include src/ --exclude test/`.
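For instance (a sketch using the paths from the example above):
```bash
$ fastcov.py --include src/ --exclude test/ --lcov -o report.info # "--exclude test/" wins over "--include src/" for files under src/test/
```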
Branch filters furthermore can stack:
```bash
$ fastcov.py --branch-coverage --include-br-lines-starting-with if else # Only include branch coverage for lines starting with "if" or "else"
$ fastcov.py --branch-coverage --exclude-br-lines-starting-with assert ASSERT # Don't include coverage for lines starting with "assert" or "ASSERT"
```
It's possible to include *both* `--include-br-lines-starting-with` and `--exclude-br-lines-starting-with`. In this case, a branch will be removed if its line does not start with one of the `--include-br-lines-starting-with` prefixes, or if it does start with one of the `--exclude-br-lines-starting-with` prefixes. This could be used, for example, to include branches starting with `else` but not with `else if` by passing `--include-br-lines-starting-with else --exclude-br-lines-starting-with "else if"`.
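Spelled out as a command (a sketch combining the two prefix filters described above):
```bash
$ fastcov.py --branch-coverage --include-br-lines-starting-with else --exclude-br-lines-starting-with "else if" # keep "else" branches, drop "else if" branches
```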
## Combine Operations
Fastcov can combine arbitrary `.info` and `.json` reports into a single report by setting the combine flag `-C`. Furthermore, the same pipeline that is run during non-combine operations can optionally be applied to the combined report (filtering, exclusion scanning, select output format).
Combine operations are not subject to the gcov and python minimum version requirements.
A few example snippets:
```bash
# Basic combine operation combining 3 reports into 1
$ fastcov.py -C report1.info report2.info report3.json --lcov -o report_final.info
# Read in report1.info, remove all coverage for files containing "/usr/include" and write out the result
$ fastcov.py -C report1.info --exclude /usr/include --lcov -o report1_filtered.info
# Combine 2 reports, (re-)scanning all of the source files contained in the final report for exclusion markers
$ fastcov.py -C report1.json report2.json --scan-exclusion-markers -o report3.json
```
## Utilities
This repository contains a few utilities that are complementary to fastcov. They are located in the [utils](utils/) directory, and like fastcov, are single source python scripts that can be copied from this repository and run directly. Alternatively, installing the latest version of fastcov using pip will also install these utilities. Here is a brief description of what each utility does:
- [fastcov_summary](utils/fastcov_summary.py)
This utility will summarize a provided fastcov JSON file similar to the way [genhtml](https://linux.die.net/man/1/genhtml) summarizes a given lcov info file. Additionally, flags can be passed that check if a certain coverage threshold is met for function, line, or branch coverage.
This script is useful for 2 purposes. It can be used to print out a coverage summary on the command line for a CI system to parse using regex (such as GitLab CI, for example). This script can also be used to fail builds if (for example) line coverage drops below a certain percentage.
- [fastcov_to_sonarqube](utils/fastcov_to_sonarqube.py)
This script will convert a provided fastcov JSON file to the Sonar [generic test coverage](https://docs.sonarqube.org/latest/analysis/generic-test/) XML format.
## Benchmarks
Anecdotal testing on my own projects indicates that fastcov is over 100x faster than lcov and over 30x faster than gcovr:
Project Size: ~250 .gcda, ~500 .gcov generated by gcov
Time to process all gcda and parse all gcov:
- fastcov: ~700ms
- lcov: ~90s
- gcovr: ~30s
Your mileage may vary depending on the number of cores you have available for fastcov to use!
\ No newline at end of file
#!/usr/bin/env bash
repo_url=https://github.com/RPGillespie6/fastcov
# List of available versions:
# https://github.com/RPGillespie6/fastcov/releases
commit_sha1=40dffe81d62c0d897afe4108f3b5489487ff3bce # version 1.14
files_to_download=("fastcov.py" "LICENSE" "README.md")
script_dir="$(dirname "${BASH_SOURCE[0]}")"
source "$script_dir/../../scripts/github.sh"
github_download_files "$script_dir" "$repo_url" "$commit_sha1" "${files_to_download[@]}"
#!/usr/bin/env python3
# SPDX-License-Identifier: MIT
# Copyright 2018-present, Bryan Gillespie
"""
Author: Bryan Gillespie
https://github.com/RPGillespie6/fastcov
A massively parallel gcov wrapper for generating intermediate coverage formats fast
The goal of fastcov is to generate code coverage intermediate formats as fast as possible,
even for large projects with hundreds of gcda objects. The intermediate formats may then be
consumed by a report generator such as lcov's genhtml, or a dedicated frontend such as coveralls.
Sample Usage:
$ cd build_dir
$ ./fastcov.py --zerocounters
$ <run unit tests>
$ ./fastcov.py --exclude /usr/include test/ --lcov -o report.info
$ genhtml -o code_coverage report.info
"""
import re
import os
import sys
import glob
import json
import time
import fnmatch
import logging
import argparse
import threading
import subprocess
import multiprocessing
FASTCOV_VERSION = (1,14)
MINIMUM_PYTHON = (3,5)
MINIMUM_GCOV = (9,0,0)
# Interesting metrics
START_TIME = time.monotonic()
GCOVS_TOTAL = 0
GCOVS_SKIPPED = 0
# Gcov Coverage File Extensions
GCOV_GCNO_EXT = ".gcno" # gcno = "[gc]ov [no]te"
GCOV_GCDA_EXT = ".gcda" # gcda = "[gc]ov [da]ta"
# For when things go wrong...
# Start error codes at 3 because 1-2 are special
# See https://stackoverflow.com/a/1535733/2516916
EXIT_CODE = 0
EXIT_CODES = {
"gcov_version": 3,
"python_version": 4,
"unsupported_coverage_format": 5,
"excl_not_found": 6,
"bad_chunk_file": 7,
"missing_json_key": 8,
}
# Disable all logging in case developers are using this as a module
logging.disable(level=logging.CRITICAL)
class FastcovFormatter(logging.Formatter):
def format(self, record):
record.levelname = record.levelname.lower()
log_message = super(FastcovFormatter, self).format(record)
return "[{:.3f}s] {}".format(stopwatch(), log_message)
class DiffParseError(Exception):
pass
class DiffParser(object):
def _refinePaths(self, diff_metadata, diff_base_dir):
diff_metadata.pop('/dev/null', None)
diff_metadata.pop('', None)
for key, value in diff_metadata.copy().items():
diff_metadata.pop(key)
#sources without added lines will be excluded
if value:
newpath = os.path.join(diff_base_dir, key) if diff_base_dir else os.path.abspath(key)
diff_metadata[newpath] = value
def _parseTargetFile(self, line_with_target_file):
# e.g. '+++ b/README.md1' or '+++ b/README.md1 timestamp'
target_source = line_with_target_file[4:].partition('\t')[0].strip()
target_source = target_source[2:] if target_source.startswith('b/') else target_source
return target_source
def _parseHunkBoundaries(self, line_with_hunk_boundaries, line_index):
# e.g. '@@ -121,4 +122,4 @@ Time to process all gcda and parse all gcov:'
# Here ['-121,4', '+122,4']
lines_info = line_with_hunk_boundaries[3:].partition("@@")[0].strip().split(' ')
if len(lines_info) != 2:
raise DiffParseError("Found invalid hunk. Line #{}. {}".format(line_index, line_with_hunk_boundaries))
# Here ['122','4']
target_lines_info = lines_info[1].strip('+').partition(',')
target_line_current = int(target_lines_info[0])
target_lines_count = int(target_lines_info[2]) if target_lines_info[2] else 1
# Here ['121','4']
source_lines_info = lines_info[0].strip('-').partition(',')
source_line_current = int(source_lines_info[0])
source_lines_count = int(source_lines_info[2]) if source_lines_info[2] else 1
return target_line_current, target_lines_count, source_line_current, source_lines_count
def parseDiffFile(self, diff_file, diff_base_dir, fallback_encodings=[]):
diff_metadata = {}
target_source = None
target_hunk = set()
target_line_current = 0
target_line_end = 0
source_line_current = 0
source_line_end = 0
found_hunk = False
for i, line in enumerate(getSourceLines(diff_file, fallback_encodings), 1):
line = line.rstrip()
if not found_hunk:
if line.startswith('+++ '):
# refresh file
target_source = self._parseTargetFile(line)
elif line.startswith('@@ '):
# refresh hunk
target_line_current, target_lines_count, source_line_current, source_lines_count = self._parseHunkBoundaries(line, i)
target_line_end = target_line_current + target_lines_count
source_line_end = source_line_current + source_lines_count
target_hunk = set()
found_hunk = True
continue
if target_line_current > target_line_end or source_line_current > source_line_end:
raise DiffParseError("Hunk longer than expected. Line #{}. {}".format(i, line))
if line.startswith('+'):
#line related to target
target_hunk.add(target_line_current)
target_line_current = target_line_current + 1
elif line.startswith(' ') or line == '':
# line related to both
target_line_current = target_line_current + 1
source_line_current = source_line_current + 1
elif line.startswith('-'):
# line related to source
source_line_current = source_line_current + 1
elif not line.startswith('\\'): # No newline at end of file
# line with newline marker is not included into any boundaries
raise DiffParseError("Found unrecognized hunk line type. Line #{}. {}".format(i, line))
if target_line_current == target_line_end and source_line_current == source_line_end:
# Checked all lines, save data
if target_source in diff_metadata:
diff_metadata[target_source] = target_hunk.union(diff_metadata[target_source])
else:
diff_metadata[target_source] = target_hunk
target_hunk = set()
found_hunk = False
if target_line_current != target_line_end or source_line_current != source_line_end:
raise DiffParseError("Unexpected end of file. Expected hunk with {} target lines, {} source lines".format(
target_line_end - target_line_current, source_line_end - source_line_current))
self._refinePaths(diff_metadata, diff_base_dir)
return diff_metadata
def filterByDiff(self, diff_file, dir_base_dir, fastcov_json, fallback_encodings=[]):
diff_metadata = self.parseDiffFile(diff_file, dir_base_dir, fallback_encodings)
logging.debug("Include only next files: {}".format(diff_metadata.keys()))
excluded_files_count = 0
excluded_lines_count = 0
for source in list(fastcov_json["sources"].keys()):
diff_lines = diff_metadata.get(source, None)
if not diff_lines:
excluded_files_count = excluded_files_count + 1
logging.debug("Exclude {} according to diff file".format(source))
fastcov_json["sources"].pop(source)
continue
for test_name, report_data in fastcov_json["sources"][source].copy().items():
# No info about function boundaries, removing all
for function in list(report_data["functions"].keys()):
report_data["functions"].pop(function, None)
for line in list(report_data["lines"].keys()):
if line not in diff_lines:
excluded_lines_count = excluded_lines_count + 1
report_data["lines"].pop(line)
for branch_line in list(report_data["branches"].keys()):
if branch_line not in diff_lines:
report_data["branches"].pop(branch_line)
if len(report_data["lines"]) == 0:
fastcov_json["sources"][source].pop(test_name)
if len(fastcov_json["sources"][source]) == 0:
excluded_files_count = excluded_files_count + 1
logging.debug('Exclude {} file as it has no lines due to diff filter'.format(source))
fastcov_json["sources"].pop(source)
logging.info("Excluded {} files and {} lines according to diff file".format(excluded_files_count, excluded_lines_count))
return fastcov_json
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
def setExitCode(key):
global EXIT_CODE
EXIT_CODE = EXIT_CODES[key]
def setExitCodeRaw(code):
global EXIT_CODE
EXIT_CODE = code
def incrementCounters(total, skipped):
global GCOVS_TOTAL
global GCOVS_SKIPPED
GCOVS_TOTAL += total
GCOVS_SKIPPED += skipped
def stopwatch():
"""Return number of seconds since last time this was called."""
global START_TIME
end_time = time.monotonic()
delta = end_time - START_TIME
START_TIME = end_time
return delta
def parseVersionFromLine(version_str):
"""Given a string containing a dotted integer version, parse out integers and return as tuple."""
version = re.search(r'(\d+\.\d+\.\d+)', version_str)
if not version:
return (0,0,0)
return tuple(map(int, version.group(1).split(".")))
def getGcovVersion(gcov):
p = subprocess.Popen([gcov, "-v"], stdout=subprocess.PIPE)
output = p.communicate()[0].decode('UTF-8')
p.wait()
return parseVersionFromLine(output.split("\n")[0])
def removeFiles(files):
for file in files:
os.remove(file)
def getFilteredCoverageFiles(coverage_files, exclude):
def excludeGcda(gcda):
for ex in exclude:
if ex in gcda:
logging.debug("Omitting %s due to '--exclude-gcda %s'", gcda, ex)
return False
return True
return list(filter(excludeGcda, coverage_files))
def globCoverageFiles(cwd, coverage_type):
return glob.glob(os.path.join(os.path.abspath(cwd), "**/*" + coverage_type), recursive=True)
def findCoverageFiles(cwd, coverage_files, use_gcno):
coverage_type = "user provided"
if not coverage_files:
# gcov strips off extension of whatever you pass it and searches [extensionless name] + .gcno/.gcda
# We should pass either gcno or gcda, but not both - if you pass both it will be processed twice
coverage_type = GCOV_GCNO_EXT if use_gcno else GCOV_GCDA_EXT
coverage_files = globCoverageFiles(cwd, coverage_type)
logging.info("Found {} coverage files ({})".format(len(coverage_files), coverage_type))
logging.debug("Coverage files found:\n %s", "\n ".join(coverage_files))
return coverage_files
def gcovWorker(data_q, metrics_q, args, chunk, gcov_filter_options):
base_report = {"sources": {}}
gcovs_total = 0
gcovs_skipped = 0
error_exit = False
gcov_bin = args.gcov
gcov_args = ["--json-format", "--stdout"]
if args.branchcoverage or args.xbranchcoverage:
gcov_args.append("--branch-probabilities")
encoding = sys.stdout.encoding if sys.stdout.encoding else 'UTF-8'
workdir = args.cdirectory if args.cdirectory else "."
p = subprocess.Popen([gcov_bin] + gcov_args + chunk, cwd=workdir, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
for i, line in enumerate(iter(p.stdout.readline, b'')):
try:
intermediate_json = json.loads(line.decode(encoding))
except json.decoder.JSONDecodeError as e:
logging.error("Could not process chunk file '{}' ({}/{})".format(chunk[i], i+1, len(chunk)))
logging.error(str(e))
setExitCode("bad_chunk_file")
continue
if "current_working_directory" not in intermediate_json:
logging.error("Missing 'current_working_directory' for data file: {}".format(intermediate_json))
setExitCode("missing_json_key")
continue
intermediate_json_files = processGcovs(args.cdirectory, intermediate_json["files"], intermediate_json["current_working_directory"], gcov_filter_options)
for f in intermediate_json_files:
distillSource(f, base_report["sources"], args.test_name, args.xbranchcoverage)
gcovs_total += len(intermediate_json["files"])
gcovs_skipped += len(intermediate_json["files"]) - len(intermediate_json_files)
p.wait()
data_q.put(base_report)
metrics_q.put((gcovs_total, gcovs_skipped))
sys.exit(EXIT_CODE)
def processGcdas(args, coverage_files, gcov_filter_options):
chunk_size = max(args.minimum_chunk, int(len(coverage_files) / args.jobs) + 1)
processes = []
data_q = multiprocessing.Queue()
metrics_q = multiprocessing.Queue()
for chunk in chunks(coverage_files, chunk_size):
p = multiprocessing.Process(target=gcovWorker, args=(data_q, metrics_q, args, chunk, gcov_filter_options))
processes.append(p)
p.start()
logging.info("Spawned {} gcov processes, each processing at most {} coverage files".format(len(processes), chunk_size))
fastcov_jsons = []
for p in processes:
fastcov_jsons.append(data_q.get())
incrementCounters(*metrics_q.get())
for p in processes:
p.join()
if p.exitcode != 0:
setExitCodeRaw(p.exitcode)
base_fastcov = fastcov_jsons.pop()
for fj in fastcov_jsons:
combineReports(base_fastcov, fj)
return base_fastcov
def shouldFilterSource(source, gcov_filter_options):
"""Returns true if the provided source file should be filtered due to CLI options, otherwise returns false."""
# If explicit sources were passed, check for match
if gcov_filter_options["sources"]:
if source not in gcov_filter_options["sources"]:
logging.debug("Filtering coverage for '%s' due to option '--source-files'", source)
return True
# Check exclude filter
for ex in gcov_filter_options["exclude"]:
if ex in source:
logging.debug("Filtering coverage for '%s' due to option '--exclude %s'", source, ex)
return True
# Check exclude-glob filter
for ex_glob in gcov_filter_options["exclude_glob"]:
if fnmatch.fnmatch(source, ex_glob):
logging.debug("Filtering coverage for '%s' due to option '--exclude-glob %s'", source, ex_glob)
return True
# Check include filter
if gcov_filter_options["include"]:
included = False
for inc in gcov_filter_options["include"]:
if inc in source:
included = True
break
if not included:
logging.debug("Filtering coverage for '%s' due to option '--include %s'", source, " ".join(gcov_filter_options["include"]))
return True
return False
def filterFastcov(fastcov_json, args):
logging.info("Performing filtering operations (if applicable)")
gcov_filter_options = getGcovFilterOptions(args)
for source in list(fastcov_json["sources"].keys()):
if shouldFilterSource(source, gcov_filter_options):
del fastcov_json["sources"][source]
def processGcov(cwd, gcov, source_base_dir, files, gcov_filter_options):
# Uses cwd if set, else source_base_dir from gcov json. If both are empty, uses "."
base_dir = cwd if cwd else source_base_dir
base_dir = base_dir if base_dir else "."
# Add absolute path
gcov["file_abs"] = os.path.abspath(os.path.join(base_dir, gcov["file"]))
if shouldFilterSource(gcov["file_abs"], gcov_filter_options):
return
files.append(gcov)
logging.debug("Accepted coverage for '%s'", gcov["file_abs"])
def processGcovs(cwd, gcov_files, source_base_dir, gcov_filter_options):
files = []
for gcov in gcov_files:
processGcov(cwd, gcov, source_base_dir, files, gcov_filter_options)
return files
def dumpBranchCoverageToLcovInfo(f, branches):
branch_miss = 0
branch_found = 0
brda = []
for line_num, branch_counts in branches.items():
for i, count in enumerate(branch_counts):
# Branch (<line number>, <block number>, <branch number>, <taken>)
brda.append((line_num, int(i/2), i, count))
branch_miss += int(count == 0)
branch_found += 1
for v in sorted(brda):
f.write("BRDA:{},{},{},{}\n".format(*v))
f.write("BRF:{}\n".format(branch_found)) # Branches Found
f.write("BRH:{}\n".format(branch_found - branch_miss)) # Branches Hit
def dumpToLcovInfo(fastcov_json, output):
with open(output, "w") as f:
sources = fastcov_json["sources"]
for sf in sorted(sources.keys()):
for tn in sorted(sources[sf].keys()):
data = sources[sf][tn]
f.write("TN:{}\n".format(tn)) #Test Name - used mainly in conjuction with genhtml --show-details
f.write("SF:{}\n".format(sf)) #Source File
fn_miss = 0
fn = []
fnda = []
for function, fdata in data["functions"].items():
fn.append((fdata["start_line"], function)) # Function Start Line
fnda.append((fdata["execution_count"], function)) # Function Hits
fn_miss += int(fdata["execution_count"] == 0)
# NOTE: lcov sorts FN, but not FNDA.
for v in sorted(fn):
f.write("FN:{},{}\n".format(*v))
for v in sorted(fnda):
f.write("FNDA:{},{}\n".format(*v))
f.write("FNF:{}\n".format(len(data["functions"]))) #Functions Found
f.write("FNH:{}\n".format((len(data["functions"]) - fn_miss))) #Functions Hit
if data["branches"]:
dumpBranchCoverageToLcovInfo(f, data["branches"])
line_miss = 0
da = []
for line_num, count in data["lines"].items():
da.append((line_num, count))
line_miss += int(count == 0)
for v in sorted(da):
f.write("DA:{},{}\n".format(*v)) # Line
f.write("LF:{}\n".format(len(data["lines"]))) #Lines Found
f.write("LH:{}\n".format((len(data["lines"]) - line_miss))) #Lines Hit
f.write("end_of_record\n")
def getSourceLines(source, fallback_encodings=[]):
"""Return a list of lines from the provided source, trying to decode with fallback encodings if the default fails."""
default_encoding = sys.getdefaultencoding()
for encoding in [default_encoding] + fallback_encodings:
try:
with open(source, encoding=encoding) as f:
return f.readlines()
except UnicodeDecodeError:
pass
logging.warning("Could not decode '{}' with {} or fallback encodings ({}); ignoring errors".format(source, default_encoding, ",".join(fallback_encodings)))
with open(source, errors="ignore") as f:
return f.readlines()
# Returns whether source coverage changed or not
def exclProcessSource(fastcov_sources, source, exclude_branches_sw, include_branches_sw, fallback_encodings):
# Before doing any work, check if this file even needs to be processed
if not exclude_branches_sw and not include_branches_sw:
# Ignore unencodable characters
with open(source, errors="ignore") as f:
if "LCOV_EXCL" not in f.read():
return False
# If we've made it this far we have to check every line
start_line = 0
end_line = 0
# Start enumeration at line 1 because the first line of the file is line 1 not 0
for i, line in enumerate(getSourceLines(source, fallback_encodings), 1):
# Cycle through test names (likely only 1)
for test_name in fastcov_sources[source]:
fastcov_data = fastcov_sources[source][test_name]
# Check if branch coverage should be deleted based on CLI options
if (exclude_branches_sw or include_branches_sw) and (i in fastcov_data["branches"]):
del_exclude_br = exclude_branches_sw and any(line.lstrip().startswith(e) for e in exclude_branches_sw)
del_include_br = include_branches_sw and all(not line.lstrip().startswith(e) for e in include_branches_sw)
if del_exclude_br or del_include_br:
del fastcov_data["branches"][i]
# Skip to next line as soon as possible
if "LCOV_EXCL" not in line:
continue
            # Build line-to-function dict so we can quickly delete by line number
line_to_func = {}
for f in fastcov_data["functions"].keys():
l = fastcov_data["functions"][f]["start_line"]
if l not in line_to_func:
line_to_func[l] = set()
line_to_func[l].add(f)
if "LCOV_EXCL_LINE" in line:
for key in ["lines", "branches"]:
if i in fastcov_data[key]:
del fastcov_data[key][i]
if i in line_to_func:
for key in line_to_func[i]:
if key in fastcov_data["functions"]:
del fastcov_data["functions"][key]
elif "LCOV_EXCL_START" in line:
start_line = i
elif "LCOV_EXCL_STOP" in line:
end_line = i
if not start_line:
end_line = 0
continue
for key in ["lines", "branches"]:
for line_num in list(fastcov_data[key].keys()):
if start_line <= line_num <= end_line:
del fastcov_data[key][line_num]
for line_num in range(start_line, end_line):
if line_num in line_to_func:
for key in line_to_func[line_num]:
if key in fastcov_data["functions"]:
del fastcov_data["functions"][key]
start_line = end_line = 0
elif "LCOV_EXCL_BR_LINE" in line:
if i in fastcov_data["branches"]:
del fastcov_data["branches"][i]
# Source coverage changed
return True
def exclMarkerWorker(data_q, fastcov_sources, chunk, exclude_branches_sw, include_branches_sw, fallback_encodings):
changed_sources = []
for source in chunk:
try:
if exclProcessSource(fastcov_sources, source, exclude_branches_sw, include_branches_sw, fallback_encodings):
changed_sources.append((source, fastcov_sources[source]))
except FileNotFoundError:
logging.error("Could not find '%s' to scan for exclusion markers...", source)
setExitCode("excl_not_found") # Set exit code because of error
# Write out changed sources back to main fastcov file
data_q.put(changed_sources)
# Exit current process with appropriate code
sys.exit(EXIT_CODE)
def processExclusionMarkers(fastcov_json, jobs, exclude_branches_sw, include_branches_sw, min_chunk_size, fallback_encodings):
chunk_size = max(min_chunk_size, int(len(fastcov_json["sources"]) / jobs) + 1)
processes = []
data_q = multiprocessing.Queue()
for chunk in chunks(list(fastcov_json["sources"].keys()), chunk_size):
p = multiprocessing.Process(target=exclMarkerWorker, args=(data_q, fastcov_json["sources"], chunk, exclude_branches_sw, include_branches_sw, fallback_encodings))
processes.append(p)
p.start()
logging.info("Spawned {} exclusion marker scanning processes, each processing at most {} source files".format(len(processes), chunk_size))
changed_sources = []
for p in processes:
changed_sources += data_q.get()
for p in processes:
p.join()
if p.exitcode != 0:
setExitCodeRaw(p.exitcode)
for changed_source in changed_sources:
fastcov_json["sources"][changed_source[0]] = changed_source[1]
def validateSources(fastcov_json):
logging.info("Checking if all sources exist")
for source in fastcov_json["sources"].keys():
if not os.path.exists(source):
logging.error("Cannot find '{}'".format(source))
def distillFunction(function_raw, functions):
function_name = function_raw["name"]
# NOTE: need to explicitly cast all counts coming from gcov to int - this is because gcov's json library
    # will pass large counts in scientific notation (e.g. 1.2e+45)
start_line = int(function_raw["start_line"])
execution_count = int(function_raw["execution_count"])
if function_name not in functions:
functions[function_name] = {
"start_line": start_line,
"execution_count": execution_count
}
else:
functions[function_name]["execution_count"] += execution_count
def emptyBranchSet(branch1, branch2):
return (branch1["count"] == 0 and branch2["count"] == 0)
def matchingBranchSet(branch1, branch2):
return (branch1["count"] == branch2["count"])
def filterExceptionalBranches(branches):
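    # gcov reports the branches of a line in pairs. A pair is dropped when its second
    # entry is an exceptional ("throw") branch; once such a pair has been seen, a fully
    # unexecuted pair that follows two kept branches with matching counts is treated as
    # initializer-list noise and all branch data for the line is discarded.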
filtered_branches = []
exception_branch = False
for i in range(0, len(branches), 2):
if i+1 >= len(branches):
filtered_branches.append(branches[i])
break
# Filter exceptional branch noise
if branches[i+1]["throw"]:
exception_branch = True
continue
# Filter initializer list noise
if exception_branch and emptyBranchSet(branches[i], branches[i+1]) and len(filtered_branches) >= 2 and matchingBranchSet(filtered_branches[-1], filtered_branches[-2]):
return []
filtered_branches.append(branches[i])
filtered_branches.append(branches[i+1])
return filtered_branches
def distillLine(line_raw, lines, branches, include_exceptional_branches):
line_number = int(line_raw["line_number"])
count = int(line_raw["count"])
if line_number not in lines:
lines[line_number] = count
else:
lines[line_number] += count
# Filter out exceptional branches by default unless requested otherwise
if not include_exceptional_branches:
line_raw["branches"] = filterExceptionalBranches(line_raw["branches"])
# Increment all branch counts
for i, branch in enumerate(line_raw["branches"]):
if line_number not in branches:
branches[line_number] = []
blen = len(branches[line_number])
glen = len(line_raw["branches"])
if blen < glen:
branches[line_number] += [0] * (glen - blen)
branches[line_number][i] += int(branch["count"])
def distillSource(source_raw, sources, test_name, include_exceptional_branches):
source_name = source_raw["file_abs"]
if source_name not in sources:
sources[source_name] = {
test_name: {
"functions": {},
"branches": {},
"lines": {}
}
}
for function in source_raw["functions"]:
distillFunction(function, sources[source_name][test_name]["functions"])
for line in source_raw["lines"]:
distillLine(line, sources[source_name][test_name]["lines"], sources[source_name][test_name]["branches"], include_exceptional_branches)
def dumpToJson(intermediate, output):
with open(output, "w") as f:
json.dump(intermediate, f)
def getGcovFilterOptions(args):
return {
"sources": set([os.path.abspath(s) for s in args.sources]), #Make paths absolute, use set for fast lookups
"include": args.includepost,
"exclude": args.excludepost,
"exclude_glob":args.excludepost_glob
}
def addDicts(dict1, dict2):
"""Add dicts together by value. i.e. addDicts({"a":1,"b":0}, {"a":2}) == {"a":3,"b":0}."""
result = {k:v for k,v in dict1.items()}
for k,v in dict2.items():
if k in result:
result[k] += v
else:
result[k] = v
return result
def addLists(list1, list2):
"""Add lists together by value. i.e. addLists([1,1], [2,2]) == [3,3]."""
# Find big list and small list
blist, slist = list(list2), list(list1)
if len(list1) > len(list2):
blist, slist = slist, blist
# Overlay small list onto big list
for i, b in enumerate(slist):
blist[i] += b
return blist
def combineReports(base, overlay):
for source, scov in overlay["sources"].items():
# Combine Source Coverage
if source not in base["sources"]:
base["sources"][source] = scov
continue
for test_name, tcov in scov.items():
# Combine Source Test Name Coverage
if test_name not in base["sources"][source]:
base["sources"][source][test_name] = tcov
continue
# Drill down and create convenience variable
base_data = base["sources"][source][test_name]
# Combine Line Coverage
base_data["lines"] = addDicts(base_data["lines"], tcov["lines"])
# Combine Branch Coverage
for branch, cov in tcov["branches"].items():
if branch not in base_data["branches"]:
base_data["branches"][branch] = cov
else:
base_data["branches"][branch] = addLists(base_data["branches"][branch], cov)
# Combine Function Coverage
for function, cov in tcov["functions"].items():
if function not in base_data["functions"]:
base_data["functions"][function] = cov
else:
base_data["functions"][function]["execution_count"] += cov["execution_count"]
def parseInfo(path):
"""Parse an lcov .info file into fastcov json."""
fastcov_json = {
"sources": {}
}
with open(path) as f:
for line in f:
if line.startswith("TN:"):
current_test_name = line[3:].strip()
elif line.startswith("SF:"):
current_sf = line[3:].strip()
fastcov_json["sources"][current_sf] = {
current_test_name: {
"functions": {},
"branches": {},
"lines": {},
}
}
current_data = fastcov_json["sources"][current_sf][current_test_name]
elif line.startswith("FN:"):
line_num, function_name = line[3:].strip().split(",")
current_data["functions"][function_name] = {}
current_data["functions"][function_name]["start_line"] = int(line_num)
elif line.startswith("FNDA:"):
count, function_name = line[5:].strip().split(",")
current_data["functions"][function_name]["execution_count"] = int(count)
elif line.startswith("DA:"):
line_num, count = line[3:].strip().split(",")
current_data["lines"][line_num] = int(count)
elif line.startswith("BRDA:"):
branch_tokens = line[5:].strip().split(",")
line_num, count = branch_tokens[0], branch_tokens[-1]
if line_num not in current_data["branches"]:
current_data["branches"][line_num] = []
current_data["branches"][line_num].append(int(count))
return fastcov_json
def convertKeysToInt(report):
for source in report["sources"].keys():
for test_name in report["sources"][source].keys():
report_data = report["sources"][source][test_name]
report_data["lines"] = {int(k):v for k,v in report_data["lines"].items()}
report_data["branches"] = {int(k):v for k,v in report_data["branches"].items()}
def parseAndCombine(paths):
base_report = {}
for path in paths:
if path.endswith(".json"):
with open(path) as f:
report = json.load(f)
elif path.endswith(".info"):
report = parseInfo(path)
else:
logging.error("Currently only fastcov .json and lcov .info supported for combine operations, aborting due to %s...\n", path)
sys.exit(EXIT_CODES["unsupported_coverage_format"])
# In order for sorting to work later when we serialize,
# make sure integer keys are int
convertKeysToInt(report)
if not base_report:
base_report = report
logging.info("Setting {} as base report".format(path))
else:
combineReports(base_report, report)
logging.info("Adding {} to base report".format(path))
return base_report
def getCombineCoverage(args):
logging.info("Performing combine operation")
fastcov_json = parseAndCombine(args.combine)
filterFastcov(fastcov_json, args)
return fastcov_json
def getGcovCoverage(args):
# Need at least python 3.5 because of use of recursive glob
checkPythonVersion(sys.version_info[0:2])
# Need at least gcov 9.0.0 because that's when gcov JSON and stdout streaming was introduced
checkGcovVersion(getGcovVersion(args.gcov))
# Get list of gcda files to process
coverage_files = findCoverageFiles(args.directory, args.coverage_files, args.use_gcno)
# If gcda/gcno filtering is enabled, filter them out now
if args.excludepre:
coverage_files = getFilteredCoverageFiles(coverage_files, args.excludepre)
logging.info("Found {} coverage files after filtering".format(len(coverage_files)))
# We "zero" the "counters" by simply deleting all gcda files
if args.zerocounters:
removeFiles(globCoverageFiles(args.directory, GCOV_GCDA_EXT))
logging.info("Removed {} .gcda files".format(len(coverage_files)))
sys.exit()
# Fire up one gcov per cpu and start processing gcdas
gcov_filter_options = getGcovFilterOptions(args)
fastcov_json = processGcdas(args, coverage_files, gcov_filter_options)
# Summarize processing results
logging.info("Processed {} .gcov files ({} total, {} skipped)".format(GCOVS_TOTAL - GCOVS_SKIPPED, GCOVS_TOTAL, GCOVS_SKIPPED))
logging.debug("Final report will contain coverage for the following %d source files:\n %s", len(fastcov_json["sources"]), "\n ".join(fastcov_json["sources"]))
return fastcov_json
def formatCoveredItems(covered, total):
coverage = (covered * 100.0) / total if total > 0 else 100.0
coverage = round(coverage, 2)
return "{:.2f}%, {}/{}".format(coverage, covered, total)
def dumpStatistic(fastcov_json):
total_lines = 0
covered_lines = 0
total_functions = 0
covered_functions = 0
total_files = len(fastcov_json["sources"])
covered_files = 0
for source_name, source in fastcov_json["sources"].items():
is_file_covered = False
for test_name, test in source.items():
total_lines += len(test["lines"])
for execution_count in test["lines"].values():
covered_lines += 1 if execution_count > 0 else 0
is_file_covered = is_file_covered or execution_count > 0
total_functions += len(test["functions"])
for function in test["functions"].values():
covered_functions += 1 if function['execution_count'] > 0 else 0
is_file_covered = is_file_covered or function['execution_count'] > 0
if is_file_covered:
covered_files = covered_files + 1
logging.info("Files Coverage: {}".format(formatCoveredItems(covered_files, total_files)))
logging.info("Functions Coverage: {}".format(formatCoveredItems(covered_functions, total_functions)))
logging.info("Lines Coverage: {}".format(formatCoveredItems(covered_lines, total_lines)))
def dumpFile(fastcov_json, args):
if args.lcov:
dumpToLcovInfo(fastcov_json, args.output)
logging.info("Created lcov info file '{}'".format(args.output))
else:
dumpToJson(fastcov_json, args.output)
logging.info("Created fastcov json file '{}'".format(args.output))
if args.dump_statistic:
dumpStatistic(fastcov_json)
def tupleToDotted(tup):
return ".".join(map(str, tup))
def parseArgs():
parser = argparse.ArgumentParser(description='A parallel gcov wrapper for fast coverage report generation')
parser.add_argument('-z', '--zerocounters', dest='zerocounters', action="store_true", help='Recursively delete all gcda files')
# Enable Branch Coverage
parser.add_argument('-b', '--branch-coverage', dest='branchcoverage', action="store_true", help='Include only the most useful branches in the coverage report.')
parser.add_argument('-B', '--exceptional-branch-coverage', dest='xbranchcoverage', action="store_true", help='Include ALL branches in the coverage report (including potentially noisy exceptional branches).')
parser.add_argument('-A', '--exclude-br-lines-starting-with', dest='exclude_branches_sw', nargs="+", metavar='', default=[], help='Exclude branches from lines starting with one of the provided strings (i.e. assert, return, etc.)')
parser.add_argument('-a', '--include-br-lines-starting-with', dest='include_branches_sw', nargs="+", metavar='', default=[], help='Include only branches from lines starting with one of the provided strings (i.e. if, else, while, etc.)')
parser.add_argument('-X', '--skip-exclusion-markers', dest='skip_exclusion_markers', action="store_true", help='Skip reading source files to search for lcov exclusion markers (such as "LCOV_EXCL_LINE")')
parser.add_argument('-x', '--scan-exclusion-markers', dest='scan_exclusion_markers', action="store_true", help='(Combine operations) Force reading source files to search for lcov exclusion markers (such as "LCOV_EXCL_LINE")')
# Capture untested file coverage as well via gcno
parser.add_argument('-n', '--process-gcno', dest='use_gcno', action="store_true", help='Process both gcno and gcda coverage files. This option is useful for capturing untested files in the coverage report.')
# Filtering Options
parser.add_argument('-s', '--source-files', dest='sources', nargs="+", metavar='', default=[], help='Filter: Specify exactly which source files should be included in the final report. Paths must be either absolute or relative to current directory.')
parser.add_argument('-e', '--exclude', dest='excludepost', nargs="+", metavar='', default=[], help='Filter: Exclude source files from final report if they contain one of the provided substrings (i.e. /usr/include test/, etc.)')
    parser.add_argument('-eg', '--exclude-glob', dest='excludepost_glob', nargs="+", metavar='', default=[], help='Filter: Exclude source files from final report if they match one of the provided glob patterns (i.e. /usr/include/* test/*, etc.)')
parser.add_argument('-i', '--include', dest='includepost', nargs="+", metavar='', default=[], help='Filter: Only include source files in final report that contain one of the provided substrings (i.e. src/ etc.)')
parser.add_argument('-f', '--gcda-files', dest='coverage_files', nargs="+", metavar='', default=[], help='Filter: Specify exactly which gcda or gcno files should be processed. Note that specifying gcno causes both gcno and gcda to be processed.')
parser.add_argument('-E', '--exclude-gcda', dest='excludepre', nargs="+", metavar='', default=[], help='Filter: Exclude gcda or gcno files from being processed via simple find matching (not regex)')
parser.add_argument('-u', '--diff-filter', dest='diff_file', default='', help='Unified diff file with changes which will be included into final report')
parser.add_argument('-ub', '--diff-base-dir', dest='diff_base_dir', default='', help='Base directory for sources in unified diff file, usually repository dir')
parser.add_argument('-g', '--gcov', dest='gcov', default='gcov', help='Which gcov binary to use')
parser.add_argument('-d', '--search-directory', dest='directory', default=".", help='Base directory to recursively search for gcda files (default: .)')
parser.add_argument('-c', '--compiler-directory', dest='cdirectory', default="", help='Base directory compiler was invoked from (default: . or read from gcov) \
This needs to be set if invoking fastcov from somewhere other than the base compiler directory. No need to set it if gcc version > 9.1')
parser.add_argument('-j', '--jobs', dest='jobs', type=int, default=multiprocessing.cpu_count(), help='Number of parallel gcov to spawn (default: {}).'.format(multiprocessing.cpu_count()))
parser.add_argument('-m', '--minimum-chunk-size', dest='minimum_chunk', type=int, default=5, help='Minimum number of files a thread should process (default: 5). \
If you have only 4 gcda files but they are monstrously huge, you could change this value to a 1 so that each thread will only process 1 gcda. Otherwise fastcov will spawn only 1 thread to process all of them.')
parser.add_argument('-F', '--fallback-encodings', dest='fallback_encodings', nargs="+", metavar='', default=[], help='List of encodings to try if opening a source file with the default fails (i.e. latin1, etc.). This option is not usually needed.')
parser.add_argument('-l', '--lcov', dest='lcov', action="store_true", help='Output in lcov info format instead of fastcov json')
parser.add_argument('-o', '--output', dest='output', default="", help='Name of output file (default: coverage.json or coverage.info, depends on --lcov option)')
parser.add_argument('-q', '--quiet', dest='quiet', action="store_true", help='Suppress output to stdout')
parser.add_argument('-t', '--test-name', dest='test_name', default="", help='Specify a test name for the coverage. Equivalent to lcov\'s `-t`.')
    parser.add_argument('-C', '--add-tracefile', dest='combine', nargs="+", help='Combine multiple coverage files into one. If this flag is specified, fastcov will do a combine operation instead of invoking gcov. Equivalent to lcov\'s `-a`.')
parser.add_argument('-V', '--verbose', dest="verbose", action="store_true", help="Print more detailed information about what fastcov is doing")
parser.add_argument('-w', '--validate-sources', dest="validate_sources", action="store_true", help="Check if every source file exists")
parser.add_argument('-p', '--dump-statistic', dest="dump_statistic", action="store_true", help="Dump total statistic at the end")
parser.add_argument('-v', '--version', action="version", version='%(prog)s {version}'.format(version=__version__), help="Show program's version number and exit")
args = parser.parse_args()
if not args.output:
args.output = 'coverage.info' if args.lcov else 'coverage.json'
return args
def checkPythonVersion(version):
"""Exit if the provided python version is less than the supported version."""
if version < MINIMUM_PYTHON:
sys.stderr.write("Minimum python version {} required, found {}\n".format(tupleToDotted(MINIMUM_PYTHON), tupleToDotted(version)))
sys.exit(EXIT_CODES["python_version"])
def checkGcovVersion(version):
"""Exit if the provided gcov version is less than the supported version."""
if version < MINIMUM_GCOV:
sys.stderr.write("Minimum gcov version {} required, found {}\n".format(tupleToDotted(MINIMUM_GCOV), tupleToDotted(version)))
sys.exit(EXIT_CODES["gcov_version"])
def setupLogging(quiet, verbose):
handler = logging.StreamHandler()
handler.setFormatter(FastcovFormatter("[%(levelname)s]: %(message)s"))
root = logging.getLogger()
root.setLevel(logging.INFO)
root.addHandler(handler)
if not quiet:
logging.disable(level=logging.NOTSET) # Re-enable logging
if verbose:
root.setLevel(logging.DEBUG)
def main():
args = parseArgs()
# Setup logging
setupLogging(args.quiet, args.verbose)
# Get report from appropriate source
if args.combine:
fastcov_json = getCombineCoverage(args)
skip_exclusion_markers = not args.scan_exclusion_markers
else:
fastcov_json = getGcovCoverage(args)
skip_exclusion_markers = args.skip_exclusion_markers
# Scan for exclusion markers
if not skip_exclusion_markers:
processExclusionMarkers(fastcov_json, args.jobs, args.exclude_branches_sw, args.include_branches_sw, args.minimum_chunk, args.fallback_encodings)
logging.info("Scanned {} source files for exclusion markers".format(len(fastcov_json["sources"])))
if args.diff_file:
logging.info("Filtering according to {} file".format(args.diff_file))
DiffParser().filterByDiff(args.diff_file, args.diff_base_dir, fastcov_json, args.fallback_encodings)
if args.validate_sources:
validateSources(fastcov_json)
# Dump to desired file format
dumpFile(fastcov_json, args)
# If there was an error along the way, but we still completed the pipeline...
if EXIT_CODE:
sys.exit(EXIT_CODE)
# Set package version... it's way down here so that we can call tupleToDotted
__version__ = tupleToDotted(FASTCOV_VERSION)
if __name__ == '__main__':
main()
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
# lcov to cobertura XML converter
[![CI](https://github.com/eriwen/lcov-to-cobertura-xml/actions/workflows/ci.yml/badge.svg)](https://github.com/eriwen/lcov-to-cobertura-xml/actions/workflows/ci.yml)
[![Docs](https://github.com/eriwen/lcov-to-cobertura-xml/actions/workflows/sphinx.yml/badge.svg)](https://github.com/eriwen/lcov-to-cobertura-xml/actions/workflows/sphinx.yml)
[![Security check - Bandit](https://github.com/eriwen/lcov-to-cobertura-xml/actions/workflows/bandit.yml/badge.svg)](https://github.com/eriwen/lcov-to-cobertura-xml/actions/workflows/bandit.yml)
[![Release](https://github.com/eriwen/lcov-to-cobertura-xml/actions/workflows/release.yml/badge.svg)](https://github.com/eriwen/lcov-to-cobertura-xml/actions/workflows/release.yml)
This project does as the name implies: it converts code coverage report files in [lcov](http://ltp.sourceforge.net/coverage/lcov.php) format to [Cobertura](http://cobertura.sourceforge.net/)'s XML report format so that CI servers like [Jenkins](http://jenkins-ci.org) can aggregate results and determine build stability etc.
Coverage metrics supported:
- Package/folder overall line and branch coverage
- Class/file overall line and branch coverage
- Functions hit
- Line and Branch hits
## Quick usage
[Grab it raw](https://raw.github.com/eriwen/lcov-to-cobertura-xml/master/lcov_cobertura/lcov_cobertura.py) and run it with python:
```bash
python lcov_cobertura.py lcov-file.dat
```
- `-b/--base-dir` - (Optional) Directory where source files are located. Defaults to the current directory
- `-e/--excludes` - (Optional) Comma-separated list of regexes of packages to exclude
- `-o/--output` - (Optional) Path to store cobertura xml file. _Defaults to ./coverage.xml_
- `-d/--demangle` - (Optional) Demangle C++ function names. _Requires c++filt_
```bash
python lcov_cobertura.py lcov-file.dat --base-dir src/dir --excludes test.lib --output build/coverage.xml --demangle
```
## With [pip](http://pypi.python.org/pypi/pip):
```bash
pip install lcov_cobertura
```
### Command-line usage
```bash
lcov_cobertura lcov-file.dat
```
- `-b/--base-dir` - (Optional) Directory where source files are located. Defaults to the current directory
- `-e/--excludes` - (Optional) Comma-separated list of regexes of packages to exclude
- `-o/--output` - (Optional) Path to store cobertura xml file. _Defaults to ./coverage.xml_
- `-d/--demangle` - (Optional) Demangle C++ function names. _Requires c++filt_
```bash
lcov_cobertura lcov-file.dat --base-dir src/dir --excludes test.lib --output build/coverage.xml --demangle
```
### Usage as a Python module
Use it anywhere in your python:
```python
from lcov_cobertura import LcovCobertura
LCOV_INPUT = 'SF:foo/file.ext\nDA:1,1\nDA:2,0\nend_of_record\n'
converter = LcovCobertura(LCOV_INPUT)
cobertura_xml = converter.convert()
print(cobertura_xml)
```
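For a file-based workflow, the same class can drive a small script. A minimal sketch, assuming an existing lcov report named `coverage.info` and the output path `coverage.xml` (both placeholders):

```python
from lcov_cobertura import LcovCobertura

# Read an existing lcov report and convert it, resolving source paths
# relative to src/ and excluding test packages (paths are placeholders).
with open('coverage.info') as lcov_file:
    converter = LcovCobertura(lcov_file.read(), base_dir='src', excludes=['test.*'])

with open('coverage.xml', 'w') as output_file:
    output_file.write(converter.convert())
```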
## Environment Support
Python 3.8+ is supported. The last release with Python 2.x support is [version 1.6](https://pypi.org/project/lcov_cobertura/1.6/).
## Contributions
This project is made possible due to the efforts of these fine people:
- [Eric Wendelin](https://eriwen.com)
- [Björge Dijkstra](https://github.com/bjd)
- [Jon Schewe](http://mtu.net/~jpschewe)
- [Yury V. Zaytsev](http://yury.zaytsev.net)
- [Steve Arnold](https://github.com/sarnold)
## License
This project is provided under the [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0).
#!/usr/bin/env bash
repo_url=https://github.com/eriwen/lcov-to-cobertura-xml
# List of available versions:
# https://github.com/eriwen/lcov-to-cobertura-xml/releases/
commit_sha1=18489f195e5389fca3fec53608a5503af759ee44 # version 2.0.2
files_to_download=("LICENSE" "README.md" "lcov_cobertura/lcov_cobertura.py")
script_dir="$(dirname "${BASH_SOURCE[0]}")"
source "$script_dir/../../scripts/github.sh"
github_download_files "$script_dir" "$repo_url" "$commit_sha1" "${files_to_download[@]}"
#!/usr/bin/env python
# Copyright 2011-2022 Eric Wendelin
#
# This is free software, licensed under the Apache License, Version 2.0,
# available in the accompanying LICENSE.txt file.
"""
Converts lcov line coverage output to Cobertura-compatible XML for CI
"""
import re
import sys
import os
import time
import subprocess # nosec - not for untrusted input
from xml.dom import minidom # nosec - not for untrusted input
from optparse import OptionParser
from distutils.spawn import find_executable
__version__ = '2.0.2'
CPPFILT = "c++filt"
HAVE_CPPFILT = False
if find_executable(CPPFILT) is not None:
HAVE_CPPFILT = True
class Demangler():
def __init__(self):
self.pipe = subprocess.Popen( # nosec - not for untrusted input
[CPPFILT], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
def demangle(self, name):
newname = name + "\n"
self.pipe.stdin.write(newname.encode('utf-8'))
self.pipe.stdin.flush()
res = self.pipe.stdout.readline().decode('utf-8')
return res.rstrip()
def __del__(self):
self.pipe.stdin.close()
self.pipe.terminate()
self.pipe.wait()
class LcovCobertura():
"""
Converts code coverage report files in lcov format to Cobertura's XML
report format so that CI servers like Jenkins can aggregate results and
determine build stability etc.
>>> from lcov_cobertura import LcovCobertura
>>> LCOV_INPUT = 'your lcov input'
>>> converter = LcovCobertura(LCOV_INPUT)
>>> cobertura_xml = converter.convert()
>>> print(cobertura_xml)
<?xml version="1.0" ?>
<!DOCTYPE coverage
SYSTEM 'http://cobertura.sourceforge.net/xml/coverage-04.dtd'>
...
"""
def __init__(self, lcov_data, base_dir='.', excludes=None, demangle=False):
"""
Create a new :class:`LcovCobertura` object using the given `lcov_data`
and `options`.
:param lcov_data: Path to LCOV data file
:type lcov_data: string
:param base_dir: Path upon which to base all sources
:type base_dir: string
:param excludes: list of regexes to packages as excluded
:type excludes: [string]
:param demangle: whether to demangle function names using c++filt
:type demangle: bool
"""
if not excludes:
excludes = []
self.lcov_data = lcov_data
self.base_dir = base_dir
self.excludes = excludes
if demangle:
demangler = Demangler()
self.format = demangler.demangle
else:
self.format = lambda x: x
def convert(self):
"""
Convert lcov file to cobertura XML using options from this instance.
"""
coverage_data = self.parse()
return self.generate_cobertura_xml(coverage_data)
def parse(self, **kwargs):
"""
        Parse the lcov data into a data structure that can be serialized in
        any logical format.
"""
coverage_data = {
'packages': {},
'summary': {'lines-total': 0, 'lines-covered': 0,
'branches-total': 0, 'branches-covered': 0},
'timestamp': str(kwargs["timestamp"]) if "timestamp" in kwargs else str(int(time.time()))
}
package = None
current_file = None
file_lines_total = 0
file_lines_covered = 0
file_lines = {}
file_methods = {}
file_branches_total = 0
file_branches_covered = 0
for line in self.lcov_data.split('\n'):
if line.strip() == 'end_of_record':
if current_file is not None:
package_dict = coverage_data['packages'][package]
package_dict['lines-total'] += file_lines_total
package_dict['lines-covered'] += file_lines_covered
package_dict['branches-total'] += file_branches_total
package_dict['branches-covered'] += file_branches_covered
file_dict = package_dict['classes'][current_file]
file_dict['lines-total'] = file_lines_total
file_dict['lines-covered'] = file_lines_covered
file_dict['lines'] = dict(file_lines)
file_dict['methods'] = dict(file_methods)
file_dict['branches-total'] = file_branches_total
file_dict['branches-covered'] = file_branches_covered
coverage_data['summary']['lines-total'] += file_lines_total
coverage_data['summary']['lines-covered'] += file_lines_covered
coverage_data['summary']['branches-total'] += file_branches_total
coverage_data['summary']['branches-covered'] += file_branches_covered
line_parts = line.split(':', 1)
input_type = line_parts[0]
if input_type == 'SF':
# Get file name
file_name = line_parts[-1].strip()
relative_file_name = os.path.relpath(file_name, self.base_dir)
package = '.'.join(relative_file_name.split(os.path.sep)[0:-1])
class_name = '.'.join(relative_file_name.split(os.path.sep))
if package not in coverage_data['packages']:
coverage_data['packages'][package] = {
'classes': {}, 'lines-total': 0, 'lines-covered': 0,
'branches-total': 0, 'branches-covered': 0
}
coverage_data['packages'][package]['classes'][
relative_file_name] = {
'name': class_name, 'lines': {}, 'lines-total': 0,
'lines-covered': 0, 'branches-total': 0,
'branches-covered': 0
}
package = package
current_file = relative_file_name
file_lines_total = 0
file_lines_covered = 0
file_lines.clear()
file_methods.clear()
file_branches_total = 0
file_branches_covered = 0
elif input_type == 'DA':
# DA:2,0
(line_number, line_hits) = line_parts[-1].strip().split(',')[:2]
line_number = int(line_number)
if line_number not in file_lines:
file_lines[line_number] = {
'branch': 'false', 'branches-total': 0,
'branches-covered': 0
}
file_lines[line_number]['hits'] = line_hits
# Increment lines total/covered for class and package
try:
if int(line_hits) > 0:
file_lines_covered += 1
except ValueError:
pass
file_lines_total += 1
elif input_type == 'BRDA':
# BRDA:1,1,2,0
(line_number, block_number, branch_number, branch_hits) = line_parts[-1].strip().split(',')
line_number = int(line_number)
if line_number not in file_lines:
file_lines[line_number] = {
'branch': 'true', 'branches-total': 0,
'branches-covered': 0, 'hits': 0
}
file_lines[line_number]['branch'] = 'true'
file_lines[line_number]['branches-total'] += 1
file_branches_total += 1
if branch_hits != '-' and int(branch_hits) > 0:
file_lines[line_number]['branches-covered'] += 1
file_branches_covered += 1
elif input_type == 'BRF':
file_branches_total = int(line_parts[1])
elif input_type == 'BRH':
file_branches_covered = int(line_parts[1])
elif input_type == 'FN':
# FN:5,(anonymous_1)
function_line, function_name = line_parts[-1].strip().split(',', 1)
file_methods[function_name] = [function_line, '0']
elif input_type == 'FNDA':
# FNDA:0,(anonymous_1)
(function_hits, function_name) = line_parts[-1].strip().split(',', 1)
if function_name not in file_methods:
file_methods[function_name] = ['0', '0']
file_methods[function_name][-1] = function_hits
# Exclude packages
excluded = [x for x in coverage_data['packages'] for e in self.excludes
if re.match(e, x)]
for package in excluded:
del coverage_data['packages'][package]
# Compute line coverage rates
for package_data in list(coverage_data['packages'].values()):
package_data['line-rate'] = self._percent(
package_data['lines-total'],
package_data['lines-covered'])
package_data['branch-rate'] = self._percent(
package_data['branches-total'],
package_data['branches-covered'])
return coverage_data
def generate_cobertura_xml(self, coverage_data, **kwargs):
"""
Given parsed coverage data, return a String cobertura XML representation.
:param coverage_data: Nested dict representing coverage information.
:type coverage_data: dict
"""
dom_impl = minidom.getDOMImplementation()
doctype = dom_impl.createDocumentType("coverage", None,
"http://cobertura.sourceforge.net/xml/coverage-04.dtd")
document = dom_impl.createDocument(None, "coverage", doctype)
root = document.documentElement
summary = coverage_data['summary']
self._attrs(root, {
'branch-rate': self._percent(summary['branches-total'],
summary['branches-covered']),
'branches-covered': str(summary['branches-covered']),
'branches-valid': str(summary['branches-total']),
'complexity': '0',
'line-rate': self._percent(summary['lines-total'],
summary['lines-covered']),
'lines-covered': str(summary['lines-covered']),
'lines-valid': str(summary['lines-total']),
'timestamp': coverage_data['timestamp'],
'version': '2.0.3'
})
sources = self._el(document, 'sources', {})
source = self._el(document, 'source', {})
source.appendChild(document.createTextNode(self.base_dir))
sources.appendChild(source)
root.appendChild(sources)
packages_el = self._el(document, 'packages', {})
packages = coverage_data['packages']
for package_name, package_data in list(packages.items()):
package_el = self._el(document, 'package', {
'line-rate': package_data['line-rate'],
'branch-rate': package_data['branch-rate'],
'name': package_name,
'complexity': '0',
})
classes_el = self._el(document, 'classes', {})
for class_name, class_data in list(package_data['classes'].items()):
class_el = self._el(document, 'class', {
'branch-rate': self._percent(class_data['branches-total'],
class_data['branches-covered']),
'complexity': '0',
'filename': class_name,
'line-rate': self._percent(class_data['lines-total'],
class_data['lines-covered']),
'name': class_data['name']
})
# Process methods
methods_el = self._el(document, 'methods', {})
for method_name, (line, hits) in list(class_data['methods'].items()):
method_el = self._el(document, 'method', {
'name': self.format(method_name),
'signature': '',
'line-rate': '1.0' if int(hits) > 0 else '0.0',
'branch-rate': '1.0' if int(hits) > 0 else '0.0',
})
method_lines_el = self._el(document, 'lines', {})
method_line_el = self._el(document, 'line', {
'hits': hits,
'number': line,
'branch': 'false',
})
method_lines_el.appendChild(method_line_el)
method_el.appendChild(method_lines_el)
methods_el.appendChild(method_el)
# Process lines
lines_el = self._el(document, 'lines', {})
lines = list(class_data['lines'].keys())
lines.sort()
for line_number in lines:
line_el = self._el(document, 'line', {
'branch': class_data['lines'][line_number]['branch'],
'hits': str(class_data['lines'][line_number]['hits']),
'number': str(line_number)
})
if class_data['lines'][line_number]['branch'] == 'true':
total = int(class_data['lines'][line_number]['branches-total'])
covered = int(class_data['lines'][line_number]['branches-covered'])
percentage = int((covered * 100.0) / total)
line_el.setAttribute('condition-coverage',
'{0}% ({1}/{2})'.format(
percentage, covered, total))
lines_el.appendChild(line_el)
class_el.appendChild(methods_el)
class_el.appendChild(lines_el)
classes_el.appendChild(class_el)
package_el.appendChild(classes_el)
packages_el.appendChild(package_el)
root.appendChild(packages_el)
return document.toprettyxml(**kwargs)
def _el(self, document, name, attrs):
"""
Create an element within document with given name and attributes.
:param document: Document element
:type document: Document
:param name: Element name
:type name: string
:param attrs: Attributes for element
:type attrs: dict
"""
return self._attrs(document.createElement(name), attrs)
def _attrs(self, element, attrs):
"""
Set attributes on given element.
:param element: DOM Element
:type element: Element
:param attrs: Attributes for element
:type attrs: dict
"""
for attr, val in list(attrs.items()):
element.setAttribute(attr, val)
return element
def _percent(self, lines_total, lines_covered):
"""
Get the percentage of lines covered in the total, with formatting.
:param lines_total: Total number of lines in given module
:type lines_total: number
:param lines_covered: Number of lines covered by tests in module
:type lines_covered: number
"""
if lines_total == 0:
return '0.0'
return str(float(float(lines_covered) / float(lines_total)))
def main(argv=None):
"""
Converts LCOV coverage data to Cobertura-compatible XML for reporting.
Usage:
lcov_cobertura.py lcov-file.dat
lcov_cobertura.py lcov-file.dat -b src/dir -e test.lib -o path/out.xml
By default, XML output will be written to ./coverage.xml
"""
if argv is None:
argv = sys.argv
parser = OptionParser()
parser.usage = ('lcov_cobertura.py lcov-file.dat [-b source/dir] '
'[-e <exclude packages regex>] [-o output.xml] [-d]')
parser.description = 'Converts lcov output to cobertura-compatible XML'
parser.add_option('-b', '--base-dir', action='store',
help='Directory where source files are located',
dest='base_dir', default='.')
parser.add_option('-e', '--excludes',
help='Comma-separated list of regexes of packages to exclude',
action='append', dest='excludes', default=[])
parser.add_option('-o', '--output',
help='Path to store cobertura xml file',
action='store', dest='output', default='coverage.xml')
parser.add_option('-d', '--demangle',
help='Demangle C++ function names using %s' % CPPFILT,
action='store_true', dest='demangle', default=False)
parser.add_option('-v', '--version',
help='Display version info',
action='store_true')
(options, args) = parser.parse_args(args=argv)
if options.demangle and not HAVE_CPPFILT:
raise RuntimeError("C++ filter executable (%s) not found!" % CPPFILT)
if options.version:
print('[lcov_cobertura {}]'.format(__version__))
sys.exit(0)
if len(args) != 2:
print(main.__doc__)
sys.exit(1)
try:
with open(args[1], 'r') as lcov_file:
lcov_data = lcov_file.read()
lcov_cobertura = LcovCobertura(lcov_data, options.base_dir, options.excludes, options.demangle)
cobertura_xml = lcov_cobertura.convert()
with open(options.output, mode='wt') as output_file:
output_file.write(cobertura_xml)
except IOError:
sys.stderr.write("Unable to convert %s to Cobertura XML" % args[1])
if __name__ == '__main__':
main()
dn: ou=rgy,o=desy,c=de
objectclass: organizationalUnit
ou: rgy
dn: ou=netgroup,ou=rgy,o=desy,c=de
objectclass: organizationalUnit
ou: netgroup
dn: cn=a3p00-hosts,ou=netgroup,ou=rgy,o=desy,c=de
objectClass: top
objectClass: nisNetgroup
cn: a3p00-hosts
description: Netgroup for nodes on PETRA III Beamline P00
nisNetgroupTriple: (dummymachine,-,)
nisNetgroupTriple: (localhost,-,)
nisNetgroupTriple: (dummymachine2,-,)
dn: cn=a3p07-hosts,ou=netgroup,ou=rgy,o=desy,c=de
objectClass: top
objectClass: nisNetgroup
cn: a3p07-hosts
description: Netgroup for nodes on PETRA III Beamline P07
nisNetgroupTriple: (dummymachine,-,)
nisNetgroupTriple: (localhost,-,)
nisNetgroupTriple: (dummymachine2,-,)
/opt/asapo/ldap/slapd -f /opt/asapo/ldap/ldap.conf
ldapadd -x -D "ou=rgy,o=desy,c=de" -f record.ldif -h localhost
ldapsearch -x -b ou=rgy,o=DESY,c=DE cn=a3p00-hosts -h localhost
/usr/libexec/slapd -d3 -f /Users/yakubov/Projects/asapo/3d_party/ldap/slapd.conf
ldapadd -x -D "ou=rgy,o=desy,c=de" -f record.ldif
ldapsearch -x -b ou=rgy,o=DESY,c=DE cn=a3p00-hosts
include /etc/ldap/schema/core.schema
include /etc/ldap/schema/cosine.schema
include /etc/ldap/schema/nis.schema
modulepath /usr/lib/ldap
moduleload back_bdb.la
access to * by * write
access to * by * manage
access to * by * read
allow bind_anon_cred
allow bind_anon_dn
allow update_anon
database bdb
suffix "ou=rgy,o=desy,c=de"