⬆️ updated fastcov

pull/1565/head
Niels Lohmann 2019-04-04 09:50:27 +02:00
parent da279234d5
commit 4676f759e8
3 changed files with 92 additions and 256 deletions


@@ -35,9 +35,9 @@ if(JSON_Coverage)
set(CMAKE_CXX_FLAGS "--coverage -g -O0 -fprofile-arcs -ftest-coverage")
# from https://github.com/RWTH-HPC/CMake-codecov/blob/master/cmake/FindGcov.cmake
#get_filename_component(COMPILER_PATH "${CMAKE_CXX_COMPILER}" PATH)
#string(REGEX MATCH "^[0-9]+" GCC_VERSION "${CMAKE_CXX_COMPILER_VERSION}")
#find_program(GCOV_BIN NAMES gcov-${GCC_VERSION} gcov HINTS ${COMPILER_PATH})
get_filename_component(COMPILER_PATH "${CMAKE_CXX_COMPILER}" PATH)
string(REGEX MATCH "^[0-9]+" GCC_VERSION "${CMAKE_CXX_COMPILER_VERSION}")
find_program(GCOV_BIN NAMES gcov-${GCC_VERSION} gcov HINTS ${COMPILER_PATH})
# collect all source files from the chosen include dir
file(GLOB_RECURSE SOURCE_FILES ${NLOHMANN_JSON_INCLUDE_BUILD_DIR}*.hpp)
@@ -55,7 +55,7 @@ if(JSON_Coverage)
# add target to collect coverage information and generate HTML file
# (filter script from https://stackoverflow.com/a/43726240/266378)
add_custom_target(lcov_html2
COMMAND ${CMAKE_SOURCE_DIR}/test/thirdparty/fastcov/fastcov.py --lcov -o json.info --gcov ${GCOV_BIN} --exclude-gcov /usr 9.0.1/ test/
COMMAND ${CMAKE_SOURCE_DIR}/test/thirdparty/fastcov/fastcov.py --branch-coverage --lcov -o json.info --gcov ${GCOV_BIN}
COMMAND gsed -i 's%build_coverage/%%g' json.info
COMMAND ${CMAKE_SOURCE_DIR}/test/thirdparty/imapdl/filterbr.py json.info > json.info.filtered.noexcept
COMMAND genhtml --title "JSON for Modern C++" --legend --demangle-cpp --output-directory html --show-details --branch-coverage json.info.filtered.noexcept
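
A note for orientation: the lcov_html2 target matches GCOV_BIN to the detected compiler version because fastcov depends on the JSON intermediate format that gcov emits from version 9 onward. A minimal sketch of one such record as fastcov.py parses it, with key names taken from the code below; the path and counts are made up for illustration:

intermediate_json = {
    "files": [
        {
            "file": "include/nlohmann/json.hpp",   # illustrative path only
            "functions": [
                {"name": "parse", "start_line": 42, "execution_count": 7}
            ],
            "lines": [
                {"line_number": 42, "count": 7,
                 "branches": [{"count": 7}, {"count": 0}]}
            ]
        }
    ]
}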


@@ -13,7 +13,7 @@
$ cd build_dir
$ ./fastcov.py --zerocounters
$ <run unit tests>
$ ./fastcov.py --exclude-gcov /usr/include --lcov -o report.info
$ ./fastcov.py --exclude /usr/include test/ --lcov -o report.info
$ genhtml -o code_coverage report.info
"""
@@ -64,24 +64,28 @@ def getGcdaFiles(cwd, gcda_files):
gcda_files = glob.glob(os.path.join(cwd, "**/*.gcda"), recursive=True)
return gcda_files
def gcovWorker(cwd, gcov, files, chunk, exclude):
p = subprocess.Popen([gcov, "-it"] + chunk, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
def gcovWorker(cwd, gcov, files, chunk, gcov_filter_options, branch_coverage):
gcov_args = "-it"
if branch_coverage:
gcov_args += "b"
p = subprocess.Popen([gcov, gcov_args] + chunk, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
for line in iter(p.stdout.readline, b''):
intermediate_json = json.loads(line.decode(sys.stdout.encoding))
intermediate_json_files = processGcovs(intermediate_json["files"], exclude)
intermediate_json_files = processGcovs(intermediate_json["files"], gcov_filter_options)
for f in intermediate_json_files:
files.append(f) #thread safe, there might be a better way to do this though
GCOVS_TOTAL.append(len(intermediate_json["files"]))
GCOVS_SKIPPED.append(len(intermediate_json["files"])-len(intermediate_json_files))
p.wait()
def processGcdas(cwd, gcov, jobs, gcda_files, exclude):
def processGcdas(cwd, gcov, jobs, gcda_files, gcov_filter_options, branch_coverage):
chunk_size = max(MINIMUM_CHUNK_SIZE, int(len(gcda_files) / jobs) + 1)
threads = []
intermediate_json_files = []
for chunk in chunks(gcda_files, chunk_size):
t = threading.Thread(target=gcovWorker, args=(cwd, gcov, intermediate_json_files, chunk, exclude))
t = threading.Thread(target=gcovWorker, args=(cwd, gcov, intermediate_json_files, chunk, gcov_filter_options, branch_coverage))
threads.append(t)
t.start()
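
For context: with --branch-coverage set, each worker ends up invoking something like gcov -itb over its chunk of gcda files; to my understanding of the gcov options, -i selects the intermediate format, -t streams it to stdout, and -b adds branch counts. Every line read from the pipe is one complete JSON document, which is why gcovWorker can json.loads it line by line. A small sketch of the command that would result (the binary name and file names are invented):

# Hypothetical command line for a chunk of two gcda files with branch coverage on;
# "gcov-9" stands in for whatever --gcov points at.
gcov_args = "-it" + "b"
cmd = ["gcov-9", gcov_args, "foo.cpp.gcda", "bar.cpp.gcda"]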
@@ -91,39 +95,75 @@ def processGcdas(cwd, gcov, jobs, gcda_files, exclude):
return intermediate_json_files
def processGcov(gcov, files, exclude):
for ex in exclude:
def processGcov(gcov, files, gcov_filter_options):
# If explicit sources were passed, check for match
source_file = os.path.abspath(gcov["file"])
if gcov_filter_options["sources"]:
if source_file in gcov_filter_options["sources"]:
files.append(gcov)
return
# Check include filter
if gcov_filter_options["include"]:
for ex in gcov_filter_options["include"]:
if ex in gcov["file"]:
files.append(gcov)
break
return
# Check exclude filter
for ex in gcov_filter_options["exclude"]:
if ex in gcov["file"]:
return
files.append(gcov)
def processGcovs(gcov_files, exclude):
def processGcovs(gcov_files, gcov_filter_options):
files = []
for gcov in gcov_files:
processGcov(gcov, files, exclude)
processGcov(gcov, files, gcov_filter_options)
return files
def dumpToLcovInfo(cwd, intermediate, output):
def dumpBranchCoverageToLcovInfo(f, source):
branch_miss = 0
branch_total = 0
for line in source["lines"]:
if not line["branches"]:
continue
branch_total += len(line["branches"])
for i, branch in enumerate(line["branches"]):
#Branch (<line number>, <block number>, <branch number>, <taken>)
f.write("BRDA:%d,%d,%d,%d\n" % (line["line_number"], int(i/2), i, branch["count"]))
branch_miss += int(branch["count"] == 0)
f.write("BRF:%d\n" % branch_total) #Branches Found
f.write("BRH:%d\n" % (branch_total - branch_miss)) #Branches Hit
def dumpToLcovInfo(cwd, intermediate, output, branch_coverage):
with open(output, "w") as f:
for file in intermediate:
for source in intermediate:
#Convert to absolute path so it plays nice with genhtml
sf = file["file"]
if not os.path.isabs(file["file"]):
sf = os.path.abspath(os.path.join(cwd, file["file"]))
f.write("SF:%s\n" % sf)
sf = source["file"]
if not os.path.isabs(source["file"]):
sf = os.path.abspath(os.path.join(cwd, source["file"]))
f.write("SF:%s\n" % sf) #Source File
fn_miss = 0
for function in file["functions"]:
f.write("FN:%s,%s\n" % (function["start_line"], function["name"]))
f.write("FNDA:%s,%s\n" % (function["execution_count"], function["name"]))
fn_miss += int(not function["execution_count"] == 0)
f.write("FNF:%s\n" % len(file["functions"]))
f.write("FNH:%s\n" % (len(file["functions"]) - fn_miss))
for function in source["functions"]:
f.write("FN:%d,%s\n" % (function["start_line"], function["name"])) #Function Start Line
f.write("FNDA:%d,%s\n" % (function["execution_count"], function["name"])) #Function Hits
fn_miss += int(function["execution_count"] == 0)
f.write("FNF:%d\n" % len(source["functions"])) #Functions Found
f.write("FNH:%d\n" % (len(source["functions"]) - fn_miss)) #Functions Hit
if branch_coverage:
dumpBranchCoverageToLcovInfo(f, source)
line_miss = 0
for line in file["lines"]:
f.write("DA:%s,%s\n" % (line["line_number"], line["count"]))
line_miss += int(not line["count"] == 0)
f.write("LF:%s\n" % len(file["lines"]))
f.write("LH:%s\n" % (len(file["lines"]) - line_miss))
for line in source["lines"]:
f.write("DA:%d,%d\n" % (line["line_number"], line["count"])) #Line
line_miss += int(line["count"] == 0)
f.write("LF:%d\n" % len(source["lines"])) #Lines Found
f.write("LH:%d\n" % (len(source["lines"]) - line_miss)) #Lines Hit
f.write("end_of_record\n")
def dumpToGcovJson(intermediate, output):
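
To make the branch additions concrete, here is a hypothetical excerpt of the lcov info records dumpToLcovInfo() writes when --branch-coverage is given, shown as the Python string the code above would produce for one file with one function, one line, and two branches (tag meanings as in the inline comments):

expected_record = (
    "SF:/abs/path/json.hpp\n"            # Source File
    "FN:42,parse\n"                      # Function Start Line
    "FNDA:7,parse\n"                     # Function Hits
    "FNF:1\n" "FNH:1\n"                  # Functions Found / Hit
    "BRDA:42,0,0,7\n" "BRDA:42,0,1,0\n"  # Branch: line, block, branch, taken
    "BRF:2\n" "BRH:1\n"                  # Branches Found / Hit
    "DA:42,7\n"                          # Line hit count
    "LF:1\n" "LH:1\n"                    # Lines Found / Hit
    "end_of_record\n"
)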
@@ -134,6 +174,13 @@ def log(line):
if not args.quiet:
print(line)
def getGcovFilterOptions(args):
return {
"sources": set([os.path.abspath(s) for s in args.sources]), #Make paths absolute
"include": args.includepost,
"exclude": args.excludepost,
}
def main(args):
# Need at least gcov 9.0.0 because that's when gcov JSON and stdout streaming was introduced
current_gcov_version = getGcovVersion(args.gcov)
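
The three new filter options interact with a fixed precedence in processGcov(): an explicit --source-files list wins outright, otherwise any --include substring must match, and only when no includes are given do the --exclude substrings apply. A minimal usage sketch, assuming processGcov() from this file is in scope; the file names are invented:

opts = {"sources": set(), "include": ["src/"], "exclude": ["/usr/include"]}
kept = []
processGcov({"file": "src/json.hpp"}, kept, opts)          # kept: matches an include substring
processGcov({"file": "/usr/include/stdio.h"}, kept, opts)  # dropped: no include match, excludes never consulted
assert len(kept) == 1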
@@ -154,7 +201,8 @@ def main(args):
log("%d .gcda files removed" % len(gcda_files))
return
intermediate_json_files = processGcdas(args.cdirectory, args.gcov, args.jobs, gcda_files, args.excludepost)
gcov_filter_options = getGcovFilterOptions(args)
intermediate_json_files = processGcdas(args.cdirectory, args.gcov, args.jobs, gcda_files, gcov_filter_options, args.branchcoverage)
gcov_total = sum(GCOVS_TOTAL)
gcov_skipped = sum(GCOVS_SKIPPED)
@@ -162,7 +210,7 @@ def main(args):
log("%d .gcov files processed by fastcov (%d skipped)" % (gcov_total - gcov_skipped, gcov_skipped))
if args.lcov:
dumpToLcovInfo(args.cdirectory, intermediate_json_files, args.output)
dumpToLcovInfo(args.cdirectory, intermediate_json_files, args.output, args.branchcoverage)
log("Created lcov info file '%s'" % args.output)
else:
dumpToGcovJson(intermediate_json_files, args.output)
@@ -172,18 +220,24 @@ if __name__ == '__main__':
parser = argparse.ArgumentParser(description='A parallel gcov wrapper for fast coverage report generation')
parser.add_argument('-z', '--zerocounters', dest='zerocounters', action="store_true", help='Recursively delete all gcda files')
parser.add_argument('-f', '--gcda-files', dest='gcda_files', nargs="+", default=[], help='Specify exactly which gcda files should be processed instead of recursivly searching the search directory.')
parser.add_argument('-E', '--exclude-gcda', dest='excludepre', nargs="+", default=[], help='.gcda filter - Exclude gcda files from being processed via simple find matching (not regex)')
parser.add_argument('-e', '--exclude-gcov', dest='excludepost', nargs="+", default=[], help='.gcov filter - Exclude gcov files from being processed via simple find matching (not regex)')
# Enable Branch Coverage
parser.add_argument('-b', '--branch-coverage', dest='branchcoverage', action="store_true", help='Include branch counts in the coverage report')
parser.add_argument('-g', '--gcov', dest='gcov', default='gcov', help='which gcov binary to use')
# Filtering Options
parser.add_argument('-s', '--source-files', dest='sources', nargs="+", default=[], help='Filter: Specify exactly which source files should be included in the final report. Paths must be either absolute or relative to current directory.')
parser.add_argument('-e', '--exclude', dest='excludepost', nargs="+", default=[], help='Filter: Exclude source files from final report if they contain one of the provided substrings (i.e. /usr/include test/, etc.)')
parser.add_argument('-i', '--include', dest='includepost', nargs="+", default=[], help='Filter: Only include source files in final report that contain one of the provided substrings (i.e. src/ etc.)')
parser.add_argument('-f', '--gcda-files', dest='gcda_files', nargs="+", default=[], help='Filter: Specify exactly which gcda files should be processed instead of recursively searching the search directory.')
parser.add_argument('-E', '--exclude-gcda', dest='excludepre', nargs="+", default=[], help='Filter: Exclude gcda files from being processed via simple find matching (not regex)')
parser.add_argument('-g', '--gcov', dest='gcov', default='gcov', help='Which gcov binary to use')
parser.add_argument('-d', '--search-directory', dest='directory', default=".", help='Base directory to recursively search for gcda files (default: .)')
parser.add_argument('-c', '--compiler-directory', dest='cdirectory', default=".", help='Base directory compiler was invoked from (default: .)')
parser.add_argument('-j', '--jobs', dest='jobs', type=int, default=multiprocessing.cpu_count(), help='Number of parallel gcov to spawn (default: %d).' % multiprocessing.cpu_count())
parser.add_argument('-o', '--output', dest='output', default="coverage.json", help='Name of output file (default: coverage.json)')
parser.add_argument('-i', '--lcov', dest='lcov', action="store_true", help='Output in lcov info format instead of gcov json')
parser.add_argument('-l', '--lcov', dest='lcov', action="store_true", help='Output in lcov info format instead of gcov json')
parser.add_argument('-q', '--quiet', dest='quiet', action="store_true", help='Suppress output to stdout')
args = parser.parse_args()
main(args)


@@ -1,218 +0,0 @@
#!/usr/bin/env python3
"""
Author: Bryan Gillespie
Legacy version... supports versions 7.1.0 <= GCC < 9.0.0
A massively parallel gcov wrapper for generating intermediate coverage formats fast
The goal of fastcov is to generate code coverage intermediate formats as fast as possible
(ideally < 1 second), even for large projects with hundreds of gcda objects. The intermediate
formats may then be consumed by a report generator such as lcov's genhtml, or a dedicated front
end such as coveralls.
Sample Usage:
$ cd build_dir
$ ./fastcov.py --exclude-gcov /usr/include --lcov -o report.info
$ genhtml -o code_coverage report.info
"""
import re
import os
import glob
import json
import argparse
import subprocess
import multiprocessing
from random import shuffle
MINIMUM_GCOV = (7,1,0)
MINIMUM_CHUNK_SIZE = 10
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
def getGcovVersion(gcov):
p = subprocess.Popen([gcov, "-v"], stdout=subprocess.PIPE)
output = p.communicate()[0].decode('UTF-8')
p.wait()
version_str = re.search(r'\s([\d.]+)\s', output.split("\n")[0]).group(1)
version = tuple(map(int, version_str.split(".")))
return version
def removeFiles(files):
for file in files:
os.remove(file)
def getFilteredGcdaFiles(gcda_files, exclude):
def excludeGcda(gcda):
for ex in exclude:
if ex in gcda:
return False
return True
return list(filter(excludeGcda, gcda_files))
def getGcdaFiles(cwd, gcda_files, exclude):
if not gcda_files:
gcda_files = glob.glob(os.path.join(cwd, "**/*.gcda"), recursive=True)
if exclude:
return getFilteredGcdaFiles(gcda_files, exclude)
return gcda_files
def getGcovFiles(cwd):
return glob.glob(os.path.join(cwd, "*.gcov"))
def filterGcovFiles(gcov):
with open(gcov) as f:
path = f.readline()[5:]
for ex in args.exclude:
if ex in path:
return False
return True
def processGcdasPre9(cwd, gcov, jobs, gcda_files):
chunk_size = min(MINIMUM_CHUNK_SIZE, int(len(gcda_files) / jobs) + 1)
processes = []
# shuffle(gcda_files) # improves performance by preventing any one gcov from bottlenecking on a list of sequential, expensive gcdas (?)
for chunk in chunks(gcda_files, chunk_size):
processes.append(subprocess.Popen([gcov, "-i"] + chunk, cwd=cwd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL))
for p in processes:
p.wait()
def processGcdasPre9Accurate(cwd, gcov, gcda_files, exclude):
intermediate_json_files = []
for gcda in gcda_files:
subprocess.Popen([gcov, "-i", gcda], cwd=cwd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL).wait()
gcov_files = getGcovFiles(cwd)
intermediate_json_files += processGcovs(gcov_files, exclude)
removeFiles(gcov_files)
return intermediate_json_files
def processGcovLine(file, line):
line_type, data = line.split(":", 1)
if line_type == "lcount":
num, count = data.split(",")
hit = (count != 0)
file["lines_hit"] += int(hit)
file["lines"].append({
"branches": [],
"line_number": num,
"count": count,
"unexecuted_block": not hit
})
elif line_type == "function":
num, count, name = data.split(",")
hit = (count != 0)
file["functions_hit"] += int(hit)
file["functions"].append({
"name": name,
"execution_count": count,
"start_line": num,
"end_line": None,
"blocks": None,
"blocks_executed": None,
"demangled_name": None
})
def processGcov(files, gcov, exclude):
with open(gcov) as f:
path = f.readline()[5:].rstrip()
for ex in exclude:
if ex in path:
return False
file = {
"file": path,
"functions": [],
"functions_hit": 0,
"lines": [],
"lines_hit": 0
}
for line in f:
processGcovLine(file, line.rstrip())
files.append(file)
return True
def processGcovs(gcov_files, exclude):
files = []
filtered = 0
for gcov in gcov_files:
filtered += int(not processGcov(files, gcov, exclude))
print("Skipped %d .gcov files" % filtered)
return files
def dumpToLcovInfo(intermediate, output):
with open(output, "w") as f:
for file in intermediate:
f.write("SF:%s\n" % file["file"])
for function in file["functions"]:
f.write("FN:%s,%s\n" % (function["start_line"], function["name"]))
f.write("FNDA:%s,%s\n" % (function["execution_count"], function["name"]))
f.write("FNF:%s\n" % len(file["functions"]))
f.write("FNH:%s\n" % file["functions_hit"])
for line in file["lines"]:
f.write("DA:%s,%s\n" % (line["line_number"], line["count"]))
f.write("LF:%s\n" % len(file["lines"]))
f.write("LH:%s\n" % file["lines_hit"])
f.write("end_of_record\n")
def dumpToGcovJson(intermediate, output):
with open(output, "w") as f:
json.dump(intermediate, f)
def main(args):
# Need at least gcov 7.1.0 because of bug not allowing -i in conjunction with multiple files
# See: https://github.com/gcc-mirror/gcc/commit/41da7513d5aaaff3a5651b40edeccc1e32ea785a
current_gcov_version = getGcovVersion(args.gcov)
if current_gcov_version < MINIMUM_GCOV:
print("Minimum gcov version {} required, found {}".format(".".join(map(str, MINIMUM_GCOV)), ".".join(map(str, current_gcov_version))))
exit(1)
gcda_files = getGcdaFiles(args.directory, args.gcda_files, args.excludepre)
print("Found %d .gcda files" % len(gcda_files))
# We "zero" the "counters" by simply deleting all gcda files
if args.zerocounters:
removeFiles(gcda_files)
print("Removed %d .gcda files" % len(gcda_files))
return
# If we are less than gcov 9.0.0, convert .gcov files to GCOV 9 JSON format
processGcdasPre9(args.cdirectory, args.gcov, args.jobs, gcda_files)
gcov_files = getGcovFiles(args.cdirectory)
print("Found %d .gcov files" % len(gcov_files))
intermediate_json_files = processGcovs(gcov_files, args.excludepost)
removeFiles(gcov_files)
intermediate_json_files += processGcdasPre9Accurate(args.cdirectory, args.gcov, args.gcda_files_accurate, args.excludepost)
if args.lcov:
dumpToLcovInfo(intermediate_json_files, args.output)
else:
dumpToGcovJson(intermediate_json_files, args.output)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='A parallel gcov wrapper for fast coverage report generation')
parser.add_argument('-z', '--zerocounters', dest='zerocounters', action="store_true", help='Recursively delete all gcda files')
parser.add_argument('-f', '--gcda-files', dest='gcda_files', nargs="+", default=[], help='Specify exactly which gcda files should be processed instead of recursivly searching the search directory.')
parser.add_argument('-F', '--gcda-files-accurate', dest='gcda_files_accurate', nargs="+", default=[], help='(< gcov 9.0.0) Get accurate header coverage information for just these. These files cannot be processed in parallel')
parser.add_argument('-E', '--exclude-gcda', dest='excludepre', nargs="+", default=[], help='.gcda filter - Exclude gcda files from being processed via simple find matching (not regex)')
parser.add_argument('-e', '--exclude-gcov', dest='excludepost', nargs="+", default=[], help='.gcov filter - Exclude gcov files from being processed via simple find matching (not regex)')
parser.add_argument('-g', '--gcov', dest='gcov', default='gcov', help='which gcov binary to use')
parser.add_argument('-d', '--search-directory', dest='directory', default=".", help='Base directory to recursively search for gcda files (default: .)')
parser.add_argument('-c', '--compiler-directory', dest='cdirectory', default=".", help='Base directory compiler was invoked from (default: .)')
parser.add_argument('-j', '--jobs', dest='jobs', type=int, default=multiprocessing.cpu_count(), help='Number of parallel gcov to spawn (default: %d).' % multiprocessing.cpu_count())
parser.add_argument('-o', '--output', dest='output', default="coverage.json", help='Name of output file (default: coverage.json)')
parser.add_argument('-i', '--lcov', dest='lcov', action="store_true", help='Output in lcov info format instead of gcov json')
args = parser.parse_args()
main(args)
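
For comparison with the removed legacy path: before gcov 9, gcov -i wrote plain-text *.gcov files rather than JSON, and the deleted script parsed them line by line in processGcov() and processGcovLine(). A hypothetical snippet of that older text format, reconstructed only from the prefixes the parser looks for (a leading file: header, then function: and lcount: records):

legacy_gcov_text = """\
file:src/json.hpp
function:42,7,parse
lcount:42,7
lcount:43,0
"""

The legacy parser stripped the file: prefix from the first line via f.readline()[5:] and dispatched each remaining line on the text before the first colon.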