Capture coverage of integration tests

To also capture coverage for the integration tests, a test
that only executes the cmd.Run function is used.

The test always exits with code 0 and prints the real exit
code to stdout; otherwise, no coverage report would be
generated.

These changes enable a more accurate coverage report for
future contributions.
Author: Benj Fassbind
Date:   2022-07-11 16:04:06 +02:00
Parent: 69d473ea6f
Commit: 1d4e6183be

9 changed files with 140 additions and 23 deletions
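
The mechanism boils down to a small contract between the coverage-instrumented
binary and the test harness: the wrapper test always exits 0 (so Go writes the
cover profile), prints the real exit code as "EXIT: <n>", and lets Go append
its usual PASS/coverage footer. Below is a minimal Python sketch of the
harness side of that contract; the binary name, flag value, and parsing helper
are illustrative assumptions, not code from this commit.

# A hedged sketch (not part of this commit) of the harness-side contract,
# assuming the instrumented binary is ./aptly.test and that it prints
# "EXIT: <n>" before Go's PASS/coverage footer.
import re
import subprocess

def run_instrumented(args, coverage_profile):
    proc = subprocess.run(
        ["./aptly.test", "-test.coverprofile=%s" % coverage_profile, *args],
        stdout=subprocess.PIPE,
    )
    text = proc.stdout.decode("utf-8")
    # The process itself exits 0 so the profile gets written; the real
    # aptly exit code is the number printed after "EXIT:".
    match = re.search(r"EXIT: (\d+)", text)
    real_code = int(match.group(1)) if match else proc.returncode
    output = text.split("EXIT:", 1)[0]
    return output, real_code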

View File

@@ -19,6 +19,8 @@ import urllib.request
 import pprint
 import socketserver
 import http.server
+from uuid import uuid4
+from pathlib import Path
 import zlib
@@ -264,6 +266,10 @@ class BaseTest(object):
         command = string.Template(command).substitute(params)
         command = shlex.split(command)
+        if command[0] == "aptly":
+            aptly_testing_bin = Path(__file__).parent / ".." / "aptly.test"
+            command = [str(aptly_testing_bin), f"-test.coverprofile={Path(self.coverage_dir) / self.__class__.__name__}-{uuid4()}.out", *command[1:]]
         environ = os.environ.copy()
         environ["LC_ALL"] = "C"
         environ.update(self.environmentOverride)
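
For illustration, here is a standalone sketch of what the command rewrite
above produces; coverage_dir and the test class name are assumed example
values rather than values taken from the commit.

from pathlib import Path
from uuid import uuid4

# Assumed example values; in the harness these come from self.coverage_dir
# and self.__class__.__name__.
coverage_dir = "/tmp/aptly-coverage"
class_name = "PublishSnapshot1Test"
aptly_testing_bin = Path(".") / "aptly.test"

command = ["aptly", "repo", "create", "local-repo"]
if command[0] == "aptly":
    command = [
        str(aptly_testing_bin),
        "-test.coverprofile=%s-%s.out" % (Path(coverage_dir) / class_name, uuid4()),
        *command[1:],
    ]
print(command)
# ['aptly.test', '-test.coverprofile=/tmp/aptly-coverage/PublishSnapshot1Test-<uuid>.out',
#  'repo', 'create', 'local-repo']

The uuid4() suffix gives every aptly invocation its own profile file, so
repeated commands within one test do not overwrite each other's coverage data.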
@@ -272,14 +278,34 @@ class BaseTest(object):
     def run_cmd(self, command, expected_code=0):
         try:
             proc = self._start_process(command, stdout=subprocess.PIPE)
-            output, _ = proc.communicate()
+            raw_output, _ = proc.communicate()
+            returncodes = [proc.returncode]
+            is_aptly_command = False
+            if isinstance(command, str):
+                is_aptly_command = command.startswith("aptly")
+            if isinstance(command, list):
+                is_aptly_command = command[0] == "aptly"
+            if is_aptly_command:
+                # remove the last two rows as go tests always print PASS/FAIL and coverage in those
+                # two lines. This would otherwise fail the tests as they would not match gold
+                output, _, returncode = re.findall(r"((.|\n)*)EXIT: (\d)\n.*\ncoverage: .*", raw_output.decode("utf-8"))[0]
+                output = output.encode()
+                returncodes.append(int(returncode))
+            else:
+                output = raw_output
             if expected_code is not None:
-                if proc.returncode != expected_code:
+                if expected_code not in returncodes:
                     raise Exception("exit code %d != %d (output: %s)" % (
-                        proc.returncode, expected_code, output))
+                        proc.returncode, expected_code, raw_output))
             return output
         except Exception as e:
-            raise Exception("Running command %s failed: %s" %
+            raise Exception("Running command '%s' failed: %s" %
                             (command, str(e)))

     def gold_processor(self, gold):
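
As a short, self-contained demo of the parsing step above, the same regular
expression is applied here to an assumed sample of aptly.test output; the
PASS and coverage footer lines are stripped so the output still matches the
gold files, and the real exit code is recovered from the EXIT: line.

import re

# Assumed sample output from the coverage-instrumented aptly.test binary.
raw_output = (
    b"Local repo [local-repo] successfully added.\n"
    b"EXIT: 2\n"
    b"PASS\n"
    b"coverage: 12.3% of statements\n"
)

output, _, returncode = re.findall(
    r"((.|\n)*)EXIT: (\d)\n.*\ncoverage: .*", raw_output.decode("utf-8"))[0]
output = output.encode()

print(output)           # b'Local repo [local-repo] successfully added.\n'
print(int(returncode))  # 2, even though the process itself exited with 0

Both codes end up in returncodes, which is why the check above accepts
expected_code if it matches either the wrapper's exit code (always 0) or the
real code parsed from stdout.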

View File

@@ -7,6 +7,7 @@ import inspect
 import fnmatch
 import re
 import sys
+from tempfile import mkdtemp
 import traceback
 import random
 import subprocess
@@ -42,7 +43,7 @@ def walk_modules(package):
         yield importlib.import_module(package + "." + name)
-def run(include_long_tests=False, capture_results=False, tests=None, filters=None):
+def run(include_long_tests=False, capture_results=False, tests=None, filters=None, coverage_dir=None):
     """
     Run system test.
     """
@@ -51,6 +52,8 @@ def run(include_long_tests=False, capture_results=False, tests=None, filters=Non
     fails = []
     numTests = numFailed = numSkipped = 0
     lastBase = None
+    if not coverage_dir:
+        coverage_dir = mkdtemp(suffix="aptly-coverage")
     for test in tests:
         for testModule in walk_modules(test):
@@ -95,6 +98,7 @@ def run(include_long_tests=False, capture_results=False, tests=None, filters=Non
             try:
                 t.captureResults = capture_results
+                t.coverage_dir = coverage_dir
                 t.test()
             except Exception:
                 numFailed += 1
@@ -110,6 +114,8 @@ def run(include_long_tests=False, capture_results=False, tests=None, filters=Non
     if lastBase is not None:
         lastBase.shutdown_class()
+    print("COVERAGE_RESULTS: %s" % coverage_dir)
     print("TESTS: %d SUCCESS: %d FAIL: %d SKIP: %d" % (
         numTests, numTests - numFailed, numFailed, numSkipped))
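
After a run, the directory reported as COVERAGE_RESULTS contains one Go cover
profile per aptly invocation. A hedged sketch of inspecting it, with an
assumed example path:

from pathlib import Path

# Assumed example path; the harness prints the real one as COVERAGE_RESULTS.
coverage_dir = Path("/tmp/aptly-coverage")
for profile in sorted(coverage_dir.glob("*.out")):
    print(profile.name)   # e.g. PublishSnapshot1Test-<uuid>.out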
@@ -149,6 +155,7 @@ if __name__ == "__main__":
     random.seed()
     include_long_tests = False
     capture_results = False
+    coverage_dir = None
     tests = None
     args = sys.argv[1:]
@@ -157,6 +164,9 @@ if __name__ == "__main__":
             include_long_tests = True
         elif args[0] == "--capture":
             capture_results = True
+        elif args[0] == "--coverage-dir":
+            coverage_dir = args[1]
+            args = args[1:]
         args = args[1:]
@@ -169,4 +179,4 @@
         else:
             filters.append(arg)
-    run(include_long_tests, capture_results, tests, filters)
+    run(include_long_tests, capture_results, tests, filters, coverage_dir)
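
Finally, a standalone sketch of the flag handling above; the surrounding loop
and the argument values are assumptions for illustration. Note that
--coverage-dir consumes its value by shifting args one extra time before the
shared shift at the end of the loop.

# Assumed example arguments; in the script they come from sys.argv[1:].
args = ["--long", "--coverage-dir", "/tmp/aptly-coverage", "t12_api"]

include_long_tests = False
capture_results = False
coverage_dir = None

while args and args[0].startswith("--"):
    if args[0] == "--long":
        include_long_tests = True
    elif args[0] == "--capture":
        capture_results = True
    elif args[0] == "--coverage-dir":
        coverage_dir = args[1]
        args = args[1:]   # extra shift: drop the flag's value
    args = args[1:]       # drop the flag itself

print(include_long_tests, coverage_dir, args)
# True /tmp/aptly-coverage ['t12_api']

Whatever remains in args is then treated as tests or filters, and coverage_dir
is passed through to run(), as the last hunk shows.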