Commit c144b742 authored by Tomas Krizek

Merge branch 'pytest_experiment' into 'master'

Migration to Py.test

See merge request !96
parents 94ed593b f409c4df
Pipeline #36146 failed in 1 minute and 12 seconds
.cache
*.pyc
*.swp
/env.sh
@@ -62,21 +62,27 @@ test:unittests:
- linux
- amd64
# changes in Deckard itself must not change the results of tests
test:comparative:kresd:
stage: test
script:
# test kresd binary
- git clone --depth=1 https://gitlab.labs.nic.cz/knot/knot-resolver.git /tmp/kresd-local-build
- GIT_DIR=/tmp/kresd-local-build/.git git log -1
- ( cd /tmp/kresd-local-build ; git submodule update --init --recursive )
- PREFIX=/tmp/.local make -C /tmp/kresd-local-build -k all
- PREFIX=/tmp/.local make -C /tmp/kresd-local-build install
# compare results from latest Deckard with results from merge base
- cp ci/common.sh /tmp
- cp ci/compare-tests.sh /tmp
- /tmp/compare-tests.sh $(pwd)/kresd_run.sh
- LD_LIBRARY_PATH=/tmp/.local/lib PATH=/tmp/.local/sbin:$PATH /tmp/compare-tests.sh $(pwd)/kresd_run.sh
artifacts:
when: always
expire_in: '1 hour'
paths:
- modified_tests
- base.log
- base.tests
- head.log
- head.tests
- base.xml
- head.xml
tags:
- docker
- linux
@@ -92,7 +98,7 @@ test:latest:kresd:
- ( cd kresd-local-build ; git submodule update --init --recursive )
- PREFIX=$(pwd)/.local make -C kresd-local-build -k all
- PREFIX=$(pwd)/.local make -C kresd-local-build install
- TMPDIR=$(pwd) LD_LIBRARY_PATH=$(pwd)/.local/lib DAEMON=$(pwd)/.local/sbin/kresd MAKEFLAGS="-j $(nproc) --keep-going" ./kresd_run.sh
- TMPDIR=$(pwd) LD_LIBRARY_PATH=$(pwd)/.local/lib PATH=$(pwd)/.local/sbin:$PATH ./kresd_run.sh -n $(nproc)
artifacts:
when: on_failure
expire_in: 1 week
@@ -109,8 +115,8 @@ test:latest:kresd:
test:sanity:unbound:
stage: test
script:
- TESTS=sets/resolver/iter_hint_lame.rpl TMPDIR=$(pwd) ./unbound_run.sh
- TESTS=sets/resolver/iter_lame_root.rpl TMPDIR=$(pwd) ./unbound_run.sh
- TMPDIR=$(pwd) ./unbound_run.sh -k sets/resolver/iter_hint_lame.rpl
- TMPDIR=$(pwd) ./unbound_run.sh -k sets/resolver/iter_lame_root.rpl
# these do not work with Unbound 1.5.8, which is in the CI container
#- TESTS=sets/resolver/nsec_wildcard_answer_response.rpl ./unbound_run.sh
#- TESTS=sets/resolver/world_cz_lidovky_www.rpl ./unbound_run.sh
@@ -130,8 +136,8 @@ test:sanity:unbound:
test:sanity:pdnsrecursor:
stage: test
script:
- TESTS=sets/resolver/iter_recurse.rpl TMPDIR=$(pwd) ./pdns_run.sh
- TESTS=sets/resolver/iter_tcbit.rpl TMPDIR=$(pwd) ./pdns_run.sh
- TMPDIR=$(pwd) ./pdns_run.sh -k sets/resolver/iter_recurse.rpl
- TMPDIR=$(pwd) ./pdns_run.sh -k sets/resolver/iter_tcbit.rpl
artifacts:
when: on_failure
expire_in: 1 week
......
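Note the change in the runner interface throughout these jobs: the old TESTS= and DAEMON= environment variables are gone, and the *run.sh wrappers now take pytest-style options instead, presumably forwarding them to pytest: -n (from pytest-xdist) sets the number of parallel workers and -k selects scenarios by name. Taking the pdnsrecursor job above as an example:

    # before: TESTS=sets/resolver/iter_recurse.rpl TMPDIR=$(pwd) ./pdns_run.sh
    # after:  TMPDIR=$(pwd) ./pdns_run.sh -k sets/resolver/iter_recurse.rpl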
# Defaults
TESTS ?= sets/resolver
DAEMON ?= kresd
TEMPLATE ?= template/kresd.j2
CONFIG ?= config
ADDITIONAL ?= -f 1
OPTS ?=
PYTHON ?= python3
LIBEXT := .so
PLATFORM := $(shell uname -s)
@@ -13,14 +6,6 @@ ifeq ($(PLATFORM),Darwin)
LIBEXT := .dylib
endif
# Find all sub-targets
TARGETS := $(TESTS)
ifeq (,$(findstring .rpl,$(TESTS)))
TARGETS := $(wildcard $(TESTS)/*.rpl)
endif
SOURCES := $(TARGETS)
TARGETS := $(sort $(patsubst %.rpl,%.out-qmin,$(SOURCES))) $(sort $(patsubst %.rpl,%.out-noqmin,$(SOURCES)))
# Dependencies
include platform.mk
libcwrap_DIR := contrib/libswrap
@@ -40,37 +25,14 @@ else
preload_syms := LD_PRELOAD="$(libfaketime):$(libcwrap)"
endif
# Test coverage measurement
# The user has to provide their own coverage_env.sh to generate environment variables for the daemon under test
ifdef COVERAGE
ifndef COVERAGE_ENV_SCRIPT
$(error COVERAGE requires COVERAGE_ENV_SCRIPT with path to scripts/coverage_env.sh for given daemon)
endif
ifndef DAEMONSRCDIR
$(error COVERAGE requires DAEMONSRCDIR pointing to source directory of daemon under test)
endif
ifndef COVERAGE_STATSDIR
$(error COVERAGE requires COVERAGE_STATSDIR pointing to output directory)
endif
define set_coverage_env
$(shell "$(COVERAGE_ENV_SCRIPT)" "$(DAEMONSRCDIR)" "$(COVERAGE_STATSDIR)" "$(1)")
endef
endif
# Targets
all: $(TARGETS)
all:
@echo "Deckard is now run using *run.sh scripts in its root directory."
@echo "To build the dependencies (libfaketime and libcwrap) run 'make depend'."
exit 1
depend: $(libfaketime) $(libcwrap)
# Generic rule to run test
$(SOURCES): depend
%.out-qmin: %.rpl
@test "$${QMIN:-true}" = "true" || exit 0 && \
$(call set_coverage_env,$@) $(preload_syms) $(PYTHON) $(abspath ./deckard.py) --qmin true $(OPTS) $< one $(DAEMON) $(TEMPLATE) $(CONFIG) -- $(ADDITIONAL)
%.out-noqmin: %.rpl
@test "$${QMIN:-false}" = "false" || exit 0 && \
$(call set_coverage_env,$@) $(preload_syms) $(PYTHON) $(abspath ./deckard.py) --qmin false $(OPTS) $< one $(DAEMON) $(TEMPLATE) $(CONFIG) -- $(ADDITIONAL)
@echo "export $(preload_syms)" > env.sh
# Synchronize submodules
submodules: .gitmodules
......
@@ -27,6 +27,8 @@ Deckard requires following software to be installed:
- dnspython_ - DNS library for Python
- Jinja2_ - template engine for generating config files
- PyYAML_ - YAML parser for Python
- pytest_ - testing framework for Python, used for running the test cases
- pytest-xdist_ - pytest plugin for distributed testing
- custom C libraries (installed automatically, see below)
For convenient use it is strongly recommended to have a C compiler, Git, and ``make`` available.
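As a quick illustration, building the bundled C libraries and then running the whole default scenario set in parallel looks like this (``-n`` is provided by pytest-xdist; the wrapper scripts are assumed to forward it to pytest)::

    make depend
    TMPDIR=$(pwd) ./kresd_run.sh -n $(nproc)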
@@ -88,3 +90,5 @@ Happy testing.
.. _`libfaketime`: https://github.com/wolfcw/libfaketime
.. _`modified socket_wrapper`: https://gitlab.labs.nic.cz/labs/socket_wrapper
.. _`original socket_wrapper`: https://cwrap.org/socket_wrapper.html
.. _`pytest`: https://pytest.org/
.. _`pytest-xdist`: https://pypi.python.org/pypi/pytest-xdist
@@ -4,6 +4,7 @@ HEAD="$(git log -1 --format="%H" HEAD)"
MERGEBASE="$(git merge-base origin/master "${HEAD}")"
LOGDIR="$(pwd)"
PYTHON=${PYTHON:-"python3"}
CIDIR="$(dirname "${0}")"
# workaround for Gitlab's missing support for absolute paths in artifacts:
# https://gitlab.com/gitlab-org/gitlab-ci-multi-runner/issues/1011
......
@@ -8,46 +8,21 @@ TESTRUNNER="$1"
# Fail if the result of any test that was not modified between master..HEAD has changed
# (i.e. changes in Deckard itself must not change the results of unmodified tests)
function extract_test_results {
# from log $1 extract test status lines like this:
# [ FAIL ] sets/resolver/iter_badglue.rpl
# [ OK ] sets/resolver/iter_badraw.rpl
# no spaces are allowed in test names
grep -o '^\[[^]]*\] [^ ]*\.rpl' "$1" | sort --field-separator=']' --key=2 | uniq
}
function find_modified_tests {
: detect tests affected by current merge request
: store list of modified tests in ${MODIFIED_TESTS_FILE}
git diff --numstat "${MERGEBASE}..${HEAD}" | cut -f 3 | fgrep .rpl > "${MODIFIED_TESTS_FILE}" || : no modified tests detected
}
function filter_test_results {
: skip tests which are listed in ${MODIFIED_TESTS_FILE}
grep --fixed-strings --invert-match --file="${MODIFIED_TESTS_FILE}"
}
MODIFIED_TESTS_FILE="/tmp/modified_tests"
find_modified_tests
LOGS[0]="${MODIFIED_TESTS_FILE}"
: get results from all tests, including the failing ones
export MAKEFLAGS="--output-sync=target --keep-going -j$(nproc)"
: get test results from version under test
PYTHON=${PYTHON} "${TESTRUNNER}" &> /tmp/head.log || :
LOGS[1]="/tmp/head.log"
extract_test_results /tmp/head.log | filter_test_results &> /tmp/head.tests || (: "no tests left, huh?" && cat /tmp/head.log)
LOGS[2]="/tmp/head.tests"
"${TESTRUNNER}" -n $(nproc) --junit-xml=/tmp/head.xml || : some tests on HEAD ${HEAD} failed
: get test results from common ancestor with master branch
git checkout --force --detach "${MERGEBASE}"
git clean -xdf
PYTHON=${PYTHON} "${TESTRUNNER}" &> /tmp/base.log || :
LOGS[3]="/tmp/base.log"
extract_test_results /tmp/base.log | filter_test_results &> /tmp/base.tests || (: "no tests left, huh?" && cat /tmp/base.log)
LOGS[4]="/tmp/base.tests"
: tests which were not modified should produce the same results
diff -U0 /tmp/base.tests /tmp/head.tests && echo "OK, no differences found"
"${TESTRUNNER}" -n $(nproc) --junit-xml=/tmp/base.xml || : some tests on merge base ${MERGEBASE} failed
"${CIDIR}/junit_compare.py" /tmp/head.xml /tmp/base.xml /tmp/modified_tests && echo "OK, no differences found"
#!/usr/bin/python3
import sys
import xml.etree.ElementTree as xml


def parse_junit_xml(filename):
    """
    Transform junit XML file into set of tuples:
    (test description, file name, test result)
    """
    results = set()
    root = xml.parse(filename).getroot()
    for case in root:
        if case.find("failure") is not None:  # Because empty XML elements are falsey
            results.add((case.get("name"), case.get("name").split("'")[1], "FAILED"))
        elif case.find("skipped") is not None:
            results.add((case.get("name"), case.get("name").split("'")[1], "SKIPPED"))
        else:
            results.add((case.get("name"), case.get("name").split("'")[1], "PASSED"))
    return results


new = sys.argv[1]
old = sys.argv[2]
modified_tests = [line.strip() for line in open(sys.argv[3]).readlines()]
test_diffs = parse_junit_xml(old) ^ parse_junit_xml(new)
erroneous_rpls = [diff[1] for diff in test_diffs
                  if diff[1] not in modified_tests]

if erroneous_rpls:
    print('FAIL! The following tests changed their result without being modified:')
    for rpl in sorted(set(erroneous_rpls)):
        print(rpl)
    sys.exit(1)
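For reference, compare-tests.sh above invokes this script with the two JUnit reports and the list of modified tests:

    "${CIDIR}/junit_compare.py" /tmp/head.xml /tmp/base.xml /tmp/modified_tests

Any scenario whose result differs between the two reports and is not listed among the modified tests makes the script exit non-zero.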
programs:
  - name: getdns
    binary: getdns_query
    additional:
      - -C
      - getdns.conf
    templates:
      - template/getdns.j2
      - template/dnssec_getdns.j2
    configs:
      - getdns.conf
      - getdns-root.key
programs:
  - name: knotd
    binary: knotd
    additional:
      - -c
      - ./knotd.conf
    templates:
      - "template/knotd_master.j2"
    configs:
      - "knotd.conf"
programs:
  - name: knotd
    binary: knotd
    additional:
      - -c
      - ./knotd.conf
    templates:
      - "template/knotd_slave.j2"
    configs:
      - "knotd.conf"
\ No newline at end of file
programs:
  - name: kresd
    binary: kresd
    additional:
      - -f
      - "1"
    templates:
      - template/kresd.j2
    configs:
      - config
programs:
  - name: pdns
    binary: pdns_recursor
    additional:
      - --config-dir=./
    templates:
      - template/pdns_recursor.j2
      - template/hints_zone.j2
      - template/pdns_dnssec.j2
    configs:
      - recursor.conf
      - hints.pdns
      - dnssec.lua
    # SIGTERM leads to return code -15 instead of clean 0 so we have to ignore it
    ignore_exit_code: true
programs:
  - name: unbound
    binary: unbound
    additional:
      - -d
      - -c
      - unbound.conf
    templates:
      - template/unbound.j2
      - template/hints_zone.j2
    configs:
      - unbound.conf
      - hints.zone
from collections import namedtuple, OrderedDict
import glob
import os
import re

import yaml

Scenario = namedtuple("Scenario", ["path", "qmin", "config"])


def ordered_load(stream, Loader=yaml.Loader, object_pairs_hook=OrderedDict):
    """Make YAML load into an OrderedDict.

    This is done to ensure compatibility with Python versions prior to 3.6.
    See docs.python.org/3.6/whatsnew/3.6.html#new-dict-implementation for more information.

    repr(config) is part of the test case's name in pytest.
    We need to ensure that it is ordered in the same way.
    See https://github.com/pytest-dev/pytest/issues/1075.
    """
    class OrderedLoader(Loader):
        pass

    def construct_mapping(loader, node):
        loader.flatten_mapping(node)
        return object_pairs_hook(loader.construct_pairs(node))

    OrderedLoader.add_constructor(
        yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
        construct_mapping)
    return yaml.load(stream, OrderedLoader)


def config_sanity_check(config_dict, config_name):
    """Check that the parsed configuration is valid."""
    mandatory_keys = {'name', 'binary', 'templates', 'configs', 'additional'}
    for cfg in config_dict['programs']:
        missing_keys = mandatory_keys - set(cfg.keys())
        assert not missing_keys, 'Mandatory fields in configuration are missing: %s' % missing_keys

        # sanity check templates vs. configs
        assert len(cfg['templates']) == len(cfg['configs']),\
            ('Number of jinja2 template files is not equal '
             'to number of config files to be generated for '
             'program "%s" (%s), i.e. len(templates) != len(configs)'
             % (cfg['name'], config_name))

        for additional in cfg["additional"]:
            assert type(additional) is str,\
                "All additional arguments in yaml should be strings. (%s, %s)"\
                % (cfg['name'], config_name)


def get_qmin_config(path):
    """Reads configuration from the *.rpl file and determines the query-minimization setting."""
    with open(path) as f:
        for line in f:
            if re.search(r"^CONFIG_END", line) or re.search(r"^SCENARIO_BEGIN", line):
                return None
            if re.search(r"^\s*query-minimization:\s*(on|yes)", line):
                return True
            if re.search(r"^\s*query-minimization:\s*(off|no)", line):
                return False


def scenarios(paths, configs):
    """Return a list of Scenario tuples for the *.rpl files found in the given
    paths, each paired with its query-minimization setting and parsed config."""
    assert len(paths) == len(configs),\
        "Number of --config arguments has to be equal to the number of --scenarios arguments."
    scenario_list = []
    for path, config in zip(paths, configs):
        config_dict = ordered_load(open(config), yaml.SafeLoader)
        config_sanity_check(config_dict, config)
        if os.path.isfile(path):
            filelist = [path]  # path to single file, accept it
        else:
            filelist = sorted(glob.glob(os.path.join(path, "*.rpl")))
            if not filelist:
                raise ValueError('no *.rpl files found in path "{}"'.format(path))
        for file in filelist:
            scenario_list.append(Scenario(file, get_qmin_config(file), config_dict))
    return scenario_list


def pytest_addoption(parser):
    parser.addoption("--config", action="append", help="path to Deckard configuration .yaml file")
    parser.addoption("--scenarios", action="append", help="directory with .rpl files")


def pytest_generate_tests(metafunc):
    """pytest hook: parametrize the test over all the *.rpl files."""
    if 'scenario' in metafunc.fixturenames:
        if metafunc.config.option.config is None:
            configs = []
        else:
            configs = metafunc.config.option.config
        if metafunc.config.option.scenarios is None:
            paths = ["sets/resolver"] * len(configs)
        else:
            paths = metafunc.config.option.scenarios
        metafunc.parametrize("scenario", scenarios(paths, configs), ids=str)
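To connect this conftest.py with the flags used in the CI configuration, here is a rough sketch of an equivalent manual pytest invocation. It is illustrative only: it assumes a test module consuming the scenario fixture is collected from the repository root (that module is not shown in this diff), and the YAML path below is a hypothetical example rather than a file added by this merge request.

    # Option names are taken from conftest.py (--config, --scenarios),
    # pytest-xdist (-n) and pytest itself (-k, --junit-xml);
    # the configuration file name is hypothetical.
    pytest -n "$(nproc)" --junit-xml=report.xml \
        --config configs/kresd.yaml --scenarios sets/resolver \
        -k iter_hint_lame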
#!/usr/bin/env python3
import argparse
from datetime import datetime
import errno
import logging
@@ -8,14 +7,12 @@ import os
import shutil
import socket
import subprocess
import sys
import tempfile
import time
import jinja2
import yaml
from pydnstest import scenario, testserver, test
from pydnstest import scenario, testserver
# path to Deckard files
@@ -86,18 +83,6 @@ class IfaceManager(object):
for name in self.name2iface}
def find_objects(path):
""" Recursively scan file/directory for scenarios. """
result = []
if os.path.isdir(path):
for e in os.listdir(path):
result += find_objects(os.path.join(path, e))
elif os.path.isfile(path):
if path.endswith('.rpl'):
result.append(path)
return result
def write_timestamp_file(path, tst):
time_file = open(path, 'w')
time_file.write(datetime.fromtimestamp(tst).strftime('@%Y-%m-%d %H:%M:%S'))
@@ -234,8 +219,8 @@ def setup_daemon_files(prog_cfg, template_ctx, ta_files):
ta_files, prog_cfg['dir'])
# generate configuration files
j2template_loader = jinja2.FileSystemLoader(
searchpath=os.path.dirname(os.path.abspath(__file__)))
j2template_loader = jinja2.FileSystemLoader(searchpath=os.getcwd())
print(os.path.abspath(os.getcwd()))
j2template_env = jinja2.Environment(loader=j2template_loader)
logging.getLogger('deckard.daemon.%s.template' % name).debug(subst)
@@ -287,11 +272,11 @@ def conncheck_daemon(process, cfg, sockfamily):
sock.close()
def process_file(path, args, prog_cfgs):
def process_file(path, qmin, prog_cfgs):
"""Parse scenario from a file object and create workdir."""
# Parse scenario
case, cfg_text = scenario.parse_file(os.path.realpath(path))
cfg_ctx, ta_files = scenario.parse_config(cfg_text, args.qmin, INSTALLDIR)
cfg_ctx, ta_files = scenario.parse_config(cfg_text, qmin, INSTALLDIR)
template_ctx = setup_network(cfg_ctx['_SOCKET_FAMILY'], prog_cfgs)
# merge variables from scenario with generated network variables (scenario has priority)
template_ctx.update(cfg_ctx)
@@ -301,6 +286,7 @@ def process_file(path, args, prog_cfgs):
# get working directory and environment variables
tmpdir = setup_common_env(cfg_ctx)
shutil.copy2(path, os.path.join(tmpdir))
try:
daemons = setup_daemons(tmpdir, prog_cfgs, template_ctx, ta_files)
run_testcase(daemons,
@@ -352,143 +338,10 @@ def run_testcase(daemons, case, root_addr, addr_family, prog_under_test_ip):
with open(daemon['cfg']['log']) as logf:
for line in logf:
daemon_logger_log.debug(line.strip())
ignore_exit = bool(os.environ.get('IGNORE_EXIT_CODE', 0))
ignore_exit = daemon["cfg"].get('ignore_exit_code', False)
if daemon['proc'].returncode != 0 and not ignore_exit:
raise ValueError('process %s terminated with return code %s'
% (daemon['cfg']['name'], daemon['proc'].returncode))
# Do not clear files if the server crashed (for analysis)
if server.undefined_answers > 0:
raise ValueError('the scenario does not define all necessary answers (see error log)')
def test_platform():
if sys.platform == 'windows':
raise NotImplementedError('not supported at all on Windows')
def deckard():
"""Entrypoint for script"""
# auxiliary classes for argparse
class ColonSplitter(argparse.Action): # pylint: disable=too-few-public-methods
"""Split argument string into list holding items separated by colon."""
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, values.split(':'))
class EnvDefault(argparse.Action): # pylint: disable=too-few-public-methods
"""Get default value for parameter from environment variable."""
def __init__(self, envvar, required=True, default=None, **kwargs):
if envvar and envvar in os.environ:
default = os.environ[envvar]
if required and default is not None:
required = False
super(EnvDefault, self).__init__(default=default, required=required, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, values)
def loglevel2number(level):
"""Convert direct log level number or symbolic name to a number."""
try:
return int(level)
except ValueError:
pass # not a number, try if it is a named constant from logging module
try:
return getattr(logging, level.upper())
except AttributeError:
raise ValueError('unknown log level %s' % level)
test_platform()
argparser = argparse.ArgumentParser()
argparser.add_argument('--qmin', help='query minimization (default: enabled)', default=True,
action=EnvDefault, envvar='QMIN', type=scenario.str2bool)
argparser.add_argument('--loglevel', help='verbosity (default: errors + test results)',
action=EnvDefault, envvar='VERBOSE',
type=loglevel2number, required=False)
argparser.add_argument('scenario', help='path to test scenario')
argparser.add_argument('--noclean', action='store_true',
help='don\'t delete working directory')
subparsers = argparser.add_subparsers(
dest='cmd', title='sub-commands',
description='run scenario with one binary specified on command line '
'or multiple binaries specified in config file')
run_one = subparsers.add_parser('one', help='run single binary inside single scenario')
run_one.add_argument('binary', help='executable to test')
run_one.add_argument('templates', help='colon-separated list of jinja2 template files',
action=ColonSplitter)
run_one.add_argument('configs',
help='colon-separated list of files to be generated from templates',
action=ColonSplitter)
run_one.add_argument('additional', help='additional parameters for the binary', nargs='*')
run_cfg = subparsers.add_parser(
'multiple',
help='run all binaries specified in YaML file; '
'all binaries will be executed inside single scenario')
run_cfg.add_argument('yaml', help='YaML specifying binaries and their parameter',
type=open)
args = argparser.parse_args()
if not args.loglevel:
# default verbosity: errors + test results
args.loglevel = logging.ERROR
logging.config.dictConfig(
{
'version': 1,
'incremental': True,
'loggers': {
'deckard.hint': {'level': 'INFO'},
'pydnstest.test.Test': {'level': 'INFO'}
}
})
if args.loglevel <= logging.DEBUG: # include message origin
logging.basicConfig(level=args.loglevel)
else:
logging.basicConfig(level=args.loglevel, format='%(message)s')
log = logging.getLogger('deckard')
if args.cmd == 'multiple':
config = yaml.load(args.yaml)
else:
assert args.cmd == 'one'
config = {
'programs': [{
'binary': args.binary,
'templates': args.templates,
'configs': args.configs,
'additional': args.additional,
'name': os.path.basename(args.binary),
}],
'noclean': args.noclean,
}
mandatory_keys = {'name', 'binary', 'templates', 'configs', 'additional'}
for cfg in config['programs']:
missing_keys = mandatory_keys - set(cfg.keys())
if missing_keys:
log.critical('Mandatory fields in configuration are missing: %s', missing_keys)
sys.exit(1)
# sanity check templates vs. configs
if len(cfg['templates']) != len(cfg['configs']):
log.critical('Number of jinja2 template files is not equal '
'to number of config files to be generated for '
'program "%s", i.e. len(templates) != len(configs)',
cfg['name'])