Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Showing with 2250 additions and 254 deletions
include config.mk
include platform.mk
# Targets
all: info lib daemon modules
install: lib-install daemon-install modules-install etc-install
check: all tests
clean: contrib-clean lib-clean daemon-clean modules-clean tests-clean doc-clean bench-clean
doc: doc-html
.PHONY: all install check clean doc info
# Options
ifdef COVERAGE
BUILD_CFLAGS += --coverage
endif
# Dependencies
$(eval $(call find_lib,libknot,2.3.1,yes))
$(eval $(call find_lib,libdnssec,2.3.1,yes))
$(eval $(call find_lib,libzscanner,2.3.1,yes))
$(eval $(call find_lib,lmdb))
$(eval $(call find_lib,libuv,1.0,yes))
$(eval $(call find_lib,nettle,,yes))
$(eval $(call find_alt,lua,luajit))
$(eval $(call find_lib,cmocka))
$(eval $(call find_bin,doxygen))
$(eval $(call find_bin,sphinx-build))
$(eval $(call find_lib,libmemcached,1.0))
$(eval $(call find_lib,hiredis,,yes))
$(eval $(call find_lib,socket_wrapper))
$(eval $(call find_lib,libsystemd,227))
$(eval $(call find_lib,gnutls))
# Lookup SONAME
$(eval $(call find_soname,libknot))
$(eval $(call find_soname,libzscanner))
ifeq ($(libknot_SONAME),)
$(error "Unable to resolve libknot_SONAME, update find_soname in platform.mk")
endif
ifeq ($(libzscanner_SONAME),)
$(error "Unable to resolve libzscanner_SONAME, update find_some in platform.mk")
endif
# Find Go version and platform
GO_VERSION := $(shell $(GO) version 2>/dev/null)
ifeq ($(GO_VERSION),)
GO_VERSION := 0
else
GO_PLATFORM := $(word 2,$(subst /, ,$(word 4,$(GO_VERSION))))
GO_VERSION := $(subst .,,$(subst go,,$(word 3,$(GO_VERSION))))
endif
$(eval $(call find_ver,go,$(GO_VERSION),16))
# Check if Go is able to build shared libraries
ifeq ($(HAS_go),yes)
ifneq ($(GO_PLATFORM),$(filter $(GO_PLATFORM),amd64 386 arm arm64))
HAS_go := no
endif
else
$(eval $(call find_ver,go,$(GO_VERSION),15))
ifeq ($(HAS_go),yes)
ifneq ($(GO_PLATFORM),$(filter $(GO_PLATFORM),arm amd64))
HAS_go := no
endif
endif
endif
# Work around luajit on OS X
ifeq ($(PLATFORM), Darwin)
ifneq (,$(findstring luajit, $(lua_LIBS)))
lua_LIBS += -pagezero_size 10000 -image_base 100000000
endif
endif
BUILD_CFLAGS += $(libknot_CFLAGS) $(libuv_CFLAGS) $(nettle_CFLAGS) $(cmocka_CFLAGS) $(lua_CFLAGS) $(libdnssec_CFLAGS) $(libsystemd_CFLAGS)
BUILD_CFLAGS += $(addprefix -I,$(wildcard contrib/ccan/*) contrib/murmurhash3)
# Check if it has libknot 2.3.0 and nettle to support DNS cookies
$(eval $(call find_alt,knot230,libknot,2.3))
ifeq ($(HAS_nettle)|$(HAS_knot230),yes|yes)
BUILD_CFLAGS += -DENABLE_COOKIES
ENABLE_COOKIES := yes
endif
# Overview
info:
$(info Target: Knot DNS Resolver $(VERSION)-$(PLATFORM))
$(info Compiler: $(CC) $(BUILD_CFLAGS))
$(info )
$(info Variables)
$(info ---------)
$(info HARDENING: $(HARDENING))
$(info BUILDMODE: $(BUILDMODE))
$(info PREFIX: $(PREFIX))
$(info DESTDIR: $(DESTDIR))
$(info BINDIR: $(BINDIR))
$(info SBINDIR: $(SBINDIR))
$(info LIBDIR: $(LIBDIR))
$(info ETCDIR: $(ETCDIR))
$(info INCLUDEDIR: $(INCLUDEDIR))
$(info MODULEDIR: $(MODULEDIR))
$(info )
$(info Core Dependencies)
$(info -----------------)
$(info [$(HAS_libknot)] libknot (lib))
$(info [yes] $(if $(filter $(HAS_lmdb),yes),system,embedded) lmdb (lib))
$(info [$(HAS_lua)] luajit (daemon))
$(info [$(HAS_libuv)] libuv (daemon))
$(info [$(HAS_gnutls)] libgnutls (daemon))
$(info [$(HAS_nettle)] nettle (DNS cookies))
$(info )
$(info Optional)
$(info --------)
$(info [$(HAS_doxygen)] doxygen (doc))
$(info [$(HAS_go)] go (modules/go, Go buildmode=c-shared support))
$(info [$(HAS_libmemcached)] libmemcached (modules/memcached))
$(info [$(HAS_hiredis)] hiredis (modules/redis))
$(info [$(HAS_cmocka)] cmocka (tests/unit))
$(info [$(HAS_libsystemd)] systemd (daemon))
$(info )
ifeq ($(HAS_libknot),no)
$(error libknot >= 2.3.1 required)
endif
ifeq ($(HAS_libzscanner),no)
$(error libzscanner >= 2.3.1 required)
endif
ifeq ($(HAS_libdnssec),no)
$(error libdnssec >= 2.3.1 required)
endif
ifeq ($(HAS_libuv),no)
$(error libuv >= 1.0 required)
endif
# Installation directories
$(DESTDIR)$(MODULEDIR):
$(INSTALL) -d $@
$(DESTDIR)$(ETCDIR):
$(INSTALL) -m 0750 -d $@
# Sub-targets
include contrib/contrib.mk
include lib/lib.mk
include daemon/daemon.mk
include modules/modules.mk
include tests/tests.mk
include doc/doc.mk
include etc/etc.mk
include bench/bench.mk
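The build options above can be overridden on the make command line. A minimal sketch of a typical invocation, assuming the listed dependencies are installed (the variable values are illustrative only):

```
# build with coverage instrumentation and a custom prefix (example values)
make PREFIX=/usr/local COVERAGE=1
# stage the installation into a scratch directory
make install DESTDIR=/tmp/knot-resolver-stage
```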
# Knot DNS Resolver
# Knot Resolver
[![Build Status](https://img.shields.io/travis/CZ-NIC/knot-resolver/master.svg)](https://travis-ci.org/CZ-NIC/knot-resolver)
[![Coverage Status](https://img.shields.io/coveralls/CZ-NIC/knot-resolver.svg)](https://coveralls.io/r/CZ-NIC/knot-resolver)
[![Coverity](https://img.shields.io/coverity/scan/3912.svg)](https://scan.coverity.com/projects/3912)
[![Documentation Status](https://readthedocs.org/projects/knot-resolver/badge/?version=latest)](https://readthedocs.org/projects/knot-resolver/?badge=latest)
[![Build Status](https://gitlab.nic.cz/knot/knot-resolver/badges/nightly/pipeline.svg?x)](https://gitlab.nic.cz/knot/knot-resolver/commits/nightly)
[![Coverage Status](https://gitlab.nic.cz/knot/knot-resolver/badges/nightly/coverage.svg?x)](https://www.knot-resolver.cz/documentation/latest)
[![Packaging status](https://repology.org/badge/tiny-repos/knot-resolver.svg)](https://repology.org/project/knot-resolver/versions)
Knot Resolver is a full caching DNS resolver implementation. The core architecture is tiny and efficient, written in C and [LuaJIT][luajit], providing a foundation and a state-machine-like API for extension modules. There are three built-in modules - *iterator*, *validator* and *cache* - which provide the main functionality of the resolver. A few other modules are automatically loaded by default to extend the resolver's functionality.
The Knot DNS Resolver is a caching full resolver implementation written in C and [LuaJIT][luajit], both a resolver library and a daemon. The core architecture is tiny and efficient, and provides a foundation and
a state-machine-like API for extensions. There are three of those built in - *iterator*, *cache* and *validator* - and most of the [rich features](https://knot-resolver.readthedocs.io/en/latest/modules.html) are written in LuaJIT, Go and C. Batteries are included, but optional.
Since Knot Resolver version 6, it also includes a so-called [manager][manager]. It is a new component written in [Python][python] that hides the complexity of older versions and makes the resolver more user friendly. New features include, for example, declarative configuration in YAML format and an HTTP API for dynamic changes to the resolver.
The LuaJIT modules, support for DNS privacy and DNSSEC, and persistent cache with low memory footprint make it a great personal DNS resolver or a research tool to tap into DNS data. TL;DR it's the [OpenResty][openresty] of DNS.
Knot Resolver uses a [different scaling strategy][scaling] than the rest of the DNS resolvers - no threading, shared-nothing architecture (except MVCC cache which can be shared), which allows you to pin workers to available CPU cores and grow by self-replication. You can start and stop additional workers based on the contention without downtime, which is automated by the [manager][manager] by default.
Several cache backends (LMDB, Redis and Memcached), strong filtering rules, and auto-configuration with etcd make it a great large-scale resolver solution.
The LuaJIT modules, support for DNS privacy and DNSSEC, and persistent cache with low memory footprint make it a great personal DNS resolver or a research tool to tap into DNS data. Strong filtering rules and auto-configuration with etcd make it a great large-scale resolver solution. It also has strong support for DNS over TCP, in particular TCP Fast-Open, query pipelining and deduplication, and response reordering.
The server adopts a [different scaling strategy][scaling] than the rest of the DNS recursors - no threading, shared-nothing architecture (except MVCC cache that may be shared) that allows you to pin instances on available CPU cores and grow by self-replication. You can start and stop additional nodes depending on the contention without downtime.
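As a rough illustration of the shared-nothing model, workers run as independent processes that can be started and stopped at runtime. A sketch assuming the systemd packaging with `kresd@` template units (the same units this repository's CI smoke tests use):

```
# start two independent worker instances
systemctl start kresd@1
systemctl start kresd@2
# later, scale back down without touching the remaining worker
systemctl stop kresd@2
```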
For more on using the resolver, see the [User Documentation][doc]. See the [Developer Documentation][doc-dev] for detailed architecture and development.
It also has strong support for DNS over TCP, notably TCP Fast-Open, query pipelining and deduplication, and response reordering.
## Packages
### Packages
The latest stable packages for various distributions are available in our
[upstream repository](https://pkg.labs.nic.cz/doc/?project=knot-resolver).
Follow the installation instructions to add this repository to your system.
Knot Resolver is packaged for Debian, Fedora, Ubuntu and [openSUSE](https://build.opensuse.org/package/show/server:dns/knot-resolver).
See [project page](https://www.knot-resolver.cz/pages/try.html) for more information.
Knot Resolver is also available from the following distributions' repositories:
### Building from sources
* [Fedora and Fedora EPEL](https://src.fedoraproject.org/rpms/knot-resolver)
* [Debian stable](https://packages.debian.org/stable/knot-resolver),
[Debian testing](https://packages.debian.org/testing/knot-resolver),
[Debian unstable](https://packages.debian.org/sid/knot-resolver)
* [Ubuntu](https://packages.ubuntu.com/jammy/knot-resolver)
* [Arch Linux](https://archlinux.org/packages/extra/x86_64/knot-resolver/)
* [Alpine Linux](https://pkgs.alpinelinux.org/packages?name=knot-resolver)
The Knot DNS Resolver [depends][depends] on version 2.1 of the Knot DNS library, [LuaJIT][luajit] and [libuv][libuv].
See the [Building project][depends] documentation page for more information.
### Packaging
### Docker image
The project uses [`apkg`](https://gitlab.nic.cz/packaging/apkg) for packaging.
See [`distro/README.md`](distro/README.md) for packaging specific instructions.
This is simple and doesn't require any dependencies or system modifications; just run:
## Building from sources
Knot Resolver mainly depends on the [Knot DNS][knot-dns] libraries, [LuaJIT][luajit], [libuv][libuv] and [Python][python].
See the [Building project][build] documentation page for more information.
## Running
By default, Knot Resolver comes with [systemd][systemd] integration and you just need to start its service. It requires no configuration changes to run a server on localhost.
```
$ docker run -it cznic/knot-resolver
# systemctl start knot-resolver
```
See the build page [hub.docker.com/r/cznic/knot-resolver](https://hub.docker.com/r/cznic/knot-resolver/) for more information and options.
See the documentation at [knot-resolver.cz/documentation/latest][doc] for more information.
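Once the service is running, a quick way to verify that it answers is to query it directly; the command below mirrors the smoke test used in this repository's CI:

```
$ kdig @127.0.0.1 nic.cz
```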
### Running
## Running the Docker image
The project builds a resolver library in the `lib` directory, and a daemon in the `daemon` directory. It requires no configuration or parameters to run a server on localhost.
Running the Docker image is simple and doesn't require any dependencies or system modifications; just run:
```
$ kresd
$ docker run -Pit cznic/knot-resolver
```
See the documentation at [knot-resolver.readthedocs.io][doc] for more options.
The images are meant as an easy way to try the resolver, and they're not designed for production use.
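If you prefer publishing the DNS ports explicitly instead of relying on `-P`, a mapping along these lines should work (the host ports are illustrative):

```
$ docker run -it -p 53:53/udp -p 53:53/tcp cznic/knot-resolver
```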
[depends]: https://knot-resolver.readthedocs.io/en/latest/build.html
[doc]: https://knot-resolver.readthedocs.io/en/latest/index.html
[scaling]: https://knot-resolver.readthedocs.io/en/latest/daemon.html#scaling-out
[deckard]: https://gitlab.labs.nic.cz/knot/deckard
[luajit]: http://luajit.org/
[libuv]: https://github.com/libuv/libuv
[openresty]: https://openresty.org/
## Contacting us
### Contacting us
- [GitLab issues](https://gitlab.nic.cz/knot/knot-resolver/issues) (you may authenticate via GitHub)
- [mailing list](https://lists.nic.cz/postorius/lists/knot-resolver-announce.lists.nic.cz/)
- [![Join the chat at https://gitter.im/CZ-NIC/knot-resolver](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/CZ-NIC/knot-resolver?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[![Join the chat at https://gitter.im/CZ-NIC/knot-resolver](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/CZ-NIC/knot-resolver?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[build]: https://www.knot-resolver.cz/documentation/latest/dev/build.html
[doc]: https://www.knot-resolver.cz/documentation/latest/
[doc-dev]: https://www.knot-resolver.cz/documentation/latest/dev
[knot-dns]: https://www.knot-dns.cz/
[luajit]: https://luajit.org/
[libuv]: http://libuv.org
[python]: https://www.python.org/
[systemd]: https://systemd.io/
[scaling]: https://www.knot-resolver.cz/documentation/latest/config-multiple-workers.html
[manager]: https://www.knot-resolver.cz/documentation/latest/dev/architecture.html
bench_BIN := \
bench_lru
# Dependencies
bench_DEPEND := $(libkres)
bench_LIBS := $(libkres_TARGET) $(libkres_LIBS)
# Platform-specific library injection
ifeq ($(PLATFORM),Darwin)
preload_syms := DYLD_FORCE_FLAT_NAMESPACE=1 DYLD_LIBRARY_PATH="$(DYLD_LIBRARY_PATH):$(abspath lib)"
else
preload_syms := LD_LIBRARY_PATH="$(LD_LIBRARY_PATH):$(abspath lib)"
endif
# Make bench binaries
define make_bench
$(1)_CFLAGS := -fPIE
$(1)_SOURCES := bench/$(1).c
$(1)_LIBS := $(bench_LIBS)
$(1)_DEPEND := $(bench_DEPEND)
$(call make_bin,$(1),bench)
endef
$(foreach bench,$(bench_BIN),$(eval $(call make_bench,$(bench))))
# Targets
.PHONY: bench bench-clean
bench-clean: $(foreach bench,$(bench_BIN),$(bench)-clean)
bench: $(foreach bench,$(bench_BIN),bench/$(bench))
@echo "Test LRU with increasing overfill, misses should increase ~ linearly" >&2
@./bench/bench_lru 23 bench/bench_lru_set1.tsv - 65536 # fill ~ 1
@./bench/bench_lru 23 bench/bench_lru_set1.tsv - 32768 # fill ~ 2
@./bench/bench_lru 23 bench/bench_lru_set1.tsv - 16384 # fill ~ 4
@./bench/bench_lru 23 bench/bench_lru_set1.tsv - 8192 # fill ~ 8
@./bench/bench_lru 23 bench/bench_lru_set1.tsv - 4096 # fill ~ 16
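Reading the targets above together with `bench_lru.c` below, the arguments appear to be: log2 of the number of lookups, a TSV file with keys, a placeholder, and the LRU size. A manual run might therefore look like this (the key file is the one used by the targets above):

```
# 2^20 lookups over the sample key set with a 4096-entry LRU
./bench/bench_lru 20 bench/bench_lru_set1.tsv - 4096
```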
/* Copyright (C) 2015 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
/* Copyright (C) CZ.NIC, z.s.p.o. <knot-resolver@labs.nic.cz>
* SPDX-License-Identifier: GPL-3.0-or-later
*/
#include <math.h>
@@ -23,19 +11,23 @@
#include "contrib/ucw/lib.h"
#include "daemon/engine.h"
#include "lib/nsrep.h"
#include "lib/selection.h"
typedef kr_nsrep_lru_t lru_bench_t;
typedef lru_t(unsigned) lru_bench_t;
#define p_out(...) do { \
printf(__VA_ARGS__); \
fflush(stdout); \
} while (0)
#define p_err(...) fprintf(stderr, __VA_ARGS__)
(void)fflush(stdout); \
} while (0)
#define p_err(...) ((void)fprintf(stderr, __VA_ARGS__))
#ifndef LRU_RTT_SIZE
#define LRU_RTT_SIZE 65536 /**< NS RTT cache size */
#endif
static int die(const char *cause)
{
fprintf(stderr, "%s: %s\n", cause, strerror(errno));
(void)fprintf(stderr, "%s: %s\n", cause, strerror(errno));
exit(1);
}
@@ -151,7 +143,7 @@ static struct key * read_lines(const char *fname, size_t *count, char **pfree)
return result;
}
// compatibility layer for the oler lru_* names; it's more compler with lru_create
// compatibility layer for the older lru_* names
#ifndef lru_create
#define lru_get_new lru_set
#define lru_get_try lru_get
@@ -179,7 +171,7 @@ int main(int argc, char ** argv)
struct key *keys = read_lines(argv[2], &key_count, &data_to_free);
size_t run_count;
{
size_t run_log = atoi(argv[1]);
size_t run_log = atoi(argv[1]); // NOLINT: atoi is fine for this tool...
assert(run_log < 64);
run_count = 1ULL << run_log;
p_err("\ntest run length:\t2^");
@@ -187,7 +179,7 @@ int main(int argc, char ** argv)
}
struct timeval time;
const int lru_size = argc > 4 ? atoi(argv[4]) : LRU_RTT_SIZE;
const int lru_size = argc > 4 ? atoi(argv[4]) : LRU_RTT_SIZE; // NOLINT: ditto atoi
lru_bench_t *lru;
#ifdef lru_create
@@ -212,7 +204,7 @@ int main(int argc, char ** argv)
p_err("\nload everything:\t");
time_get(&time);
for (size_t i = 0, ki = key_count - 1; i < run_count; ++i, --ki) {
unsigned *r = lru_get_new(lru, keys[ki].chars, keys[ki].len);
unsigned *r = lru_get_new(lru, keys[ki].chars, keys[ki].len, NULL);
if (!r || *r == 0)
++miss;
if (r)
......
# bench
# SPDX-License-Identifier: GPL-3.0-or-later
bench_lru_src = files([
'bench_lru.c',
])
cc = meson.get_compiler('c')
m_dep = cc.find_library('m', required : false)
bench_lru = executable(
'bench_lru',
bench_lru_src,
dependencies: [
contrib_dep,
libkres_dep,
m_dep,
],
)
run_target(
'bench',
command: '../scripts/meson/bench.sh',
)
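With the meson definitions above, the `bench` run target is normally driven through ninja. A sketch assuming an out-of-tree build directory named `build_dir`:

```
# configure once, then invoke the 'bench' run target defined above
meson setup build_dir
ninja -C build_dir bench
```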
from typing import Any, Dict
from setuptools import Extension
def build(setup_kwargs: Dict[Any, Any]) -> None:
setup_kwargs.update(
{
"ext_modules": [
Extension(
name="knot_resolver.controller.supervisord.plugin.notify",
sources=["python/knot_resolver/controller/supervisord/plugin/notifymodule.c"],
),
]
}
)
DECKARD_COMMIT=$(git ls-tree HEAD:tests/integration/ | grep commit | grep deckard | cut -f1 | cut -f3 '-d ')
DECKARD_PATH="tests/integration/deckard"
pushd $DECKARD_PATH > /dev/null
if git merge-base --is-ancestor $DECKARD_COMMIT origin/master; then
echo "Deckard submodule commit is on in its master branch. All good in the hood."
exit 0
else
echo "Deckard submodule commit $DECKARD_COMMIT is not in Deckard's master branch."
echo "This WILL cause CI breakages so make sure your changes in Deckard are merged"
echo "or point the submodule to another commit."
exit 1
fi
#!/bin/sh
sed 's|</testcase>|</testcase>\n|g' -i "$@"
sed -e '/<failure \/>/,/<\/testcase>/s/<\(\/\?\)system-\(out\|err\)>/<\1failure>/g' \
-e 's/<failure \/>//g' \
-i "$@"
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-3.0-or-later
import json
import time
import sys
import requests
BRANCH_API_ENDPOINT = "https://api.github.com/repos/CZ-NIC/knot-resolver/actions/runs?branch={branch}" # noqa
TIMEOUT = 20*60 # 20 mins max
POLL_DELAY = 60
SYNC_TIMEOUT = 10*60
def exit(msg='', html_url='', code=1):
print(msg, file=sys.stderr)
print(html_url)
sys.exit(code)
end_time = time.time() + TIMEOUT
sync_timeout = time.time() + SYNC_TIMEOUT
while time.time() < end_time:
response = requests.get(
BRANCH_API_ENDPOINT.format(branch=sys.argv[1]),
headers={"Accept": "application/vnd.github.v3+json"})
if response.status_code == 404:
pass # not created yet?
elif response.status_code == 200:
data = json.loads(response.content.decode('utf-8'))
try:
for i in range(0, 2): # two runs ATM
run = data['workflow_runs'][i]
conclusion = run['conclusion']
html_url = run['html_url']
commit_sha = run['head_sha']
except (KeyError, IndexError):
time.sleep(POLL_DELAY)
continue
if commit_sha != sys.argv[2]:
if time.time() < sync_timeout:
time.sleep(POLL_DELAY)
continue
exit("Fetched invalid GH Action: commit mismatch. Re-run or push again?")
if conclusion is None:
pass
if conclusion == "success":
exit("SUCCESS!", html_url, code=0)
elif isinstance(conclusion, str):
# failure, neutral, cancelled, skipped, timed_out, or action_required
exit("GitHub Actions Conclusion: {}!".format(conclusion.upper()), html_url)
else:
exit("API Response Code: {}".format(response.status_code), code=2)
time.sleep(POLL_DELAY)
exit("Timed out!")
#!/bin/sh
grep '\<assert\>' -- $(git ls-files | grep '\.[hc]$' | grep -vE '^(contrib|bench|tests|daemon/ratelimiting.test)/|^lib/kru')
test $? -eq 1
default:
interruptible: true
stages:
- pkgbuild
- pkgtest
# pkgbuild {{{
.pkgbuild: &pkgbuild
stage: pkgbuild
tags:
- lxc
- amd64
before_script:
- git config --global user.name CI
- git config --global user.email ci@nic
needs: # https://gitlab.nic.cz/help/ci/yaml/README.md#artifact-downloads-to-child-pipelines
- pipeline: $PARENT_PIPELINE_ID
job: archive
artifacts:
when: always
expire_in: '1 day'
paths:
- pkg/
.apkgbuild: &apkgbuild # new jinja2 breaks docs (sphinx/breathe)
- pip3 install -U apkg 'jinja2<3.1'
- apkg build-dep -y
- apkg build
.pkgdebrepo: &pkgdebrepo
- apt-get update
- apt-get install -y curl gnupg2
- echo "deb http://download.opensuse.org/repositories/home:/CZ-NIC:/$OBS_REPO/$DISTROTEST_REPO/ /" > /etc/apt/sources.list.d/obs.list
- curl -fsSL "https://download.opensuse.org/repositories/home:CZ-NIC:$OBS_REPO/$DISTROTEST_REPO/Release.key" | gpg --dearmor > /etc/apt/trusted.gpg.d/obs.gpg
- apt-get update
.debpkgbuild: &debpkgbuild
- *pkgdebrepo
- apt-get install -y python3-pip devscripts
- *apkgbuild
centos-7:pkgbuild:
<<: *pkgbuild
image: $CI_REGISTRY/labs/lxc-gitlab-runner/centos-7
before_script:
- export LC_ALL=en_US.UTF-8
- git config --global user.name CI
- git config --global user.email ci@nic
script:
- yum install -y rpm-build python3-pip epel-release
- *apkgbuild
debian-10:pkgbuild:
<<: *pkgbuild
image: $CI_REGISTRY/labs/lxc-gitlab-runner/debian-10
variables:
OBS_REPO: knot-resolver-build
DISTROTEST_REPO: Debian_10
script:
- *debpkgbuild
debian-11:pkgbuild:
<<: *pkgbuild
image: $CI_REGISTRY/labs/lxc-gitlab-runner/debian-11
variables:
OBS_REPO: knot-resolver-build
DISTROTEST_REPO: Debian_11
script:
- *debpkgbuild
fedora-34:pkgbuild:
<<: *pkgbuild
image: $CI_REGISTRY/labs/lxc-gitlab-runner/fedora-34
script:
- dnf install -y rpm-build python3-pip
- *apkgbuild
fedora-35:pkgbuild:
<<: *pkgbuild
image: $CI_REGISTRY/labs/lxc-gitlab-runner/fedora-35
script:
- dnf install -y rpm-build python3-pip
- *apkgbuild
rocky-8:pkgbuild:
<<: *pkgbuild
image: $CI_REGISTRY/labs/lxc-gitlab-runner/rocky-8
script:
- dnf install -y rpm-build python3-pip epel-release dnf-plugins-core
- dnf config-manager --set-enabled powertools
- *apkgbuild
ubuntu-18.04:pkgbuild:
<<: *pkgbuild
image: $CI_REGISTRY/labs/lxc-gitlab-runner/ubuntu-18.04
variables:
OBS_REPO: knot-resolver-build
DISTROTEST_REPO: xUbuntu_18.04
script:
- *debpkgbuild
ubuntu-20.04:pkgbuild:
<<: *pkgbuild
image: $CI_REGISTRY/labs/lxc-gitlab-runner/ubuntu-20.04
variables:
OBS_REPO: knot-resolver-build
DISTROTEST_REPO: xUbuntu_20.04
script:
- *debpkgbuild
nixos-unstable:pkgbuild:
<<: *pkgbuild
# We do NOT use LXC, for now at least.
parallel:
matrix:
- PLATFORM: [ amd64, arm64 ]
tags:
- docker
- linux
- ${PLATFORM}
# https://github.com/NixOS/nix/issues/10648#issuecomment-2101993746
image: docker.io/nixos/nix:latest-${PLATFORM}
variables:
NIX_PATH: nixpkgs=https://github.com/nixos/nixpkgs/archive/nixos-unstable.tar.gz
before_script:
script:
- nix-build '<nixpkgs>' -QA apkg
# the image auto-detects as alpine distro
# If apkg version differs (too much), it will fail to reuse archive and fail.
- ./result/bin/apkg install -d nix
- kresd --version
# }}}
# pkgtest {{{
.pkgtest: &pkgtest
stage: pkgtest
tags:
- lxc
- amd64
.debpkgtest: &debpkgtest
- *pkgdebrepo
- apt-get install -y knot-dnsutils
- apt-get install -y $(find ./pkg/pkgs -name '*.deb' | grep -v module | grep -v debug | grep -v devel)
- systemctl start kresd@1
- kdig @127.0.0.1 nic.cz | grep -qi NOERROR
centos-7:pkgtest:
<<: *pkgtest
needs:
- centos-7:pkgbuild
image: $CI_REGISTRY/labs/lxc-gitlab-runner/centos-7
before_script:
- export LC_ALL=en_US.UTF-8
script:
- yum install -y epel-release
- yum install -y knot-utils findutils
- yum install -y $(find ./pkg/pkgs -name '*.rpm' | grep -v module | grep -v debug | grep -v devel)
- systemctl start kresd@1
- kdig @127.0.0.1 nic.cz | grep -qi NOERROR
debian-10:pkgtest:
<<: *pkgtest
needs:
- debian-10:pkgbuild
image: $CI_REGISTRY/labs/lxc-gitlab-runner/debian-10
variables:
OBS_REPO: knot-resolver-build
DISTROTEST_REPO: Debian_10
script:
- *debpkgtest
debian-11:pkgtest:
<<: *pkgtest
needs:
- debian-11:pkgbuild
image: $CI_REGISTRY/labs/lxc-gitlab-runner/debian-11
variables:
OBS_REPO: knot-resolver-build
DISTROTEST_REPO: Debian_11
script:
- *debpkgtest
fedora-34:pkgtest:
<<: *pkgtest
needs:
- fedora-34:pkgbuild
image: $CI_REGISTRY/labs/lxc-gitlab-runner/fedora-34
script:
- dnf install -y knot-utils findutils
- dnf install -y $(find ./pkg/pkgs -name '*.rpm' | grep -v module | grep -v debug | grep -v devel)
- systemctl start kresd@1
- kdig @127.0.0.1 nic.cz | grep -qi NOERROR
fedora-35:pkgtest:
<<: *pkgtest
needs:
- fedora-35:pkgbuild
image: $CI_REGISTRY/labs/lxc-gitlab-runner/fedora-35
script:
- dnf install -y knot-utils findutils
- dnf install -y $(find ./pkg/pkgs -name '*.rpm' | grep -v module | grep -v debug | grep -v devel)
- systemctl start kresd@1
- kdig @127.0.0.1 nic.cz | grep -qi NOERROR
rocky-8:pkgtest:
<<: *pkgtest
needs:
- rocky-8:pkgbuild
image: $CI_REGISTRY/labs/lxc-gitlab-runner/rocky-8
script:
- dnf install -y epel-release
- dnf install -y knot-utils findutils
- dnf install -y $(find ./pkg/pkgs -name '*.rpm' | grep -v module | grep -v debug | grep -v devel)
- systemctl start kresd@1
- kdig @127.0.0.1 nic.cz | grep -qi NOERROR
ubuntu-18.04:pkgtest:
<<: *pkgtest
needs:
- ubuntu-18.04:pkgbuild
image: $CI_REGISTRY/labs/lxc-gitlab-runner/ubuntu-18.04
variables:
OBS_REPO: knot-resolver-build
DISTROTEST_REPO: xUbuntu_18.04
script:
- *debpkgtest
ubuntu-20.04:pkgtest:
<<: *pkgtest
needs:
- ubuntu-20.04:pkgbuild
image: $CI_REGISTRY/labs/lxc-gitlab-runner/ubuntu-20.04
variables:
OBS_REPO: knot-resolver-build
DISTROTEST_REPO: xUbuntu_20.04
script:
- *debpkgtest
# }}}
-- SPDX-License-Identifier: GPL-3.0-or-later
-- Refer to manual: https://www.knot-resolver.cz/documentation/latest/
-- Listen on localhost and external interface
net.listen('127.0.0.1', 5353)
net.listen('127.0.0.1', 8853, { tls = true })
net.ipv6=false
-- Auto-maintain root TA
trust_anchors.add_file('.local/etc/knot-resolver/root.keys')
cache.size = 1024 * MB
-- Load Useful modules
modules = {
'workarounds < iterate',
'policy', -- Block queries to local zones/bad sites
'view', -- Views for certain clients
'hints > iterate', -- Allow loading /etc/hosts or custom root hints
'stats', -- Track internal statistics
}
-- avoid TC flags returned to respdiff
local _, up_bs = net.bufsize()
net.bufsize(4096, up_bs)
log_level('debug')
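This is the configuration that the respdiff scripts below load into kresd. Starting it by hand and querying the non-standard port could look like this (the flags match `ci/respdiff/restart-kresd.sh` further down):

```
# run kresd with the respdiff config and test the plain-DNS listener
kresd -n -q -c "$(pwd)/ci/respdiff/kresd.config" &
sleep 1   # give the listener a moment to come up
kdig @127.0.0.1 -p 5353 nic.cz
```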
# SPDX-License-Identifier: GPL-3.0-or-later
[sendrecv]
# in seconds
timeout = 11
# number of queries to run simultaneously
jobs = 64
# in seconds (float); delay each query by a random time (uniformly distributed) between min and max; set max to 0 to disable
time_delay_min = 0
time_delay_max = 0
[servers]
names = kresd, bind, unbound
# symbolic names of DNS servers under test
# separate multiple values by ,
# each symbolic name in [servers] section refers to config section
# containing IP address and port of particular server
[kresd]
ip = 127.0.0.1
port = 5353
transport = tcp
graph_color = #00a2e2
restart_script = ./ci/respdiff/restart-kresd.sh
[bind]
ip = 127.0.0.1
port = 53533
transport = udp
graph_color = #e2a000
restart_script = ./ci/respdiff/restart-bind.sh
[unbound]
ip = 127.0.0.1
port = 53535
transport = udp
graph_color = #218669
restart_script = ./ci/respdiff/restart-unbound.sh
[diff]
# symbolic name of server under test
# other servers are used as reference when comparing answers from the target
target = kresd
# fields and comparison methods used when comparing two DNS messages
criteria = opcode, rcode, flags, question, answertypes, answerrrsigs
# other supported criteria values: authority, additional, edns, nsid
[report]
# diffsum reports mismatches in field values in this order
# if particular message has multiple mismatches, it is counted only once into category with highest weight
field_weights = timeout, malformed, opcode, question, rcode, flags, answertypes, answerrrsigs, answer, authority, additional, edns, nsid
# SPDX-License-Identifier: GPL-3.0-or-later
[sendrecv]
# in seconds
timeout = 11
# number of queries to run simultaneously
jobs = 64
# in seconds (float); delay each query by a random time (uniformly distributed) between min and max; set max to 0 to disable
time_delay_min = 0
time_delay_max = 0
[servers]
names = kresd, bind, unbound
# symbolic names of DNS servers under test
# separate multiple values by ,
# each symbolic name in [servers] section refers to config section
# containing IP address and port of particular server
[kresd]
ip = 127.0.0.1
port = 8853
transport = tls
graph_color = #00a2e2
restart_script = ./ci/respdiff/restart-kresd.sh
[bind]
ip = 127.0.0.1
port = 53533
transport = udp
graph_color = #e2a000
restart_script = ./ci/respdiff/restart-bind.sh
[unbound]
ip = 127.0.0.1
port = 53535
transport = udp
graph_color = #218669
restart_script = ./ci/respdiff/restart-unbound.sh
[diff]
# symbolic name of server under test
# other servers are used as reference when comparing answers from the target
target = kresd
# fields and comparison methods used when comparing two DNS messages
criteria = opcode, rcode, flags, question, answertypes, answerrrsigs
# other supported criteria values: authority, additional, edns, nsid
[report]
# diffsum reports mismatches in field values in this order
# if particular message has multiple mismatches, it is counted only once into category with highest weight
field_weights = timeout, malformed, opcode, question, rcode, flags, answertypes, answerrrsigs, answer, authority, additional, edns, nsid
# SPDX-License-Identifier: GPL-3.0-or-later
[sendrecv]
# in seconds
timeout = 11
# number of queries to run simultaneously
jobs = 64
# in seconds (float); delay each query by a random time (uniformly distributed) between min and max; set max to 0 to disable
time_delay_min = 0
time_delay_max = 0
[servers]
names = kresd, bind, unbound
# symbolic names of DNS servers under test
# separate multiple values by ,
# each symbolic name in [servers] section refers to config section
# containing IP address and port of particular server
[kresd]
ip = 127.0.0.1
port = 5353
transport = udp
graph_color = #00a2e2
restart_script = ./ci/respdiff/restart-kresd.sh
[bind]
ip = 127.0.0.1
port = 53533
transport = udp
graph_color = #e2a000
restart_script = ./ci/respdiff/restart-bind.sh
[unbound]
ip = 127.0.0.1
port = 53535
transport = udp
graph_color = #218669
restart_script = ./ci/respdiff/restart-unbound.sh
[diff]
# symbolic name of server under test
# other servers are used as reference when comparing answers from the target
target = kresd
# fields and comparison methods used when comparing two DNS messages
criteria = opcode, rcode, flags, question, answertypes, answerrrsigs
# other supported criteria values: authority, additional, edns, nsid
[report]
# diffsum reports mismatches in field values in this order
# if particular message has multiple mismatches, it is counted only once into category with highest weight
field_weights = timeout, malformed, opcode, question, rcode, flags, answertypes, answerrrsigs, answer, authority, additional, edns, nsid
#!/bin/sh
# SPDX-License-Identifier: GPL-3.0-or-later
service named restart
#!/bin/sh
# SPDX-License-Identifier: GPL-3.0-or-later
exec > /dev/null
exec 2>&1
killall -w kresd
rm -f -- *.mdb
$PREFIX/sbin/kresd -n -q -c $(pwd)/ci/respdiff/kresd.config >> kresd.log 2>&1 &
# wait until socket is receiving connections
sleep 1
#!/bin/sh
# SPDX-License-Identifier: GPL-3.0-or-later
service unbound restart
#!/usr/bin/env bash
# SPDX-License-Identifier: GPL-3.0-or-later
# $1 == udp/tcp/tls, it selects configuration file to use
# respdiff scripts must be present in /var/opt/respdiff
set -o errexit -o nounset -o xtrace
NDIFFREPRO=3
wget -qO- https://gitlab.nic.cz/knot/respdiff/snippets/238/raw?inline=false | head -n 5000 > /tmp/queries.txt
mkdir results
rm -rf respdiff.db
CONFIG="$(pwd)/ci/respdiff/respdiff-${1}.conf"
/var/opt/respdiff/qprep.py respdiff.db < /tmp/queries.txt
time /var/opt/respdiff/orchestrator.py respdiff.db -c "${CONFIG}"
time /var/opt/respdiff/msgdiff.py respdiff.db -c "${CONFIG}"
for i in $(seq $NDIFFREPRO); do
time /var/opt/respdiff/diffrepro.py -c "${CONFIG}" respdiff.db
done
/var/opt/respdiff/diffsum.py respdiff.db -c "${CONFIG}" > results/respdiff.txt
/var/opt/respdiff/histogram.py respdiff.db -c "${CONFIG}" -o results/histogram.svg
# minimize LMDB and log size so they can be effectively archived
mkdir results/respdiff.db
mdb_copy -c respdiff.db results/respdiff.db
xz -9 results/respdiff.db/data.mdb
xz kresd.log
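The transport argument selects which of the three respdiff configurations above is used. A hypothetical invocation (the script path is illustrative):

```
# compare kresd against BIND and Unbound over plain UDP
./ci/respdiff/run-respdiff-tests.sh udp
```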