Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found

Target

Select target project
No results found
Show changes
Showing
with 68073 additions and 160 deletions
# Assumptions
Our main design goal is that **the manager MUST NOT BE a required component.** Domains must be resolvable even in the absence of the manager. We want this because of backwards compatibility with the way `kresd` has worked before. But another good reason is that `kresd` has been battle-tested and is reasonably reliable. We can't say the same about the manager, as we do not have practical experience with it at the time of writing.
This goal leads to usage of external service managers like systemd. The manager is therefore "just" a tool for configuring service managers. If we crash, the `kresd` processes will keep running.
# When can we expect errors
The majority of errors can meaningfully happen only when changing configuration, which we do at different lifecycle stages of the manager. We change the configuration of the service managers on the manager's startup and shutdown, and when a change of configuration is requested (by a signal or HTTP request). Each of these situations can have different error-handling mechanisms to match users' expectations.
In addition to the errors mentioned above, we can sometimes detect that future configuration changes will fail. The manager has a periodic watchdog monitoring the health of the system and detecting failures before they actually happen.
To sum it up, errors can be raised:
* on configuration changes
* during startup
* in response to a config change request
* on shutdown
* proactively from our periodic watchdog
# How should we handle errors
## Errors on startup
**All errors should be fatal.** If something goes wrong, it's better to stop immediately before we make anything worse. Also, if we fail to start, the user will more likely notice.
## Error handling after config change requests
**All errors, that stem from the configuration change, should be reported and the manager should keep running.** Before the actual change though, watchdog should be manually invoked.
## Error handling during shutdown
**All errors should be fatal.** It does not make sense to try to correct any problems at that point.
## Error handling from watchdog
```
error_counter = 0
on error:
if error_counter > ERROR_COUNTER_THRESHOLD:
raise a fatal error
error_counter += 1
try to fix the situation
if unsuccessful, fatal error
every ERROR_COUNTER_DECREASE_INTERVAL:
if error_counter > 0:
error_counter -= 1
```
Reasonable constants are probably:
```
ERROR_COUNTER_THRESHOLD = 2
ERROR_COUNTER_DECREASE_INTERVAL = 30min
```
include config.mk
include platform.mk
# Targets
# Aggregate phony targets: `all` is the default build (prints the config
# overview first), `check` builds everything and runs the test suites.
all: info lib daemon modules
install: lib-install daemon-install modules-install etc-install
check: all tests
clean: contrib-clean lib-clean daemon-clean modules-clean tests-clean doc-clean
doc: doc-html
.PHONY: all install check clean doc info
# Options
# `make COVERAGE=1` enables gcov instrumentation on all compiled objects.
ifdef COVERAGE
BUILD_CFLAGS += --coverage
endif
# Dependencies
# find_lib/find_bin/find_alt/find_ver are presumably defined in platform.mk
# (included above) — TODO confirm; they set HAS_<name>, <name>_CFLAGS and
# <name>_LIBS via pkg-config-style discovery.  The version argument, where
# given, is the minimum required version.
$(eval $(call find_lib,libknot,2.1))
$(eval $(call find_lib,lmdb))
$(eval $(call find_lib,libzscanner,2.1))
$(eval $(call find_lib,libuv,1.0))
$(eval $(call find_lib,nettle))
$(eval $(call find_alt,lua,luajit))
$(eval $(call find_lib,cmocka))
$(eval $(call find_bin,doxygen))
$(eval $(call find_bin,sphinx-build))
$(eval $(call find_lib,libmemcached,1.0))
$(eval $(call find_lib,hiredis))
$(eval $(call find_lib,socket_wrapper))
$(eval $(call find_lib,libdnssec))
$(eval $(call find_lib,libsystemd,227))
$(eval $(call find_lib,gnutls))
# Find Go version and platform
# Parse `go version` output, e.g. "go version go1.6.2 linux/amd64":
# word 3 ("go1.6.2") yields the numeric version with dots and the "go"
# prefix stripped ("162"); word 4 ("linux/amd64") yields GO_PLATFORM
# ("amd64").  An absent/broken go binary leaves GO_VERSION = 0.
GO_VERSION := $(shell $(GO) version 2>/dev/null)
ifeq ($(GO_VERSION),)
GO_VERSION := 0
else
GO_PLATFORM := $(word 2,$(subst /, ,$(word 4,$(GO_VERSION))))
GO_VERSION := $(subst .,,$(subst go,,$(word 3,$(GO_VERSION))))
endif
$(eval $(call find_ver,go,$(GO_VERSION),16))
# Check if Go is able to build shared libraries
# Go >= 1.6 supports buildmode=c-shared on amd64/386/arm/arm64; Go 1.5 only
# on arm/amd64.  Anything else disables the Go module support.
ifeq ($(HAS_go),yes)
ifneq ($(GO_PLATFORM),$(filter $(GO_PLATFORM),amd64 386 arm arm64))
HAS_go := no
endif
else
$(eval $(call find_ver,go,$(GO_VERSION),15))
# Fixed: this previously read `ifeq ($HAS_go,yes)` — make expands `$H` (empty)
# so the condition compared "AS_go" to "yes" and could never be true.
ifeq ($(HAS_go),yes)
ifneq ($(GO_PLATFORM),$(filter $(GO_PLATFORM),arm amd64))
HAS_go := no
endif
endif
endif
# Work around luajit on OS X
# LuaJIT on 64-bit macOS requires the zero page to be resized and the image
# base moved above 4 GiB so its GC can use the low 32 bits of the address
# space (per LuaJIT installation docs).
ifeq ($(PLATFORM), Darwin)
ifneq (,$(findstring luajit, $(lua_LIBS)))
lua_LIBS += -pagezero_size 10000 -image_base 100000000
endif
endif
# Collect include flags from all discovered dependencies plus bundled contrib
# code (ccan modules and murmurhash3).
BUILD_CFLAGS += $(libknot_CFLAGS) $(libuv_CFLAGS) $(nettle_CFLAGS) $(cmocka_CFLAGS) $(lua_CFLAGS) $(libdnssec_CFLAGS) $(libsystemd_CFLAGS)
BUILD_CFLAGS += $(addprefix -I,$(wildcard contrib/ccan/*) contrib/murmurhash3)
# Check if it has libknot 2.3.0 and nettle to support DNS cookies
$(eval $(call find_alt,knot230,libknot,2.3))
ifeq ($(HAS_nettle)|$(HAS_knot230),yes|yes)
BUILD_CFLAGS += -DENABLE_COOKIES
ENABLE_COOKIES := yes
endif
# Overview
# Prints the resolved build configuration; runs as part of `all` so the user
# sees which optional dependencies were found.  (Recipe lines use $(info …)
# so they produce no shell invocations.)
# Fixed: the PREFIX line was printed twice.
info:
	$(info Target: Knot DNS Resolver $(MAJOR).$(MINOR).$(PATCH)-$(PLATFORM))
	$(info Compiler: $(CC) $(BUILD_CFLAGS))
	$(info )
	$(info Variables)
	$(info ---------)
	$(info HARDENING: $(HARDENING))
	$(info BUILDMODE: $(BUILDMODE))
	$(info PREFIX: $(PREFIX))
	$(info DESTDIR: $(DESTDIR))
	$(info BINDIR: $(BINDIR))
	$(info SBINDIR: $(SBINDIR))
	$(info LIBDIR: $(LIBDIR))
	$(info ETCDIR: $(ETCDIR))
	$(info INCLUDEDIR: $(INCLUDEDIR))
	$(info MODULEDIR: $(MODULEDIR))
	$(info )
	$(info Dependencies)
	$(info ------------)
	$(info [$(HAS_libknot)] libknot (lib))
	$(info [$(HAS_lmdb)] lmdb (lib))
	$(info [$(HAS_lua)] luajit (daemon))
	$(info [$(HAS_libuv)] libuv (daemon))
	$(info [$(HAS_gnutls)] libgnutls (daemon))
	$(info [$(HAS_nettle)] nettle (DNS cookies))
	$(info )
	$(info Optional)
	$(info --------)
	$(info [$(HAS_doxygen)] doxygen (doc))
	$(info [$(HAS_go)] go (modules/go, Go buildmode=c-shared support))
	$(info [$(HAS_libmemcached)] libmemcached (modules/memcached))
	$(info [$(HAS_hiredis)] hiredis (modules/redis))
	$(info [$(HAS_cmocka)] cmocka (tests/unit))
	$(info [$(HAS_libsystemd)] systemd (daemon))
	$(info )
# Installation directories
# Real file targets so other install rules can order-depend on them.
$(DESTDIR)$(MODULEDIR):
$(INSTALL) -d $@
# ETCDIR is created mode 0750 — presumably because it may hold private key
# material (TLS/TA files); confirm before relaxing.
$(DESTDIR)$(ETCDIR):
$(INSTALL) -m 0750 -d $@
# Sub-targets
include contrib/contrib.mk
include lib/lib.mk
include daemon/daemon.mk
include modules/modules.mk
include tests/tests.mk
include doc/doc.mk
include etc/etc.mk
This diff is collapsed.
# Knot DNS Resolver
# Knot Resolver
[![Build Status](https://img.shields.io/travis/CZ-NIC/knot-resolver/master.svg)](https://travis-ci.org/CZ-NIC/knot-resolver)
[![Coverage Status](https://img.shields.io/coveralls/CZ-NIC/knot-resolver.svg)](https://coveralls.io/r/CZ-NIC/knot-resolver)
[![Coverity](https://img.shields.io/coverity/scan/3912.svg)](https://scan.coverity.com/projects/3912)
[![Documentation Status](https://readthedocs.org/projects/knot-resolver/badge/?version=latest)](https://readthedocs.org/projects/knot-resolver/?badge=latest)
[![Build Status](https://gitlab.nic.cz/knot/knot-resolver/badges/nightly/pipeline.svg?x)](https://gitlab.nic.cz/knot/knot-resolver/commits/nightly)
[![Coverage Status](https://gitlab.nic.cz/knot/knot-resolver/badges/nightly/coverage.svg?x)](https://www.knot-resolver.cz/documentation/latest)
[![Packaging status](https://repology.org/badge/tiny-repos/knot-resolver.svg)](https://repology.org/project/knot-resolver/versions)
Knot Resolver is a full caching DNS resolver implementation. The core architecture is tiny and efficient, written in C and [LuaJIT][luajit], providing a foundation and a state-machine-like API for extension modules. There are three built-in modules - *iterator*, *validator* and *cache* - which provide the main functionality of the resolver. A few other modules are automatically loaded by default to extend the resolver's functionality.
The Knot DNS Resolver is a caching full resolver implementation written in C and [LuaJIT][luajit], both a resolver library and a daemon. The core architecture is tiny and efficient, and provides a foundation and
a state-machine like API for extensions. There are three of those built-in - *iterator*, *cache*, *validator*, and most of the [rich features](https://knot-resolver.readthedocs.io/en/latest/modules.html) are written in LuaJIT, Go and C. Batteries are included, but optional.
Since Knot Resolver version 6, it also includes a so-called [manager][manager]. It is a new component written in [Python][python] that hides the complexity of older versions and makes the resolver more user friendly. New features include, for example, declarative configuration in YAML format and an HTTP API for dynamic changes in the resolver.
The LuaJIT modules, support for DNS privacy and DNSSEC, and persistent cache with low memory footprint make it a great personal DNS resolver or a research tool to tap into DNS data. TL;DR it's the [OpenResty][openresty] of DNS.
Knot Resolver uses a [different scaling strategy][scaling] than the rest of the DNS resolvers - no threading, shared-nothing architecture (except MVCC cache which can be shared), which allows you to pin workers to available CPU cores and grow by self-replication. You can start and stop additional workers based on the contention without downtime, which is automated by the [manager][manager] by default.
Several cache backends (LMDB, Redis and Memcached), strong filtering rules, and auto-configuration with etcd make it a great large-scale resolver solution.
The LuaJIT modules, support for DNS privacy and DNSSEC, and persistent cache with low memory footprint make it a great personal DNS resolver or a research tool to tap into DNS data. Strong filtering rules, and auto-configuration with etcd make it a great large-scale resolver solution. It also has strong support for DNS over TCP, in particular TCP Fast-Open, query pipelining and deduplication, and response reordering.
The server adopts a [different scaling strategy][scaling] than the rest of the DNS recursors - no threading, shared-nothing architecture (except MVCC cache that may be shared) that allows you to pin instances on available CPU cores and grow by self-replication. You can start and stop additional nodes depending on the contention without downtime.
For more on using the resolver, see the [User Documentation][doc]. See the [Developer Documentation][doc-dev] for detailed architecture and development.
It also has strong support for DNS over TCP, notably TCP Fast-Open, query pipelining and deduplication, and response reordering.
## Packages
### Packages
The latest stable packages for various distributions are available in our
[upstream repository](https://pkg.labs.nic.cz/doc/?project=knot-resolver).
Follow the installation instructions to add this repository to your system.
Knot Resolver is packaged for Debian, Fedora, Ubuntu and [openSUSE](https://build.opensuse.org/package/show/server:dns/knot-resolver).
See [project page](https://www.knot-resolver.cz/pages/try.html) for more information.
Knot Resolver is also available from the following distributions' repositories:
### Building from sources
* [Fedora and Fedora EPEL](https://src.fedoraproject.org/rpms/knot-resolver)
* [Debian stable](https://packages.debian.org/stable/knot-resolver),
[Debian testing](https://packages.debian.org/testing/knot-resolver),
[Debian unstable](https://packages.debian.org/sid/knot-resolver)
* [Ubuntu](https://packages.ubuntu.com/jammy/knot-resolver)
* [Arch Linux](https://archlinux.org/packages/extra/x86_64/knot-resolver/)
* [Alpine Linux](https://pkgs.alpinelinux.org/packages?name=knot-resolver)
The Knot DNS Resolver [depends][depends] on the 2.1 version of the Knot DNS library, [LuaJIT][luajit] and [libuv][libuv].
See the [Building project][depends] documentation page for more information.
### Packaging
### Docker image
The project uses [`apkg`](https://gitlab.nic.cz/packaging/apkg) for packaging.
See [`distro/README.md`](distro/README.md) for packaging specific instructions.
This is simple and doesn't require any dependencies or system modifications, just run:
## Building from sources
Knot Resolver mainly depends on [KnotDNS][knot-dns] libraries, [LuaJIT][luajit], [libuv][libuv] and [Python][python].
See the [Building project][build] documentation page for more information.
## Running
By default, Knot Resolver comes with [systemd][systemd] integration and you just need to start its service. It requires no configuration changes to run a server on localhost.
```
$ docker run -it cznic/knot-resolver
# systemctl start knot-resolver
```
See the build page [hub.docker.com/r/cznic/knot-resolver](https://hub.docker.com/r/cznic/knot-resolver/) for more information and options.
See the documentation at [knot-resolver.cz/documentation/latest][doc] for more information.
### Running
## Running the Docker image
The project builds a resolver library in the `lib` directory, and a daemon in the `daemon` directory. It requires no configuration or parameters to run a server on localhost.
Running the Docker image is simple and doesn't require any dependencies or system modifications, just run:
```
$ kresd
$ docker run -Pit cznic/knot-resolver
```
See the documentation at [knot-resolver.readthedocs.io][doc] for more options.
The images are meant as an easy way to try the resolver, and they're not designed for production use.
[depends]: https://knot-resolver.readthedocs.io/en/latest/build.html
[doc]: https://knot-resolver.readthedocs.io/en/latest/index.html
[scaling]: https://knot-resolver.readthedocs.io/en/latest/daemon.html#scaling-out
[deckard]: https://gitlab.labs.nic.cz/knot/deckard
[luajit]: http://luajit.org/
[libuv]: https://github.com/libuv/libuv
[openresty]: https://openresty.org/
## Contacting us
### Contacting us
- [GitLab issues](https://gitlab.nic.cz/knot/knot-resolver/issues) (you may authenticate via GitHub)
- [mailing list](https://lists.nic.cz/postorius/lists/knot-resolver-announce.lists.nic.cz/)
- [![Join the chat at https://gitter.im/CZ-NIC/knot-resolver](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/CZ-NIC/knot-resolver?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[![Join the chat at https://gitter.im/CZ-NIC/knot-resolver](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/CZ-NIC/knot-resolver?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[build]: https://www.knot-resolver.cz/documentation/latest/dev/build.html
[doc]: https://www.knot-resolver.cz/documentation/latest/
[doc-dev]: https://www.knot-resolver.cz/documentation/latest/dev
[knot-dns]: https://www.knot-dns.cz/
[luajit]: https://luajit.org/
[libuv]: http://libuv.org
[python]: https://www.python.org/
[systemd]: https://systemd.io/
[scaling]: https://www.knot-resolver.cz/documentation/latest/config-multiple-workers.html
[manager]: https://www.knot-resolver.cz/documentation/latest/dev/architecture.html
/* Copyright (C) CZ.NIC, z.s.p.o. <knot-resolver@labs.nic.cz>
* SPDX-License-Identifier: GPL-3.0-or-later
*/
#include <math.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/time.h>
#include <unistd.h>
#include "contrib/ucw/lib.h"
#include "daemon/engine.h"
#include "lib/selection.h"
// The benchmarked LRU type: maps string keys to an unsigned counter.
typedef lru_t(unsigned) lru_bench_t;
// Output convention: p_out() writes machine-readable CSV fields to stdout
// (flushed immediately), p_err() writes human-readable labels to stderr,
// so `bench 2>/dev/null` yields pure CSV.
#define p_out(...) do { \
printf(__VA_ARGS__); \
(void)fflush(stdout); \
} while (0)
#define p_err(...) ((void)fprintf(stderr, __VA_ARGS__))
#ifndef LRU_RTT_SIZE
#define LRU_RTT_SIZE 65536 /**< NS RTT cache size */
#endif
/** Report the failed operation with the current errno message and abort
 *  the benchmark with exit status 1.  Never returns. */
static int die(const char *cause)
{
	const char *reason = strerror(errno);
	(void)fprintf(stderr, "%s: %s\n", cause, reason);
	exit(1);
}
/** Read the current wall-clock time into *tv; aborts via die() on failure. */
static void time_get(struct timeval *tv)
{
	const int err = gettimeofday(tv, NULL);
	if (err)
		die("gettimeofday");
}
/** Emit the time elapsed since *tv (seconds) and the resulting throughput
 *  in kops/s as two CSV fields (labels go to stderr via p_err). */
static void time_print_diff(struct timeval *tv, size_t op_count)
{
	struct timeval elapsed;
	time_get(&elapsed);
	elapsed.tv_sec -= tv->tv_sec;
	elapsed.tv_usec -= tv->tv_usec;
	if (elapsed.tv_usec < 0) {
		// borrow one second for the negative microsecond part
		elapsed.tv_sec -= 1;
		elapsed.tv_usec += 1000000;
	}
	const double seconds = elapsed.tv_sec + (double)(elapsed.tv_usec) / 1000000;
	size_t speed = round((double)(op_count) / 1000 / seconds);
	p_out("%ld.%06d", elapsed.tv_sec, (int)elapsed.tv_usec);
	p_err(" s"); p_out(","); p_err("\t");
	p_out("%zd", speed);
	p_err(" kops/s"); p_out(","); p_err("\n");
}
/// initialize seed for random()
/// "-" seeds from the current time; otherwise the string must be at least
/// 12 chars: the first 4 form the seed, the rest (>= 8) back initstate()'s
/// state array.  Returns 0 on success, -1 if the string is too short.
static int ssrandom(char *s)
{
	if (*s == '-') { // initialize from time
		struct timeval now;
		time_get(&now);
		srandom(now.tv_sec * 1000000 + now.tv_usec);
		return 0;
	}
	// initialize from a string
	size_t len = strlen(s);
	if (len < 12)
		return(-1);
	// Fixed: go through unsigned char — left-shifting a (possibly negative)
	// plain char by 24 is undefined behavior, and sign extension from
	// s[1]/s[2]/s[3] would clobber the lower seed bytes.
	const unsigned char *u = (const unsigned char *)s;
	unsigned seed = u[0] | u[1] << 8 | u[2] << 16 | (unsigned)u[3] << 24;
	initstate(seed, s+4, len-4);
	return 0;
}
/** One input line: a pointer into the shared file buffer plus its length
 *  (the newline was replaced by NUL when the buffer was split). */
struct key {
size_t len;
char *chars;
};
/// read lines from a file and reorder them randomly
/// Loads the whole file into one buffer, splits it at newlines into an array
/// of struct key, then shuffles the array.  *count receives the line count;
/// *pfree receives the backing buffer (caller must free() both it and the
/// returned array).  Any I/O failure aborts via die().
static struct key * read_lines(const char *fname, size_t *count, char **pfree)
{
// read the file at once
int fd = open(fname, O_RDONLY);
if (fd < 0)
die("open");
struct stat st;
if (fstat(fd, &st) < 0)
die("stat");
size_t flen = (size_t)st.st_size;
char *fbuf = malloc(flen + 1);
*pfree = fbuf;
if (fbuf == NULL)
die("malloc");
// NOTE(review): a single read() may legally return fewer than flen bytes;
// short reads are not retried here.
if (read(fd, fbuf, flen) < 0)
die("read");
close(fd);
fbuf[flen] = '\0';
// get pointers to individual lines
// Each '\n' becomes a NUL terminator, so every line is a C string in place.
size_t lines = 0;
for (size_t i = 0; i < flen; ++i)
if (fbuf[i] == '\n') {
fbuf[i] = 0;
++lines;
}
*count = lines;
// NOTE(review): divides by `lines` — a file with no newline at all would
// divide by zero here.
size_t avg_len = (flen + 1) / lines - 1;
p_err("lines read: ");
p_out("%zu,", lines);
p_err("\taverage length ");
p_out("%zu,", avg_len);
// NOTE(review): calloc result is not checked before dereferencing.
struct key *result = calloc(lines, sizeof(struct key));
result[0].chars = fbuf;
for (size_t l = 0; l < lines; ++l) {
size_t i = 0;
while (result[l].chars[i])
++i;
result[l].len = i;
if (l + 1 < lines)
result[l + 1].chars = result[l].chars + i + 1;
}
//return result;
// reorder the lines randomly (via "random select-sort")
// note: this makes their order non-sequential *in memory*
if (RAND_MAX < lines)
die("RAND_MAX is too small");
for (size_t i = 0; i < lines - 1; ++i) { // swap i with random j >= i
size_t j = i + random() % (lines - i);
if (j != i) {
struct key tmp = result[i];
result[i] = result[j];
result[j] = tmp;
}
}
return result;
}
// compatibility layer for the older lru_* names
// When building against the old LRU API (no lru_create), map the new call
// names used below onto the legacy lru_set/lru_get.
#ifndef lru_create
#define lru_get_new lru_set
#define lru_get_try lru_get
#endif
/** Print command-line usage to stderr and exit with status 1. */
static void usage(const char *progname)
{
p_err("usage: %s <log_count> <input> <seed> [lru_size]\n", progname);
p_err("The seed must be at least 12 characters or \"-\".\n"
"Standard output contains csv-formatted lines.\n");
exit(1);
}
/** Benchmark driver: argv = <log_count> <input file> <seed> [lru_size].
 *  Performs 2^log_count insertions, then 2^log_count lookups over the
 *  shuffled input lines, printing timing/throughput CSV to stdout. */
int main(int argc, char ** argv)
{
if (argc != 4 && argc != 5)
usage(argv[0]);
if (ssrandom(argv[3]) < 0)
usage(argv[0]);
p_out("\n");
size_t key_count;
char *data_to_free = NULL;
struct key *keys = read_lines(argv[2], &key_count, &data_to_free);
size_t run_count;
{
size_t run_log = atoi(argv[1]); // NOLINT: atoi is fine for this tool...
assert(run_log < 64);
run_count = 1ULL << run_log;
p_err("\ntest run length:\t2^");
p_out("%zd,", run_log);
}
struct timeval time;
const int lru_size = argc > 4 ? atoi(argv[4]) : LRU_RTT_SIZE; // NOLINT: ditto atoi
lru_bench_t *lru;
// Two LRU API generations: new API allocates via lru_create(), old API
// needs manual malloc + lru_init.
#ifdef lru_create
lru_create(&lru, lru_size, NULL, NULL);
#else
lru = malloc(lru_size(lru_bench_t, lru_size));
if (lru)
lru_init(lru, lru_size);
#endif
if (!lru)
die("malloc");
p_err("\nLRU capacity:\t");
p_out("%d,",
#ifdef lru_capacity
lru_capacity(lru) // report real capacity, if provided
#else
lru_size
#endif
);
size_t miss = 0;
p_err("\nload everything:\t");
time_get(&time);
// ki walks the key array downwards and wraps: when it hits 0 it is reset
// to key_count, and the loop's `--ki` then lands on key_count - 1.
for (size_t i = 0, ki = key_count - 1; i < run_count; ++i, --ki) {
unsigned *r = lru_get_new(lru, keys[ki].chars, keys[ki].len, NULL);
if (!r || *r == 0)
++miss;
if (r)
*r = 1;
if (unlikely(ki == 0))
ki = key_count;
}
time_print_diff(&time, run_count);
p_err("LRU misses [%%]:\t");
// +50 rounds the percentage to the nearest integer
p_out("%zd,",(miss * 100 + 50) / run_count);
p_err("\n");
unsigned accum = 0; // compute something to make sure compiler can't remove code
p_err("search everything:\t");
time_get(&time);
for (size_t i = 0, ki = key_count - 1; i < run_count; ++i, --ki) {
unsigned *r = lru_get_try(lru, keys[ki].chars, keys[ki].len);
if (r)
accum += *r;
if (unlikely(ki == 0))
ki = key_count;
}
time_print_diff(&time, run_count);
p_err("ignore: %u\n", accum);
// free memory, at least with new LRU
#ifdef lru_create
lru_free(lru);
#endif
free(keys);
free(data_to_free);
return 0;
}
This diff is collapsed.
# bench
# SPDX-License-Identifier: GPL-3.0-or-later
# Build definition for the bench_lru micro-benchmark and the `bench` run target.
bench_lru_src = files([
'bench_lru.c',
])
cc = meson.get_compiler('c')
# libm is optional: on some platforms the math functions live in libc.
m_dep = cc.find_library('m', required : false)
bench_lru = executable(
'bench_lru',
bench_lru_src,
dependencies: [
contrib_dep,
libkres_dep,
m_dep,
],
)
# `ninja bench` delegates to the wrapper script (not a build step).
run_target(
'bench',
command: '../scripts/meson/bench.sh',
)
from typing import Any, Dict
from setuptools import Extension
def build(setup_kwargs: Dict[Any, Any]) -> None:
    """Poetry build hook: register the supervisord notify plugin C extension.

    Mutates ``setup_kwargs`` in place by setting its ``ext_modules`` entry.
    """
    notify_extension = Extension(
        name="knot_resolver.controller.supervisord.plugin.notify",
        sources=["python/knot_resolver/controller/supervisord/plugin/notifymodule.c"],
    )
    setup_kwargs["ext_modules"] = [notify_extension]
# Verify that the pinned Deckard submodule commit is already part of
# Deckard's upstream master branch (otherwise CI would break for everyone).
DECKARD_COMMIT=$(git ls-tree HEAD:tests/integration/ | grep commit | grep deckard | cut -f1 | cut -f3 '-d ')
DECKARD_PATH="tests/integration/deckard"
# Quote expansions to avoid word splitting; $DECKARD_COMMIT could be empty
# if the ls-tree pipeline finds nothing.
pushd "$DECKARD_PATH" > /dev/null
if git merge-base --is-ancestor "$DECKARD_COMMIT" origin/master; then
	echo "Deckard submodule commit is in its master branch. All good in the hood."
	exit 0
else
	echo "Deckard submodule commit $DECKARD_COMMIT is not in Deckard's master branch."
	echo "This WILL cause CI breakages so make sure your changes in Deckard are merged"
	echo "or point the submodule to another commit."
	exit 1
fi
#!/bin/sh
# Post-process JUnit XML files given as arguments (edited in place).
# 1) Put each </testcase> on its own line so the range match below works.
sed 's|</testcase>|</testcase>\n|g' -i "$@"
# 2) For testcases marked with an empty <failure /> element, rewrap their
#    <system-out>/<system-err> content as <failure> so CI displays it,
#    then drop the now-redundant <failure /> markers.
sed -e '/<failure \/>/,/<\/testcase>/s/<\(\/\?\)system-\(out\|err\)>/<\1failure>/g' \
-e 's/<failure \/>//g' \
-i "$@"
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-3.0-or-later
import json
import time
import sys
import requests
# GitHub Actions polling parameters.
BRANCH_API_ENDPOINT = "https://api.github.com/repos/CZ-NIC/knot-resolver/actions/runs?branch={branch}" # noqa
TIMEOUT = 20*60 # 20 mins max
POLL_DELAY = 60 # seconds between API polls
SYNC_TIMEOUT = 10*60 # grace period for the latest run's head_sha to match ours
def exit(msg='', html_url='', code=1):
    """Print msg to stderr and the run URL to stdout, then exit with code.

    Shadows the builtin on purpose: every exit path of this script should
    report a message and the related GH Actions URL.
    """
    sys.stderr.write(msg + '\n')
    print(html_url)
    sys.exit(code)
# Main polling loop: wait (up to TIMEOUT) for the GH Actions workflow run on
# branch sys.argv[1] / commit sys.argv[2] to conclude, then exit with a code
# reflecting the conclusion.
end_time = time.time() + TIMEOUT
sync_timeout = time.time() + SYNC_TIMEOUT
while time.time() < end_time:
response = requests.get(
BRANCH_API_ENDPOINT.format(branch=sys.argv[1]),
headers={"Accept": "application/vnd.github.v3+json"})
if response.status_code == 404:
pass # not created yet?
elif response.status_code == 200:
data = json.loads(response.content.decode('utf-8'))
try:
# NOTE(review): the comment says "two runs" but range(0, 1) iterates
# only once, so only workflow_runs[0] is inspected — confirm intended.
for i in range(0, 1): # two runs ATM
run = data['workflow_runs'][i]
conclusion = run['conclusion']
html_url = run['html_url']
commit_sha = run['head_sha']
except (KeyError, IndexError):
# run list not populated yet — retry after a delay
time.sleep(POLL_DELAY)
continue
if commit_sha != sys.argv[2]:
# newest run belongs to a different commit; allow SYNC_TIMEOUT for
# GH to register ours before giving up
if time.time() < sync_timeout:
time.sleep(POLL_DELAY)
continue
exit("Fetched invalid GH Action: commit mismatch. Re-run or push again?")
if conclusion is None:
pass
if conclusion == "success":
exit("SUCCESS!", html_url, code=0)
elif isinstance(conclusion, str):
# failure, neutral, cancelled, skipped, timed_out, or action_required
exit("GitHub Actions Conclusion: {}!".format(conclusion.upper()), html_url)
else:
exit("API Response Code: {}".format(response.status_code), code=2)
time.sleep(POLL_DELAY)
exit("Timed out!")
#!/bin/sh
# Fail if any plain assert() remains in tracked C sources/headers, except in
# the exempted directories (contrib, bench, tests, ratelimiting test, kru).
grep '\<assert\>' -- $(git ls-files | grep '\.[hc]$' | grep -vE '^(contrib|bench|tests|daemon/ratelimiting.test)/|^lib/kru')
# grep exits 1 when it found nothing — that is the success condition here.
test $? -eq 1
# Child pipeline: build distro packages with apkg (pkgbuild stage), then
# install and smoke-test them (pkgtest stage).
default:
interruptible: true
stages:
- pkgbuild
- pkgtest
# pkgbuild {{{
# Shared template for all package-build jobs; runs in LXC on amd64 and pulls
# the source archive artifact from the parent pipeline.
.pkgbuild: &pkgbuild
stage: pkgbuild
tags:
- lxc
- amd64
before_script:
- git config --global user.name CI
# NOTE(review): "ci@nic" looks like a truncated address (ci@nic.cz?) — confirm.
- git config --global user.email ci@nic
needs: # https://gitlab.nic.cz/help/ci/yaml/README.md#artifact-downloads-to-child-pipelines
- pipeline: $PARENT_PIPELINE_ID
job: archive
artifacts:
when: always
expire_in: '1 day'
paths:
- pkg/
# Script fragment: install apkg and build packages for the current distro.
.apkgbuild: &apkgbuild # new jinja2 breaks docs (sphinx/breathe)
- pip3 install -U apkg 'jinja2<3.1'
- apkg build-dep -y
- apkg build
# Script fragment: enable the project's OBS repository on Debian/Ubuntu.
.pkgdebrepo: &pkgdebrepo
- apt-get update
- apt-get install -y curl gnupg2
- echo "deb http://download.opensuse.org/repositories/home:/CZ-NIC:/$OBS_REPO/$DISTROTEST_REPO/ /" > /etc/apt/sources.list.d/obs.list
- curl -fsSL "https://download.opensuse.org/repositories/home:CZ-NIC:$OBS_REPO/$DISTROTEST_REPO/Release.key" | gpg --dearmor > /etc/apt/trusted.gpg.d/obs.gpg
- apt-get update
# Script fragment: Debian-family build = OBS repo + devscripts + apkg steps.
.debpkgbuild: &debpkgbuild
- *pkgdebrepo
- apt-get install -y python3-pip devscripts
- *apkgbuild
# Per-distro package build jobs — each extends .pkgbuild and runs the
# distro-appropriate install + apkg steps.
centos-7:pkgbuild:
<<: *pkgbuild
image: $CI_REGISTRY/labs/lxc-gitlab-runner/centos-7
# Overrides the template's before_script, so the git identity is repeated here.
before_script:
- export LC_ALL=en_US.UTF-8
- git config --global user.name CI
- git config --global user.email ci@nic
script:
- yum install -y rpm-build python3-pip epel-release
- *apkgbuild
debian-10:pkgbuild:
<<: *pkgbuild
image: $CI_REGISTRY/labs/lxc-gitlab-runner/debian-10
variables:
OBS_REPO: knot-resolver-build
DISTROTEST_REPO: Debian_10
script:
- *debpkgbuild
debian-11:pkgbuild:
<<: *pkgbuild
image: $CI_REGISTRY/labs/lxc-gitlab-runner/debian-11
variables:
OBS_REPO: knot-resolver-build
DISTROTEST_REPO: Debian_11
script:
- *debpkgbuild
fedora-34:pkgbuild:
<<: *pkgbuild
image: $CI_REGISTRY/labs/lxc-gitlab-runner/fedora-34
script:
- dnf install -y rpm-build python3-pip
- *apkgbuild
fedora-35:pkgbuild:
<<: *pkgbuild
image: $CI_REGISTRY/labs/lxc-gitlab-runner/fedora-35
script:
- dnf install -y rpm-build python3-pip
- *apkgbuild
rocky-8:pkgbuild:
<<: *pkgbuild
image: $CI_REGISTRY/labs/lxc-gitlab-runner/rocky-8
script:
- dnf install -y rpm-build python3-pip epel-release dnf-plugins-core
# powertools carries build deps not in the default Rocky repos
- dnf config-manager --set-enabled powertools
- *apkgbuild
ubuntu-18.04:pkgbuild:
<<: *pkgbuild
image: $CI_REGISTRY/labs/lxc-gitlab-runner/ubuntu-18.04
variables:
OBS_REPO: knot-resolver-build
DISTROTEST_REPO: xUbuntu_18.04
script:
- *debpkgbuild
ubuntu-20.04:pkgbuild:
<<: *pkgbuild
image: $CI_REGISTRY/labs/lxc-gitlab-runner/ubuntu-20.04
variables:
OBS_REPO: knot-resolver-build
DISTROTEST_REPO: xUbuntu_20.04
script:
- *debpkgbuild
nixos-unstable:pkgbuild:
<<: *pkgbuild
# We do NOT use LXC, for now at least.
parallel:
matrix:
- PLATFORM: [ amd64, arm64 ]
tags:
- docker
- linux
- ${PLATFORM}
# https://github.com/NixOS/nix/issues/10648#issuecomment-2101993746
image: docker.io/nixos/nix:latest-${PLATFORM}
variables:
NIX_PATH: nixpkgs=https://github.com/nixos/nixpkgs/archive/nixos-unstable.tar.gz
# Intentionally empty: clears the template's git-config before_script.
before_script:
script:
- nix-build '<nixpkgs>' -QA apkg
# the image auto-detects as alpine distro
# If apkg version differs (too much), it will fail to reuse archive and fail.
- ./result/bin/apkg install -d nix
- kresd --version
# }}}
# pkgtest {{{
# Shared template for install-and-smoke-test jobs (consume pkg/ artifacts).
.pkgtest: &pkgtest
stage: pkgtest
tags:
- lxc
- amd64
# Script fragment: install the freshly built .deb packages (excluding module,
# debug and devel subpackages), start kresd and verify it answers a query.
.debpkgtest: &debpkgtest
- *pkgdebrepo
- apt-get install -y knot-dnsutils
- apt-get install -y $(find ./pkg/pkgs -name '*.deb' | grep -v module | grep -v debug | grep -v devel)
- systemctl start kresd@1
- kdig @127.0.0.1 nic.cz | grep -qi NOERROR
centos-7:pkgtest:
<<: *pkgtest
needs:
- centos-7:pkgbuild
image: $CI_REGISTRY/labs/lxc-gitlab-runner/centos-7
before_script:
- export LC_ALL=en_US.UTF-8
script:
- yum install -y epel-release
- yum install -y knot-utils findutils
- yum install -y $(find ./pkg/pkgs -name '*.rpm' | grep -v module | grep -v debug | grep -v devel)
- systemctl start kresd@1
- kdig @127.0.0.1 nic.cz | grep -qi NOERROR
debian-10:pkgtest:
<<: *pkgtest
needs:
- debian-10:pkgbuild
image: $CI_REGISTRY/labs/lxc-gitlab-runner/debian-10
variables:
OBS_REPO: knot-resolver-build
DISTROTEST_REPO: Debian_10
script:
- *debpkgtest
debian-11:pkgtest:
<<: *pkgtest
needs:
- debian-11:pkgbuild
image: $CI_REGISTRY/labs/lxc-gitlab-runner/debian-11
variables:
OBS_REPO: knot-resolver-build
DISTROTEST_REPO: Debian_11
script:
- *debpkgtest
fedora-34:pkgtest:
<<: *pkgtest
needs:
- fedora-34:pkgbuild
image: $CI_REGISTRY/labs/lxc-gitlab-runner/fedora-34
script:
- dnf install -y knot-utils findutils
- dnf install -y $(find ./pkg/pkgs -name '*.rpm' | grep -v module | grep -v debug | grep -v devel)
- systemctl start kresd@1
- kdig @127.0.0.1 nic.cz | grep -qi NOERROR
fedora-35:pkgtest:
<<: *pkgtest
needs:
- fedora-35:pkgbuild
image: $CI_REGISTRY/labs/lxc-gitlab-runner/fedora-35
script:
- dnf install -y knot-utils findutils
- dnf install -y $(find ./pkg/pkgs -name '*.rpm' | grep -v module | grep -v debug | grep -v devel)
- systemctl start kresd@1
- kdig @127.0.0.1 nic.cz | grep -qi NOERROR
rocky-8:pkgtest:
<<: *pkgtest
needs:
- rocky-8:pkgbuild
image: $CI_REGISTRY/labs/lxc-gitlab-runner/rocky-8
script:
- dnf install -y epel-release
- dnf install -y knot-utils findutils
- dnf install -y $(find ./pkg/pkgs -name '*.rpm' | grep -v module | grep -v debug | grep -v devel)
- systemctl start kresd@1
- kdig @127.0.0.1 nic.cz | grep -qi NOERROR
ubuntu-18.04:pkgtest:
<<: *pkgtest
needs:
- ubuntu-18.04:pkgbuild
image: $CI_REGISTRY/labs/lxc-gitlab-runner/ubuntu-18.04
variables:
OBS_REPO: knot-resolver-build
DISTROTEST_REPO: xUbuntu_18.04
script:
- *debpkgtest
ubuntu-20.04:pkgtest:
<<: *pkgtest
needs:
- ubuntu-20.04:pkgbuild
image: $CI_REGISTRY/labs/lxc-gitlab-runner/ubuntu-20.04
variables:
OBS_REPO: knot-resolver-build
DISTROTEST_REPO: xUbuntu_20.04
script:
- *debpkgtest
# }}}
-- SPDX-License-Identifier: GPL-3.0-or-later
-- Refer to manual: https://www.knot-resolver.cz/documentation/latest/
-- kresd configuration used by the CI respdiff runs.
-- Listen on localhost and external interface
net.listen('127.0.0.1', 5353)
net.listen('127.0.0.1', 8853, { tls = true })
-- Keep respdiff traffic on IPv4 loopback only.
net.ipv6=false
-- Auto-maintain root TA
trust_anchors.add_file('.local/etc/knot-resolver/root.keys')
-- 1 GiB cache
cache.size = 1024 * MB
-- Load Useful modules
modules = {
'workarounds < iterate',
'policy', -- Block queries to local zones/bad sites
'view', -- Views for certain clients
'hints > iterate', -- Allow loading /etc/hosts or custom root hints
'stats', -- Track internal statistics
}
-- avoid TC flags returned to respdiff
local _, up_bs = net.bufsize()
net.bufsize(4096, up_bs)
log_level('debug')
# SPDX-License-Identifier: GPL-3.0-or-later
[sendrecv]
# in seconds
timeout = 11
# number of queries to run simultaneously
jobs = 64
# in seconds (float); delay each query by a random time (uniformly distributed) between min and max; set max to 0 to disable
time_delay_min = 0
time_delay_max = 0
[servers]
names = kresd, bind, unbound
# symbolic names of DNS servers under test
# separate multiple values by ,
# each symbolic name in [servers] section refers to config section
# containing IP address and port of particular server
[kresd]
# server under test -- this config is the TCP variant (plain DNS on 5353)
ip = 127.0.0.1
port = 5353
transport = tcp
graph_color = #00a2e2
restart_script = ./ci/respdiff/restart-kresd.sh
[bind]
ip = 127.0.0.1
port = 53533
transport = udp
graph_color = #e2a000
restart_script = ./ci/respdiff/restart-bind.sh
[unbound]
ip = 127.0.0.1
port = 53535
transport = udp
graph_color = #218669
restart_script = ./ci/respdiff/restart-unbound.sh
[diff]
# symbolic name of server under test
# other servers are used as reference when comparing answers from the target
target = kresd
# fields and comparison methods used when comparing two DNS messages
criteria = opcode, rcode, flags, question, answertypes, answerrrsigs
# other supported criteria values: authority, additional, edns, nsid
[report]
# diffsum reports mismatches in field values in this order
# if particular message has multiple mismatches, it is counted only once into category with highest weight
field_weights = timeout, malformed, opcode, question, rcode, flags, answertypes, answerrrsigs, answer, authority, additional, edns, nsid
# SPDX-License-Identifier: GPL-3.0-or-later
[sendrecv]
# in seconds
timeout = 11
# number of queries to run simultaneously
jobs = 64
# in seconds (float); delay each query by a random time (uniformly distributed) between min and max; set max to 0 to disable
time_delay_min = 0
time_delay_max = 0
[servers]
names = kresd, bind, unbound
# symbolic names of DNS servers under test
# separate multiple values by ,
# each symbolic name in [servers] section refers to config section
# containing IP address and port of particular server
[kresd]
# server under test -- this config is the TLS variant (DNS-over-TLS on 8853)
ip = 127.0.0.1
port = 8853
transport = tls
graph_color = #00a2e2
restart_script = ./ci/respdiff/restart-kresd.sh
[bind]
ip = 127.0.0.1
port = 53533
transport = udp
graph_color = #e2a000
restart_script = ./ci/respdiff/restart-bind.sh
[unbound]
ip = 127.0.0.1
port = 53535
transport = udp
graph_color = #218669
restart_script = ./ci/respdiff/restart-unbound.sh
[diff]
# symbolic name of server under test
# other servers are used as reference when comparing answers from the target
target = kresd
# fields and comparison methods used when comparing two DNS messages
criteria = opcode, rcode, flags, question, answertypes, answerrrsigs
# other supported criteria values: authority, additional, edns, nsid
[report]
# diffsum reports mismatches in field values in this order
# if particular message has multiple mismatches, it is counted only once into category with highest weight
field_weights = timeout, malformed, opcode, question, rcode, flags, answertypes, answerrrsigs, answer, authority, additional, edns, nsid
# SPDX-License-Identifier: GPL-3.0-or-later
[sendrecv]
# in seconds
timeout = 11
# number of queries to run simultaneously
jobs = 64
# in seconds (float); delay each query by a random time (uniformly distributed) between min and max; set max to 0 to disable
time_delay_min = 0
time_delay_max = 0
[servers]
names = kresd, bind, unbound
# symbolic names of DNS servers under test
# separate multiple values by ,
# each symbolic name in [servers] section refers to config section
# containing IP address and port of particular server
[kresd]
# server under test -- this config is the UDP variant (plain DNS on 5353)
ip = 127.0.0.1
port = 5353
transport = udp
graph_color = #00a2e2
restart_script = ./ci/respdiff/restart-kresd.sh
[bind]
ip = 127.0.0.1
port = 53533
transport = udp
graph_color = #e2a000
restart_script = ./ci/respdiff/restart-bind.sh
[unbound]
ip = 127.0.0.1
port = 53535
transport = udp
graph_color = #218669
restart_script = ./ci/respdiff/restart-unbound.sh
[diff]
# symbolic name of server under test
# other servers are used as reference when comparing answers from the target
target = kresd
# fields and comparison methods used when comparing two DNS messages
criteria = opcode, rcode, flags, question, answertypes, answerrrsigs
# other supported criteria values: authority, additional, edns, nsid
[report]
# diffsum reports mismatches in field values in this order
# if particular message has multiple mismatches, it is counted only once into category with highest weight
field_weights = timeout, malformed, opcode, question, rcode, flags, answertypes, answerrrsigs, answer, authority, additional, edns, nsid
#!/bin/sh
# SPDX-License-Identifier: GPL-3.0-or-later
# Restart the BIND reference server between respdiff runs.
service named restart
#!/bin/sh
# SPDX-License-Identifier: GPL-3.0-or-later
# Restart the locally built kresd with a clean cache for respdiff runs.
exec > /dev/null
exec 2>&1
killall -w kresd
# Fixed: the glob was quoted ('*.mdb'), so it matched only a literal file
# named "*.mdb" and the stale cache database was never removed.
rm -f -- *.mdb
# Fixed: '&>>' is a bashism; use POSIX redirection since this is #!/bin/sh.
$PREFIX/sbin/kresd -n -q -c "$(pwd)/ci/respdiff/kresd.config" >> kresd.log 2>&1 &
# wait until socket is receiving connections
sleep 1
#!/bin/sh
# SPDX-License-Identifier: GPL-3.0-or-later
# Restart the Unbound reference server between respdiff runs.
service unbound restart