Compare revisions

Showing 1580 additions and 2433 deletions
SPDXVersion: SPDX-2.1
DataLicense: CC0-1.0
SPDXID: SPDXRef-DOCUMENT
DocumentName: ccan-json
DocumentNamespace: http://spdx.org/spdxdocs/spdx-v2.1-d9b4db4c-062f-4add-89b6-f603224f5a2c
PackageName: json
PackageDownloadLocation: git+https://github.com/aappleby/smhasher.git@73e075b203d9c76cd1e20d6c8907c2983d653f33#MurmurHash3.cpp
PackageOriginator: Person: Austin Appleby (aappleby@gmail.com)
PackageLicenseDeclared: CC0-1.0
/* $OpenBSD: siphash.c,v 1.6 2017/04/12 17:41:49 deraadt Exp $ */
/*-
* Copyright (c) 2013 Andre Oppermann <andre@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* SipHash is a family of PRFs SipHash-c-d where the integer parameters c and d
* are the number of compression rounds and the number of finalization rounds.
* A compression round is identical to a finalization round and this round
* function is called SipRound. Given a 128-bit key k and a (possibly empty)
* byte string m, SipHash-c-d returns a 64-bit value SipHash-c-d(k; m).
*
* Implemented from the paper "SipHash: a fast short-input PRF", 2012.09.18,
* by Jean-Philippe Aumasson and Daniel J. Bernstein,
* Permanent Document ID b9a943a805fbfc6fde808af9fc0ecdfa
* https://131002.net/siphash/siphash.pdf
* https://131002.net/siphash/
*/
#include <string.h>
#include "libknot/endian.h"
#include "contrib/openbsd/siphash.h"
static void SipHash_CRounds(SIPHASH_CTX *, int);
static void SipHash_Rounds(SIPHASH_CTX *, int);
void
SipHash_Init(SIPHASH_CTX *ctx, const SIPHASH_KEY *key)
{
uint64_t k0, k1;
k0 = le64toh(key->k0);
k1 = le64toh(key->k1);
ctx->v[0] = 0x736f6d6570736575ULL ^ k0;
ctx->v[1] = 0x646f72616e646f6dULL ^ k1;
ctx->v[2] = 0x6c7967656e657261ULL ^ k0;
ctx->v[3] = 0x7465646279746573ULL ^ k1;
memset(ctx->buf, 0, sizeof(ctx->buf));
ctx->bytes = 0;
}
void
SipHash_Update(SIPHASH_CTX *ctx, int rc, int rf, const void *src, size_t len)
{
const uint8_t *ptr = src;
size_t left, used;
if (len == 0)
return;
used = ctx->bytes % sizeof(ctx->buf);
ctx->bytes += len;
if (used > 0) {
left = sizeof(ctx->buf) - used;
if (len >= left) {
memcpy(&ctx->buf[used], ptr, left);
SipHash_CRounds(ctx, rc);
len -= left;
ptr += left;
} else {
memcpy(&ctx->buf[used], ptr, len);
return;
}
}
while (len >= sizeof(ctx->buf)) {
memcpy(ctx->buf, ptr, sizeof(ctx->buf));
SipHash_CRounds(ctx, rc);
len -= sizeof(ctx->buf);
ptr += sizeof(ctx->buf);
}
if (len > 0)
memcpy(&ctx->buf, ptr, len);
}
uint64_t
SipHash_End(SIPHASH_CTX *ctx, int rc, int rf)
{
uint64_t r;
size_t left, used;
used = ctx->bytes % sizeof(ctx->buf);
left = sizeof(ctx->buf) - used;
memset(&ctx->buf[used], 0, left - 1);
ctx->buf[7] = ctx->bytes;
SipHash_CRounds(ctx, rc);
ctx->v[2] ^= 0xff;
SipHash_Rounds(ctx, rf);
r = (ctx->v[0] ^ ctx->v[1]) ^ (ctx->v[2] ^ ctx->v[3]);
// we do not clean up the context
return htole64(r);
}
uint64_t
SipHash(const SIPHASH_KEY *key, int rc, int rf, const void *src, size_t len)
{
SIPHASH_CTX ctx;
SipHash_Init(&ctx, key);
SipHash_Update(&ctx, rc, rf, src, len);
return (SipHash_End(&ctx, rc, rf));
}
#define SIP_ROTL(x, b) ((x) << (b)) | ( (x) >> (64 - (b)))
static void
SipHash_Rounds(SIPHASH_CTX *ctx, int rounds)
{
while (rounds--) {
ctx->v[0] += ctx->v[1];
ctx->v[2] += ctx->v[3];
ctx->v[1] = SIP_ROTL(ctx->v[1], 13);
ctx->v[3] = SIP_ROTL(ctx->v[3], 16);
ctx->v[1] ^= ctx->v[0];
ctx->v[3] ^= ctx->v[2];
ctx->v[0] = SIP_ROTL(ctx->v[0], 32);
ctx->v[2] += ctx->v[1];
ctx->v[0] += ctx->v[3];
ctx->v[1] = SIP_ROTL(ctx->v[1], 17);
ctx->v[3] = SIP_ROTL(ctx->v[3], 21);
ctx->v[1] ^= ctx->v[2];
ctx->v[3] ^= ctx->v[0];
ctx->v[2] = SIP_ROTL(ctx->v[2], 32);
}
}
static void
SipHash_CRounds(SIPHASH_CTX *ctx, int rounds)
{
uint64_t tmp;
memcpy(&tmp, ctx->buf, sizeof(tmp));
uint64_t m = le64toh(tmp);
ctx->v[3] ^= m;
SipHash_Rounds(ctx, rounds);
ctx->v[0] ^= m;
}
/*-
* Copyright (c) 2013 Andre Oppermann <andre@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $OpenBSD: siphash.h,v 1.3 2015/02/20 11:51:03 tedu Exp $
*/
/*
* SipHash is a family of pseudorandom functions (a.k.a. keyed hash functions)
* optimized for speed on short messages returning a 64bit hash/digest value.
*
* The number of rounds is selected by the macro family used for the
* update/finalization calls (initialization is shared):
* SipHash24_*() for the fast and reasonably strong version
* SipHash48_*() for the stronger version (half as fast)
*
* SIPHASH_CTX ctx;
* SIPHASH_KEY key = { .k0 = ..., .k1 = ... }; /* 128-bit key */
* SipHash24_Init(&ctx, &key);
* SipHash24_Update(&ctx, pointer_to_string, length_of_string);
* uint64_t hash = SipHash24_End(&ctx);
*/
#ifndef _SIPHASH_H_
#define _SIPHASH_H_
#include <stddef.h>
#include <stdint.h>
#define SIPHASH_BLOCK_LENGTH 8
#define SIPHASH_KEY_LENGTH 16
#define SIPHASH_DIGEST_LENGTH 8
typedef struct _SIPHASH_CTX {
uint64_t v[4];
uint8_t buf[SIPHASH_BLOCK_LENGTH];
uint32_t bytes;
} SIPHASH_CTX;
typedef struct {
uint64_t k0;
uint64_t k1;
} SIPHASH_KEY;
void SipHash_Init(SIPHASH_CTX *, const SIPHASH_KEY *);
void SipHash_Update(SIPHASH_CTX *, int, int, const void *, size_t);
uint64_t SipHash_End(SIPHASH_CTX *, int, int);
uint64_t SipHash(const SIPHASH_KEY *, int, int, const void *, size_t);
#define SipHash24_Init(_c, _k) SipHash_Init((_c), (_k))
#define SipHash24_Update(_c, _p, _l) SipHash_Update((_c), 2, 4, (_p), (_l))
#define SipHash24_End(_d) SipHash_End((_d), 2, 4)
#define SipHash24(_k, _p, _l) SipHash((_k), 2, 4, (_p), (_l))
#define SipHash48_Init(_c, _k) SipHash_Init((_c), (_k))
#define SipHash48_Update(_c, _p, _l) SipHash_Update((_c), 4, 8, (_p), (_l))
#define SipHash48_End(_d) SipHash_End((_d), 4, 8)
#define SipHash48(_k, _p, _l) SipHash((_k), 4, 8, (_p), (_l))
#endif /* _SIPHASH_H_ */
......@@ -2,6 +2,8 @@
* UCW Library -- Generic allocators
*
* (c) 2014 Martin Mares <mj@ucw.cz>
* SPDX-License-Identifier: LGPL-2.1-or-later
* Source: https://www.ucw.cz/libucw/
*/
#ifndef _UCW_ALLOC_H
......@@ -22,7 +24,7 @@ struct ucw_allocator {
/**
* [[std]]
* This allocator uses <<basics:xmalloc()>>, <<basics:xrealloc()>> and <<basics:xfree()>>. The memory
* it allocates is left unitialized.
* it allocates is left uninitialized.
**/
extern struct ucw_allocator ucw_allocator_std;
......
......@@ -4,8 +4,8 @@
* (c) 1997--2012 Martin Mares <mj@ucw.cz>
* (c) 2006 Robert Spalek <robert@ucw.cz>
*
* This software may be freely distributed and used according to the terms
* of the GNU Lesser General Public License.
* SPDX-License-Identifier: LGPL-2.1-or-later
* Source: https://www.ucw.cz/libucw/
*/
#ifndef _UCW_CONFIG_H
......@@ -38,7 +38,12 @@ typedef int32_t s32; /** Exactly 32 bits, signed **/
typedef uint64_t u64; /** Exactly 64 bits, unsigned **/
typedef int64_t s64; /** Exactly 64 bits, signed **/
#ifndef uint /* Redefining typedef is a C11 feature. */
typedef unsigned int uint; /** A better pronounceable alias for `unsigned int` **/
#define uint uint
#endif
typedef s64 timestamp_t; /** Milliseconds since an unknown epoch **/
// FIXME: This should be removed soon
......
......@@ -6,8 +6,8 @@
* (c) 2006 Robert Spalek <robert@ucw.cz>
* (c) 2007 Pavel Charvat <pchar@ucw.cz>
*
* This software may be freely distributed and used according to the terms
* of the GNU Lesser General Public License.
* SPDX-License-Identifier: LGPL-2.1-or-later
* Source: https://www.ucw.cz/libucw/
*/
#ifndef _UCW_LIB_H
......@@ -15,6 +15,7 @@
#include <stdarg.h>
#include <stdbool.h>
#include <stdlib.h>
#ifdef CONFIG_UCW_CLEAN_ABI
#define assert_failed ucw_assert_failed
......@@ -117,7 +118,7 @@
* === Basic logging functions (see <<log:,Logging>> and <ucw/log.h> for more)
***/
#define DBG(x,y...) do { } while(0)
#define DBG(x, ...) do { } while(0)
#define DBG_SPOT do { } while(0)
#define ASSERT(x)
......
SPDXVersion: SPDX-2.1
DataLicense: CC0-1.0
SPDXID: SPDXRef-DOCUMENT
DocumentName: libucw
DocumentNamespace: http://spdx.org/spdxdocs/spdx-v2.1-c3d39e26-6b71-46d4-88ea-e52750932ff3
PackageName: libucw
PackageDownloadLocation: git://git.ucw.cz/libucw.git@f1bde7104b04d5254d1d1d7dcc8de790a43a416f#ucw/
PackageOriginator: Organization: United Computer Wizards
PackageLicenseDeclared: LGPL-2.1-or-later
/*
* UCW Library -- Memory Pools (Formatting)
*
* (c) 2005 Martin Mares <mj@ucw.cz>
* (c) 2007 Pavel Charvat <pchar@ucw.cz>
* SPDX-License-Identifier: LGPL-2.1-or-later
* Source: https://www.ucw.cz/libucw/
*/
#include <ucw/lib.h>
#include <ucw/mempool.h>
#include <stdio.h>
#include <string.h>
/* FIXME: migrate to Knot DNS version of mempools. */
#pragma GCC diagnostic ignored "-Wpointer-arith"
static char *
mp_vprintf_at(struct mempool *mp, size_t ofs, const char *fmt, va_list args)
{
char *ret = mp_grow(mp, ofs + 1) + ofs;
va_list args2;
va_copy(args2, args);
int cnt = vsnprintf(ret, mp_avail(mp) - ofs, fmt, args2);
va_end(args2);
if (cnt < 0)
{
/* Our C library doesn't support C99 return value of vsnprintf, so we need to iterate */
do
{
ret = mp_expand(mp) + ofs;
va_copy(args2, args);
cnt = vsnprintf(ret, mp_avail(mp) - ofs, fmt, args2);
va_end(args2);
}
while (cnt < 0);
}
else if ((uint)cnt >= mp_avail(mp) - ofs)
{
ret = mp_grow(mp, ofs + cnt + 1) + ofs;
va_copy(args2, args);
vsnprintf(ret, cnt + 1, fmt, args2);
va_end(args2);
}
mp_end(mp, ret + cnt + 1);
return ret - ofs;
}
char *
mp_vprintf(struct mempool *mp, const char *fmt, va_list args)
{
mp_start(mp, 1);
return mp_vprintf_at(mp, 0, fmt, args);
}
char *
mp_printf(struct mempool *p, const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
char *res = mp_vprintf(p, fmt, args);
va_end(args);
return res;
}
char *
mp_vprintf_append(struct mempool *mp, char *ptr, const char *fmt, va_list args)
{
size_t ofs = mp_open(mp, ptr);
ASSERT(ofs && !ptr[ofs - 1]);
return mp_vprintf_at(mp, ofs - 1, fmt, args);
}
char *
mp_printf_append(struct mempool *mp, char *ptr, const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
char *res = mp_vprintf_append(mp, ptr, fmt, args);
va_end(args);
return res;
}
#ifdef TEST
int main(void)
{
struct mempool *mp = mp_new(64);
char *x = mp_printf(mp, "<Hello, %s!>", "World");
fputs(x, stdout);
x = mp_printf_append(mp, x, "<Appended>");
fputs(x, stdout);
x = mp_printf(mp, "<Hello, %50s!>\n", "World");
fputs(x, stdout);
return 0;
}
#endif
......@@ -4,8 +4,8 @@
* (c) 1997--2014 Martin Mares <mj@ucw.cz>
* (c) 2007--2015 Pavel Charvat <pchar@ucw.cz>
*
* This software may be freely distributed and used according to the terms
* of the GNU Lesser General Public License.
* SPDX-License-Identifier: LGPL-2.1-or-later
* Source: https://www.ucw.cz/libucw/
*/
#undef LOCAL_DEBUG
......@@ -18,6 +18,9 @@
#include <string.h>
#include <stdlib.h>
/* FIXME: migrate to Knot DNS version of mempools. */
#pragma GCC diagnostic ignored "-Wpointer-arith"
#define MP_CHUNK_TAIL ALIGN_TO(sizeof(struct mempool_chunk), CPU_STRUCT_ALIGN)
#define MP_SIZE_MAX (SIZE_MAX - MP_CHUNK_TAIL - CPU_PAGE_SIZE)
......
......@@ -3,14 +3,14 @@
*
* (c) 1997--2015 Martin Mares <mj@ucw.cz>
* (c) 2007 Pavel Charvat <pchar@ucw.cz>
*
* This software may be freely distributed and used according to the terms
* of the GNU Lesser General Public License.
* SPDX-License-Identifier: LGPL-2.1-or-later
* Source: https://www.ucw.cz/libucw/
*/
#ifndef _UCW_POOLS_H
#define _UCW_POOLS_H
#include "lib/defines.h"
#include <ucw/alloc.h>
#include <ucw/config.h>
#include <ucw/lib.h>
......@@ -100,6 +100,7 @@ struct mempool_stats { /** Mempool statistics. See @mp_stats(). **/
*
* Memory pools can be treated as <<trans:respools,resources>>, see <<trans:res_mempool()>>.
**/
KR_EXPORT
void mp_init(struct mempool *pool, size_t chunk_size);
/**
......@@ -110,6 +111,7 @@ void mp_init(struct mempool *pool, size_t chunk_size);
*
* Memory pools can be treated as <<trans:respools,resources>>, see <<trans:res_mempool()>>.
**/
KR_EXPORT
struct mempool *mp_new(size_t chunk_size);
/**
......@@ -117,6 +119,7 @@ struct mempool *mp_new(size_t chunk_size);
* Frees all the memory allocated by this mempool and,
* if created by @mp_new(), the @pool itself.
**/
KR_EXPORT
void mp_delete(struct mempool *pool);
/**
......@@ -125,6 +128,7 @@ void mp_delete(struct mempool *pool);
* further allocation requests. Leaves the @pool alive,
* even if it was created with @mp_new().
**/
KR_EXPORT
void mp_flush(struct mempool *pool);
/**
......@@ -166,6 +170,7 @@ void *mp_alloc_internal(struct mempool *pool, size_t size) LIKE_MALLOC;
* `CPU_STRUCT_ALIGN` bytes and this condition remains true also
* after future reallocations.
**/
KR_EXPORT
void *mp_alloc(struct mempool *pool, size_t size);
/**
......@@ -225,7 +230,7 @@ static inline struct ucw_allocator *mp_get_allocator(struct mempool *mp)
* you can grow it incrementally to needed size. You can grow only
* one buffer at a time on a given mempool.
*
* Similar functionality is provided by <<growbuf:,growing buffes>> module.
* Similar functionality is provided by <<growbuf:,growing buffers>> module.
***/
/* For internal use only, do not call directly */
......@@ -531,6 +536,7 @@ char *mp_str_from_mem(struct mempool *p, const void *mem, size_t len) LIKE_MALLO
/**
* printf() into a in-memory string, allocated on the memory pool.
**/
KR_EXPORT
char *mp_printf(struct mempool *mp, const char *fmt, ...) FORMAT_CHECK(printf,2,3) LIKE_MALLOC;
/**
* Like @mp_printf(), but uses `va_list` for parameters.
......@@ -549,6 +555,7 @@ char *mp_vprintf(struct mempool *mp, const char *fmt, va_list args) LIKE_MALLOC;
* not called on an opened growing buffer. The old name will be preserved for backward
* compatibility for the time being.
**/
KR_EXPORT
char *mp_printf_append(struct mempool *mp, char *ptr, const char *fmt, ...) FORMAT_CHECK(printf,3,4);
#define mp_append_printf mp_printf_append
/**
......
/* Copyright (C) 2011 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
/*!
* \file
*
* \brief Wire integer operations.
*
* \addtogroup contrib
* @{
*/
#pragma once
#include <stdint.h>
#include <string.h>
#if defined(__linux__)
# include <endian.h>
# ifndef be64toh
# include <arpa/inet.h>
# include <byteswap.h>
# if BYTE_ORDER == LITTLE_ENDIAN
# define be16toh(x) ntohs(x)
# define be32toh(x) ntohl(x)
# define be64toh(x) bswap_64 (x)
# define le16toh(x) (x)
# define le32toh(x) (x)
# define le64toh(x) (x)
# else
# define be16toh(x) (x)
# define be32toh(x) (x)
# define be64toh(x) (x)
# define le16toh(x) ntohs(x)
# define le32toh(x) ntohl(x)
# define le64toh(x) bswap_64 (x)
# endif
# endif
#elif defined(__FreeBSD__) || defined(__NetBSD__)
# include <sys/endian.h>
#elif defined(__OpenBSD__)
# include <endian.h>
#elif defined(__APPLE__)
# include <libkern/OSByteOrder.h>
# define be16toh(x) OSSwapBigToHostInt16(x)
# define be32toh(x) OSSwapBigToHostInt32(x)
# define be64toh(x) OSSwapBigToHostInt64(x)
# define htobe16(x) OSSwapHostToBigInt16(x)
# define htobe32(x) OSSwapHostToBigInt32(x)
# define htobe64(x) OSSwapHostToBigInt64(x)
# define le16toh(x) OSSwapLittleToHostInt16(x)
# define le32toh(x) OSSwapLittleToHostInt32(x)
# define le64toh(x) OSSwapLittleToHostInt64(x)
# define htole16(x) OSSwapHostToLittleInt16(x)
# define htole32(x) OSSwapHostToLittleInt32(x)
# define htole64(x) OSSwapHostToLittleInt64(x)
#endif
/*!
* \brief Reads 2 bytes from the wireformat data.
*
* \param pos Data to read the 2 bytes from.
*
* \return The 2 bytes read, in host byte order.
*/
inline static uint16_t wire_read_u16(const uint8_t *pos)
{
return be16toh(*(uint16_t *)pos);
}
/*!
* \brief Reads 4 bytes from the wireformat data.
*
* \param pos Data to read the 4 bytes from.
*
* \return The 4 bytes read, in host byte order.
*/
inline static uint32_t wire_read_u32(const uint8_t *pos)
{
return be32toh(*(uint32_t *)pos);
}
/*!
* \brief Reads 6 bytes from the wireformat data.
*
* \param pos Data to read the 6 bytes from.
*
* \return The 6 bytes read, in host byte order.
*/
inline static uint64_t wire_read_u48(const uint8_t *pos)
{
uint64_t input = 0;
memcpy((uint8_t *)&input + 1, pos, 6);
return be64toh(input) >> 8;
}
/*!
* \brief Read 8 bytes from the wireformat data.
*
* \param pos Data to read the 8 bytes from.
*
* \return The 8 bytes read, in host byte order.
*/
inline static uint64_t wire_read_u64(const uint8_t *pos)
{
return be64toh(*(uint64_t *)pos);
}
/*!
* \brief Writes 2 bytes in wireformat.
*
* The data are stored in network byte order (big endian).
*
* \param pos Position where to put the 2 bytes.
* \param data Data to put.
*/
inline static void wire_write_u16(uint8_t *pos, uint16_t data)
{
*(uint16_t *)pos = htobe16(data);
}
/*!
* \brief Writes 4 bytes in wireformat.
*
* The data are stored in network byte order (big endian).
*
* \param pos Position where to put the 4 bytes.
* \param data Data to put.
*/
inline static void wire_write_u32(uint8_t *pos, uint32_t data)
{
*(uint32_t *)pos = htobe32(data);
}
/*!
* \brief Writes 6 bytes in wireformat.
*
* The data are stored in network byte order (big endian).
*
* \param pos Position where to put the 4 bytes.
* \param data Data to put.
*/
inline static void wire_write_u48(uint8_t *pos, uint64_t data)
{
uint64_t swapped = htobe64(data << 8);
memcpy(pos, (uint8_t *)&swapped + 1, 6);
}
/*!
* \brief Writes 8 bytes in wireformat.
*
* The data are stored in network byte order (big endian).
*
* \param pos Position where to put the 8 bytes.
* \param data Data to put.
*/
inline static void wire_write_u64(uint8_t *pos, uint64_t data)
{
*(uint64_t *)pos = htobe64(data);
}
/*! @} */
************************
Knot DNS Resolver daemon
************************
The server is in the `daemon` directory; it works out of the box without any configuration.
.. code-block:: bash
$ kresd -h # Get help
$ kresd -a ::1
Enabling DNSSEC
===============
The resolver supports DNSSEC including :rfc:`5011` automated DNSSEC TA updates and :rfc:`7646` negative trust anchors.
To enable it, you need to provide trusted root keys. Bootstrapping of the keys is automated, and kresd fetches the root trust anchor set `over a secure channel <http://jpmens.net/2015/01/21/opendnssec-rfc-5011-bind-and-unbound/>`_ from IANA. From there, it can perform :rfc:`5011` automatic updates for you.
.. note:: Automatic bootstrap requires luasocket_ and luasec_ installed.
.. code-block:: bash
$ kresd -k root.keys # File for root keys
[ ta ] bootstrapped root anchor "19036 8 2 49AAC11D7B6F6446702E54A1607371607A1A41855200FD2CE1CDDE32F24E8FB5"
[ ta ] warning: you SHOULD check the key manually, see: https://data.iana.org/root-anchors/draft-icann-dnssec-trust-anchor.html#sigs
[ ta ] key: 19036 state: Valid
[ ta ] next refresh: 86400000
Alternatively, you can set it in configuration file with ``trust_anchors.file = 'root.keys'``. If the file doesn't exist, it will be automatically populated with root keys validated using root anchors retrieved over HTTPS.
This is equivalent to `using unbound-anchor <https://www.unbound.net/documentation/howto_anchor.html>`_:
.. code-block:: bash
$ unbound-anchor -a "root.keys" || echo "warning: check the key at this point"
$ echo "auto-trust-anchor-file: \"root.keys\"" >> unbound.conf
$ unbound -c unbound.conf
.. warning:: Bootstrapping of the root trust anchors is automatic; you are however **encouraged to check** the key over a **secure channel**, as specified in `DNSSEC Trust Anchor Publication for the Root Zone <https://data.iana.org/root-anchors/draft-icann-dnssec-trust-anchor.html#sigs>`_. This is a critical step where the whole infrastructure may be compromised; you will be warned about it in the server log.
Manually providing root anchors
-------------------------------
The root anchor bootstrap may fail for various reasons; in that case you need to provide the IANA or alternative root anchors manually. The format of the keyfile is the same as for Unbound or BIND and contains DS/DNSKEY records.
1. Check the current TA published on `IANA website <https://data.iana.org/root-anchors/root-anchors.xml>`_
2. Fetch current keys (DNSKEY), verify digests
3. Deploy them
.. code-block:: bash
$ kdig DNSKEY . @k.root-servers.net +noall +answer | grep "DNSKEY[[:space:]]257" > root.keys
$ ldns-key2ds -n root.keys # Only print to stdout
... verify that digest matches TA published by IANA ...
$ kresd -k root.keys
You've just enabled DNSSEC!
CLI interface
=============
The daemon features a CLI interface; type ``help()`` to see the list of available commands.
.. code-block:: bash
$ kresd /var/run/knot-resolver
[system] started in interactive mode, type 'help()'
> cache.count()
53
.. role:: lua(code)
:language: lua
Verbose output
--------------
If debug logging is compiled in, you can turn on verbose tracing of server operation with the ``-v`` option.
You can also toggle it at runtime with the ``verbose(true|false)`` command.
.. code-block:: bash
$ kresd -v
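The same toggle is available from the CLI or configuration via the ``verbose()`` function documented below; a minimal illustrative session (the echoed boolean return value is assumed):
.. code-block:: lua
> verbose(true)  -- turn verbose tracing on
true
> verbose(false) -- and off again
false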
Scaling out
===========
The server can clone itself into multiple processes upon startup; this enables you to scale it across multiple cores.
Multiple processes can serve different addresses, but still share the same working directory and cache.
You can also start and stop processes at runtime based on the load.
.. code-block:: bash
$ kresd -f 4 rundir > kresd.log &
$ kresd -f 2 rundir > kresd_2.log & # Extra instances
$ pstree $$ -g
bash(3533)─┬─kresd(19212)─┬─kresd(19212)
│ ├─kresd(19212)
│ └─kresd(19212)
├─kresd(19399)───kresd(19399)
└─pstree(19411)
$ kill 19399 # Kill group 2, former will continue to run
bash(3533)─┬─kresd(19212)─┬─kresd(19212)
│ ├─kresd(19212)
│ └─kresd(19212)
└─pstree(19460)
.. _daemon-reuseport:
.. note:: On recent Linux kernels supporting ``SO_REUSEPORT`` (since 3.9, backported to the RHEL 2.6.32 kernel) the daemon is also able to bind to the same endpoint and distribute the load between the forked processes. If your OS doesn't support it, you can :ref:`use a supervisor <daemon-supervised>` that binds to the sockets before starting multiple processes.
Notice the absence of an interactive CLI. You can attach to the console of each process; they are in ``rundir/tty/PID``.
.. code-block:: bash
$ nc -U rundir/tty/3008 # or socat - UNIX-CONNECT:rundir/tty/3008
> cache.count()
53
The *direct output* of the CLI command is captured and sent over the socket, while also printed to the daemon standard outputs (for accountability). This gives you an immediate response on the outcome of your command.
Error or debug logs aren't captured, but you can find them in the daemon standard outputs.
This is also a way to enumerate and test running instances: the list of files in ``tty`` corresponds to the list
of running processes, and you can test a process for liveness by connecting to its UNIX socket.
.. _daemon-supervised:
Running supervised
==================
Knot Resolver can run under a supervisor to allow for graceful restarts, a watchdog process and socket activation. This way the supervisor binds to the sockets and lends them to the resolver daemon. If the resolver terminates or is killed, the sockets remain open and no queries are dropped.
The watchdog process must notify kresd about active file descriptors, and kresd will automatically determine the socket type and bound address, so it will appear like any other address. There's a tiny supervisor script for convenience, but you should have a look at `real process managers`_.
.. code-block:: bash
$ python scripts/supervisor.py ./daemon/kresd -a 127.0.0.1
$ [system] interactive mode
> quit()
> [2016-03-28 16:06:36.795879] process finished, pid = 99342, status = 0, uptime = 0:00:01.720612
[system] interactive mode
>
The daemon also supports `systemd socket activation`_; it is automatically detected and requires no configuration on the user's side.
Configuration
=============
.. contents::
:depth: 2
:local:
In its simplest form, the daemon requires just a working directory in which it can set up persistent files like
the cache and the process state. If you don't provide the working directory as a parameter, it is going to make itself
comfortable in the current working directory.
.. code-block:: sh
$ kresd /var/run/kresd
And you're good to go for most use cases! If you want to use modules or configure daemon behavior, read on.
There are several ways to configure the daemon: an RPC interface, a CLI, and a configuration file.
Fortunately, all of them share a common syntax and are transparent to each other.
Configuration example
---------------------
.. code-block:: lua
-- interfaces
net = { '127.0.0.1', '::1' }
-- load some modules
modules = { 'policy' }
-- 10MB cache
cache.size = 10*MB
.. tip:: There are more configuration examples in `etc/` directory for personal, ISP, company internal and resolver cluster use cases.
Configuration syntax
--------------------
The configuration is kept in the ``config`` file in the daemon working directory, and it's going to get loaded automatically.
If there isn't one, the daemon is going to start with sane defaults, listening on `localhost`.
The syntax for options is as follows: ``group.option = value`` or ``group.action(parameters)``.
You can also add comments using the ``--`` prefix.
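For instance, both forms (and a comment) look like this, using options documented later in this reference:
.. code-block:: lua
-- group.option = value
cache.size = 100 * MB
-- group.action(parameters)
net.listen('127.0.0.1', 53)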
A simple example would be to load static hints.
.. code-block:: lua
modules = {
'hints' -- no configuration
}
If the module accepts configuration, you can call ``module.config({...})`` or provide an options table.
The syntax for a table is ``{ key1 = value, key2 = value }``; it represents the unpacked `JSON-encoded`_ string that
the modules use as the :ref:`input configuration <mod-properties>`.
.. code-block:: lua
modules = {
hints = '/etc/hosts'
}
.. warning:: Modules specified together with their configuration may not load exactly in the order given.
Modules are inherently ordered by their declaration. Some modules are built-in, so it would normally be impossible to place, for example, *hints* before *rrcache*. You can enforce a specific order with the precedence operators **>** and **<**.
.. code-block:: lua
modules = {
'hints > iterate', -- Hints AFTER iterate
'policy > hints', -- Policy AFTER hints
'view < rrcache' -- View BEFORE rrcache
}
modules.list() -- Check module call order
This is useful if you're writing a module with a layer that, for example, evaluates an answer before it is written into the cache.
.. tip:: The configuration and CLI syntax is the Lua language, with which you may already be familiar.
If not, you can read `Learn Lua in 15 minutes`_ for a syntax overview. Spending just a few minutes
will allow you to break away from static configuration, write more efficient configuration with iteration, and
leverage events and hooks. Lua is heavily used for scripting in applications ranging from embedded systems to game engines,
and in the DNS world notably in `PowerDNS Recursor`_. Knot DNS Resolver does not simply use Lua modules; Lua is
the heart of the daemon for everything from configuration to internal events and user interaction.
Dynamic configuration
^^^^^^^^^^^^^^^^^^^^^
Knowing that the configuration is Lua in disguise enables you to write dynamic rules. It also helps you avoid the repetitive templating that is unavoidable with static configuration.
.. code-block:: lua
if hostname() == 'hidden' then
net.listen(net.eth0, 5353)
else
net = { '127.0.0.1', net.eth1.addr[1] }
end
Another example shows how to bind to all interfaces, using iteration.
.. code-block:: lua
for name, addr_list in pairs(net.interfaces()) do
net.listen(addr_list)
end
You can also use third-party packages (available for example through LuaRocks_), as in this example
that downloads the cache from a parent resolver to avoid a cold-cache start.
.. code-block:: lua
local http = require('socket.http')
local ltn12 = require('ltn12')
if cache.count() == 0 then
-- download cache from parent
http.request {
url = 'http://parent/cache.mdb',
sink = ltn12.sink.file(io.open('cache.mdb', 'w'))
}
-- reopen cache with 100M limit
cache.size = 100*MB
end
Events and services
^^^^^^^^^^^^^^^^^^^
Lua supports a concept called closures_; this is extremely useful for scripting actions upon various events,
say, for example, pruning the cache a minute after loading, publishing statistics every 5 minutes, and so on.
Here's an example of an anonymous function with :func:`event.recurrent()`:
.. code-block:: lua
-- every 5 minutes
event.recurrent(5 * minute, function()
cache.prune()
end)
Note that each scheduled event is identified by a number valid for the duration of the event;
you may cancel it at any time. You can do this with anonymous functions if you accept the event
as a parameter, but it's not very useful, as you don't have any *non-global* way to keep persistent variables.
.. code-block:: lua
-- make a closure, encapsulating counter
function pruner()
local i = 0
-- pruning function
return function(e)
cache.prune()
-- cancel event on 5th attempt
i = i + 1
if i == 5 then
event.cancel(e)
end
end
end
-- make recurrent event that will cancel after 5 times
event.recurrent(5 * minute, pruner())
Another type of actionable event is activity on a file descriptor. This allows you to embed other
event loops or monitor open files and then fire a callback when activity is detected.
In this way you can build persistent services like HTTP servers or monitoring probes that cooperate
well with the daemon's internal operations.
For example a simple web server that doesn't block:
.. code-block:: lua
local server, headers = require 'http.server', require 'http.headers'
local cqueues = require 'cqueues'
-- Start socket server
local s = server.listen { host = 'localhost', port = 8080 }
assert(s:listen())
-- Compose per-request coroutine
local cq = cqueues.new()
cq:wrap(function()
s:run(function(stream)
-- Create response headers
local headers = headers.new()
headers:append(':status', '200')
headers:append('connection', 'close')
-- Send response and close connection
assert(stream:write_headers(headers, false))
assert(stream:write_chunk('OK', true))
stream:shutdown()
stream.connection:shutdown()
end)
s:close()
end)
-- Hook to socket watcher
event.socket(cq:pollfd(), function (ev, status, events)
cq:step(0)
end)
* File watchers
.. note:: Work in progress, come back later!
.. _closures: https://www.lua.org/pil/6.1.html
Configuration reference
-----------------------
This is a reference for variables and functions available to both configuration file and CLI.
.. contents::
:depth: 1
:local:
Environment
^^^^^^^^^^^
.. envvar:: env (table)
Return environment variable.
.. code-block:: lua
env.USER -- equivalent to $USER in shell
.. function:: hostname()
:return: Machine hostname.
.. function:: verbose(true | false)
:return: Toggle verbose logging.
.. function:: mode('strict' | 'normal' | 'permissive')
:return: Change resolver strictness checking level.
By default, the resolver runs in *normal* mode. There are possibly many small adjustments
hidden behind the mode settings, but the main idea is that in *permissive* mode the resolver
tries to resolve a name with as few lookups as possible, while in *strict* mode it spends much
more effort resolving and checking the referral path. However, if the majority of the traffic is covered
by DNSSEC, some of the strict checking actions are counter-productive.
.. csv-table::
:header: "Action", "Modes"
"Use mandatory glue", "strict, normal, permissive"
"Use in-bailiwick glue", "normal, permissive"
"Use any glue records", "permissive"
.. function:: user(name, [group])
:param string name: user name
:param string group: group name (optional)
:return: boolean
Drop privileges and run as given user (and group, if provided).
.. tip:: Note that you should bind to required network addresses before changing user. At the same time, you should open the cache **AFTER** you change the user (so it remains accessible). A good practice is to divide configuration in two parts:
.. code-block:: lua
-- privileged
net = { '127.0.0.1', '::1' }
-- unprivileged
cache.size = 100*MB
trust_anchors.file = 'root.key'
Example output:
.. code-block:: lua
> user('baduser')
invalid user name
> user('kresd', 'netgrp')
true
> user('root')
Operation not permitted
.. function:: resolve(qname, qtype[, qclass = kres.class.IN, options = 0, callback = nil])
:param string qname: Query name (e.g. 'com.')
:param number qtype: Query type (e.g. ``kres.type.NS``)
:param number qclass: Query class *(optional)* (e.g. ``kres.class.IN``)
:param number options: Resolution options (see query flags)
:param function callback: Callback to be executed when resolution completes (e.g. `function cb (pkt, req) end`). The callback gets a packet containing the final answer and doesn't have to return anything.
:return: boolean
Example:
.. code-block:: lua
-- Send query for root DNSKEY, ignore cache
resolve('.', kres.type.DNSKEY, kres.class.IN, kres.query.NO_CACHE)
-- Query for AAAA record
resolve('example.com', kres.type.AAAA, kres.class.IN, 0,
function (answer, req)
-- Check answer RCODE
local pkt = kres.pkt_t(answer)
if pkt:rcode() == kres.rcode.NOERROR then
-- Print matching records
local records = pkt:section(kres.section.ANSWER)
for i = 1, #records do
local rr = records[i]
if rr.type == kres.type.AAAA then
print ('record:', kres.rr2str(rr))
end
end
else
print ('rcode: ', pkt:rcode())
end
end)
Network configuration
^^^^^^^^^^^^^^^^^^^^^
For when listening on ``localhost`` just doesn't cut it.
.. tip:: Use declarative interface for network.
.. code-block:: lua
net = { '127.0.0.1', net.eth0, net.eth1.addr[1] }
net.ipv4 = false
.. envvar:: net.ipv6 = true|false
:return: boolean (default: true)
Enable/disable using IPv6 for recursion.
.. envvar:: net.ipv4 = true|false
:return: boolean (default: true)
Enable/disable using IPv4 for recursion.
.. function:: net.listen(address, [port = 53, flags = {tls = false}])
:return: boolean
Listen on an address; port and flags are optional.
.. function:: net.listen({address1, ...}, [port = 53, flags = {tls = false}])
:return: boolean
Listen on list of addresses.
.. function:: net.listen(interface, [port = 53, flags = {tls = false}])
:return: boolean
Listen on all addresses belonging to an interface.
Example:
.. code-block:: lua
net.listen(net.eth0) -- listen on eth0
.. function:: net.close(address, [port = 53])
:return: boolean
Close an opened address/port pair; a no-op if not listening.
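Example (an illustrative session; the address must be one you are currently listening on):
.. code-block:: lua
> net.close('127.0.0.1', 53)
true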
.. function:: net.list()
:return: Table of bound interfaces.
Example output:
.. code-block:: lua
[127.0.0.1] => {
[port] => 53
[tcp] => true
[udp] => true
}
.. function:: net.interfaces()
:return: Table of available interfaces and their addresses.
Example output:
.. code-block:: lua
[lo0] => {
[addr] => {
[1] => ::1
[2] => 127.0.0.1
}
[mac] => 00:00:00:00:00:00
}
[eth0] => {
[addr] => {
[1] => 192.168.0.1
}
[mac] => de:ad:be:ef:aa:bb
}
.. tip:: You can use ``net.<iface>`` as a shortcut for specific interface, e.g. ``net.eth0``
.. function:: net.bufsize([udp_bufsize])
Get/set maximum EDNS payload available. Default is 4096.
You cannot set less than 512 (512 is DNS packet size without EDNS, 1220 is minimum size for DNSSEC) or more than 65535 octets.
Example output:
.. code-block:: lua
> net.bufsize(4096)
> net.bufsize()
4096
.. function:: net.tcp_pipeline([len])
Get/set per-client TCP pipeline limit (number of outstanding queries that a single client connection can make in parallel). Default is 50.
.. code-block:: lua
> net.tcp_pipeline()
50
> net.tcp_pipeline(100)
.. function:: net.tls([cert_path], [key_path])
Get/set path to a server TLS certificate and private key for DNS/TLS.
Example output:
.. code-block:: lua
> net.tls_cert("/etc/kresd/server-cert.pem", "/etc/kresd/server-key.pem")
> net.tls_cert()
("/etc/kresd/server-cert.pem", "/etc/kresd/server-key.pem")
> net.listen("::", 853)
> net.listen("::", 443, {tls = true})
Trust anchors and DNSSEC
^^^^^^^^^^^^^^^^^^^^^^^^
.. envvar:: trust_anchors.hold_down_time = 30 * day
:return: int (default: 30 * day)
Modify RFC5011 hold-down timer to given value. Example: ``30 * sec``
.. envvar:: trust_anchors.refresh_time = nil
:return: int (default: nil)
Modify RFC5011 refresh timer to given value (not set by default), this will force trust anchors
to be updated every N seconds periodically instead of relying on RFC5011 logic and TTLs.
Example: ``10 * sec``
.. envvar:: trust_anchors.keep_removed = 0
:return: int (default: 0)
How many ``Removed`` keys should be held in history (and key file) before being purged.
Note: all ``Removed`` keys will be purged from key file after restarting the process.
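For example, to keep the last two removed keys in history:
.. code-block:: lua
trust_anchors.keep_removed = 2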
.. function:: trust_anchors.config(keyfile)
:param string keyfile: File containing DNSKEY records, should be writeable.
You can use only DNSKEY records in managed mode. It is equivalent to CLI parameter ``-k <keyfile>`` or ``trust_anchors.file = keyfile``.
Example output:
.. code-block:: lua
> trust_anchors.config('root.keys')
[trust_anchors] key: 19036 state: Valid
.. function:: trust_anchors.set_insecure(nta_set)
:param table nta_set: List of domain names (text format) representing NTAs.
When you use a domain name as an NTA, DNSSEC validation will be turned off at/below these names.
Each function call replaces the previous NTA set. You can find the current active set in ``trust_anchors.insecure`` variable.
.. tip:: Use the `trust_anchors.negative = {}` alias for easier configuration.
Example output:
.. code-block:: lua
> trust_anchors.negative = { 'bad.boy', 'example.com' }
> trust_anchors.insecure
[1] => bad.boy
[2] => example.com
.. function:: trust_anchors.add(rr_string)
:param string rr_string: DS/DNSKEY records in presentation format (e.g. ``. 3600 IN DS 19036 8 2 49AAC11...``)
Inserts DS/DNSKEY record(s) into current keyset. These will not be managed or updated, use it only for testing
or if you have a specific use case for not using a keyfile.
Example output:
.. code-block:: lua
> trust_anchors.add('. 3600 IN DS 19036 8 2 49AAC11...')
Modules configuration
^^^^^^^^^^^^^^^^^^^^^
The daemon provides an interface for dynamic loading of :ref:`daemon modules <modules-implemented>`.
.. tip:: Use declarative interface for module loading.
.. code-block:: lua
modules = {
hints = {file = '/etc/hosts'}
}
Equals to:
.. code-block:: lua
modules.load('hints')
hints.config({file = '/etc/hosts'})
.. function:: modules.list()
:return: List of loaded modules.
.. function:: modules.load(name)
:param string name: Module name, e.g. "hints"
:return: boolean
Load a module by name.
.. function:: modules.unload(name)
:param string name: Module name
:return: boolean
Unload a module by name.
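A short illustrative session using the imperative interface (the printed boolean results are assumed, per the return types documented above):
.. code-block:: lua
> modules.load('hints')
true
> hints.config({file = '/etc/hosts'})
> modules.unload('hints')
true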
Cache configuration
^^^^^^^^^^^^^^^^^^^
The cache in Knot DNS Resolver is persistent, with an LMDB backend; this means that the daemon doesn't lose
the cached data on restart or crash, avoiding cold starts. The cache may be reused between cache
daemons or manipulated from other processes, making, for example, synchronised load-balanced recursors possible.
.. envvar:: cache.size (number)
Get/set the cache maximum size in bytes. Note that this is only a hint to the backend,
which may or may not respect it. See :func:`cache.open()`.
.. code-block:: lua
print(cache.size)
cache.size = 100 * MB -- equivalent to `cache.open(100 * MB)`
.. envvar:: cache.storage (string)
Get or change the cache storage backend configuration, see :func:`cache.backends()` for
more information. If the new storage configuration is invalid, it is not set.
.. code-block:: lua
print(cache.storage)
cache.storage = 'lmdb://.'
.. function:: cache.backends()
:return: map of backends
The cache supports runtime-changeable backends, using an optional :rfc:`3986` URI, where the scheme
represents the backend protocol and the rest of the URI is backend-specific configuration. By default, it
is an ``lmdb`` backend in the working directory, i.e. ``lmdb://``.
Example output:
.. code-block:: lua
[lmdb://] => true
.. function:: cache.stats()
:return: table of cache counters
The cache collects counters on various operations (hits, misses, transactions, ...). This function call returns a table of
cache counters that can be used for calculating statistics.
.. function:: cache.open(max_size[, config_uri])
:param number max_size: Maximum cache size in bytes.
:return: boolean
Open the cache with a size limit. The cache will be reopened if already open.
Note that max_size cannot be lowered, only increased, due to how the cache is implemented.
.. tip:: Use ``kB, MB, GB`` constants as a multiplier, e.g. ``100*MB``.
The cache supports runtime-changeable backends, see :func:`cache.backends()` for more information and
the default. Refer to the documentation of the specific backend for the configuration string syntax.
- ``lmdb://``
As of now it only allows you to change the cache directory, e.g. ``lmdb:///tmp/cachedir``.
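For illustration, opening a 100 MB cache with an explicit backend URI (the directory here is hypothetical) could look like this:
.. code-block:: lua
-- equivalent to `cache.size = 100 * MB`, but with an explicit LMDB directory
cache.open(100 * MB, 'lmdb:///tmp/cachedir')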
.. function:: cache.count()
:return: Number of entries in the cache.
.. function:: cache.close()
:return: boolean
Close the cache.
.. note:: This may or may not clear the cache, depending on the used backend. See :func:`cache.clear()`.
.. function:: cache.stats()
Return a table of statistics. Note that this tracks all operations over the cache, not just whether
queries were answered from the cache or not.
Example:
.. code-block:: lua
print('Insertions:', cache.stats().insert)
.. function:: cache.prune([max_count])
:param number max_count: maximum number of items to be pruned at once (default: 65536)
:return: ``{ pruned: int }``
Prune expired/invalid records.
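Example (a sketch; the actual count depends on what is in the cache):
.. code-block:: lua
-- prune at most 100 items and print how many were actually dropped
print(cache.prune(100).pruned)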
.. function:: cache.get([domain])
:return: list of matching records in cache
Fetches matching records from cache. The **domain** can either be:
- a domain name (e.g. ``"domain.cz"``)
- a wildcard (e.g. ``"*.domain.cz"``)
The domain name fetches all records matching this name, while the wildcard matches all records at or below that name.
You can also use a special namespace ``"P"`` to purge NODATA/NXDOMAIN matching this name (e.g. ``"domain.cz P"``).
.. note:: This is equivalent to ``cache['domain']`` getter.
Examples:
.. code-block:: lua
-- Query cache for 'domain.cz'
cache['domain.cz']
-- Query cache for all records at/below 'insecure.net'
cache['*.insecure.net']
.. function:: cache.clear([domain])
:return: ``bool``
Purge cache records. If the domain isn't provided, whole cache is purged. See *cache.get()* documentation for subtree matching policy.
Examples:
.. code-block:: lua
-- Clear records at/below 'bad.cz'
cache.clear('*.bad.cz')
-- Clear packet cache
cache.clear('*. P')
-- Clear whole cache
cache.clear()
Timers and events
^^^^^^^^^^^^^^^^^
The timer does exactly what the examples suggest - it allows you to execute closures
after a specified time, or even as recurrent events. Time is always described in milliseconds,
but there are convenient variables that you can use - ``sec, minute, hour``.
For example, ``5 * hour`` represents five hours, or 5*60*60*1000 milliseconds.
.. function:: event.after(time, function)
:return: event id
Execute function after the specified time has passed.
The first parameter of the callback is the event itself.
Example:
.. code-block:: lua
event.after(1 * minute, function() print('Hi!') end)
.. function:: event.recurrent(interval, function)
:return: event id
Similar to :func:`event.after()`, periodically execute function after ``interval`` passes.
Example:
.. code-block:: lua
msg_count = 0
event.recurrent(5 * sec, function(e)
msg_count = msg_count + 1
print('Hi #'..msg_count)
end)
.. function:: event.reschedule(event_id, timeout)
Reschedule a running event; it has no effect on canceled events.
New events may reuse the event_id, so the behaviour is undefined if the function
is called after another event has started.
Example:
.. code-block:: lua
local interval = 1 * minute
event.after(1 * minute, function (ev)
print('Good morning!')
-- Halve the interval on each iteration
interval = interval / 2
event.reschedule(ev, interval)
end)
.. function:: event.cancel(event_id)
Cancel a running event; it has no effect on already canceled events.
New events may reuse the event_id, so the behaviour is undefined if the function
is called after another event has started.
Example:
.. code-block:: lua
e = event.after(1 * minute, function() print('Hi!') end)
event.cancel(e)
Watch for file descriptor activity. This allows embedding other event loops or simply
firing events when a pipe endpoint becomes active. In other words, asynchronous
notifications for the daemon.
.. function:: event.socket(fd, cb)
:param number fd: file descriptor to watch
:param cb: closure or callback to execute when fd becomes active
:return: event id
Execute a function when there is activity on the file descriptor. The closure is called
with the event id as the first parameter, the status as the second and the number of events as the third.
Example:
.. code-block:: lua
e = event.socket(0, function(e, status, nevents)
print('activity detected')
end)
event.cancel(e)
Map over multiple forks
^^^^^^^^^^^^^^^^^^^^^^^
When the daemon is running in forked mode, each process acts independently. This is good because it reduces software complexity and allows for runtime scaling, but it is not ideal because of the additional operational burden.
For example, when you want to add a new policy, you need to either put it in the configuration or execute the command on each process independently. The daemon simplifies this by promoting a process group leader which is able to execute commands synchronously over all forks.
.. function:: map(expr)
Run an expression synchronously over all forks; the results are returned as a table ordered by fork. The expression can be any valid Lua expression.
Example:
.. code-block:: lua
-- Current instance only
hostname()
localhost
-- Mapped to forks
map 'hostname()'
[1] => localhost
[2] => localhost
-- Get worker ID from each fork
map 'worker.id'
[1] => 0
[2] => 1
-- Get cache stats from each fork
map 'cache.stats()'
[1] => {
[hit] => 0
[delete] => 0
[miss] => 0
[insert] => 0
}
[2] => {
[hit] => 0
[delete] => 0
[miss] => 0
[insert] => 0
}
Scripting worker
^^^^^^^^^^^^^^^^
The worker is a service on top of the event loop that tracks and schedules outstanding queries;
you can see its statistics or schedule new queries. It also contains information about
the configured worker count and the process rank.
.. envvar:: worker.count
Return current total worker count (e.g. `1` for single-process)
.. envvar:: worker.id
Return current worker ID (starting from `0` up to `worker.count - 1`)
.. envvar:: pid (number)
Current worker process PID.
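For a single-process daemon these values would look as follows:
.. code-block:: lua
> worker.count
1
> worker.id
0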
.. function:: worker.stats()
Return table of statistics.
* ``udp`` - number of outbound queries over UDP
* ``tcp`` - number of outbound queries over TCP
* ``ipv6`` - number of outbound queries over IPv6
* ``ipv4`` - number of outbound queries over IPv4
* ``timeout`` - number of timed-out outbound queries
* ``concurrent`` - number of concurrent queries at the moment
* ``queries`` - number of inbound queries
* ``dropped`` - number of dropped inbound queries
Example:
.. code-block:: lua
print(worker.stats().concurrent)
Using CLI tools
===============
* ``kresd-host.lua`` - a drop-in replacement for *host(1)* utility
Queries the DNS for information.
The hostname is looked up for IPv4 and IPv6 addresses and mail exchangers.
Example:
.. code-block:: bash
$ kresd-host.lua -f root.key -v nic.cz
nic.cz. has address 217.31.205.50 (secure)
nic.cz. has IPv6 address 2001:1488:0:3::2 (secure)
nic.cz. mail is handled by 10 mail.nic.cz. (secure)
nic.cz. mail is handled by 20 mx.nic.cz. (secure)
nic.cz. mail is handled by 30 bh.nic.cz. (secure)
* ``kresd-query.lua`` - run the daemon in zero-configuration mode, perform a query and execute given callback.
This is useful for executing one-shot queries and hooking into the processing of the result,
for example to check if a domain is managed by a certain registrar or if it's signed.
Example:
.. code-block:: bash
$ kresd-query.lua www.sub.nic.cz 'assert(kres.dname2str(req:resolved().zone_cut.name) == "nic.cz.")' && echo "yes"
yes
$ kresd-query.lua -C 'trust_anchors.config("root.keys")' nic.cz 'assert(req:resolved():hasflag(kres.query.DNSSEC_WANT))'
$ echo $?
0
.. _`JSON-encoded`: http://json.org/example
.. _`Learn Lua in 15 minutes`: http://tylerneylon.com/a/learn-lua/
.. _`PowerDNS Recursor`: https://doc.powerdns.com/md/recursor/scripting/
.. _LuaRocks: https://rocks.moonscript.org/
.. _libuv: https://github.com/libuv/libuv
.. _Lua: https://www.lua.org/about.html
.. _LuaJIT: http://luajit.org/luajit.html
.. _luasec: https://luarocks.org/modules/luarocks/luasec
.. _luasocket: https://luarocks.org/modules/luarocks/luasocket
.. _`real process managers`: http://blog.crocodoc.com/post/48703468992/process-managers-the-good-the-bad-and-the-ugly
.. _`systemd socket activation`: http://0pointer.de/blog/projects/socket-activation.html
/* Copyright (C) 2015 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include <assert.h>
#include <uv.h>
#include <contrib/cleanup.h>
#include <libknot/descriptor.h>
#include "lib/cache.h"
#include "lib/cdb.h"
#include "daemon/bindings.h"
#include "daemon/worker.h"
#include "daemon/tls.h"
/** @internal Annotate for static checkers. */
KR_NORETURN int lua_error (lua_State *L);
/** @internal Prefix error with file:line */
static int format_error(lua_State* L, const char *err)
{
lua_Debug d;
lua_getstack(L, 1, &d);
/* error message prefix */
lua_getinfo(L, "Sln", &d);
if (strncmp(d.short_src, "[", 1) != 0) {
lua_pushstring(L, d.short_src);
lua_pushstring(L, ":");
lua_pushnumber(L, d.currentline);
lua_pushstring(L, ": error: ");
lua_concat(L, 4);
} else {
lua_pushstring(L, "error: ");
}
/* error message */
lua_pushstring(L, err);
lua_concat(L, 2);
return 1;
}
static inline struct worker_ctx *wrk_luaget(lua_State *L) {
lua_getglobal(L, "__worker");
struct worker_ctx *worker = lua_touserdata(L, -1);
lua_pop(L, 1);
return worker;
}
/** List loaded modules */
static int mod_list(lua_State *L)
{
struct engine *engine = engine_luaget(L);
lua_newtable(L);
for (unsigned i = 0; i < engine->modules.len; ++i) {
struct kr_module *module = engine->modules.at[i];
lua_pushstring(L, module->name);
lua_rawseti(L, -2, i + 1);
}
return 1;
}
/** Load module. */
static int mod_load(lua_State *L)
{
/* Check parameters */
int n = lua_gettop(L);
if (n != 1 || !lua_isstring(L, 1)) {
format_error(L, "expected 'load(string name)'");
lua_error(L);
}
/* Parse precedence declaration */
auto_free char *declaration = strdup(lua_tostring(L, 1));
if (!declaration) {
return kr_error(ENOMEM);
}
const char *name = strtok(declaration, " ");
const char *precedence = strtok(NULL, " ");
const char *ref = strtok(NULL, " ");
/* Load engine module */
struct engine *engine = engine_luaget(L);
int ret = engine_register(engine, name, precedence, ref);
if (ret != 0) {
if (ret == kr_error(EIDRM)) {
format_error(L, "referenced module not found");
} else {
format_error(L, kr_strerror(ret));
}
lua_error(L);
}
lua_pushboolean(L, 1);
return 1;
}
/** Unload module. */
static int mod_unload(lua_State *L)
{
/* Check parameters */
int n = lua_gettop(L);
if (n != 1 || !lua_isstring(L, 1)) {
format_error(L, "expected 'unload(string name)'");
lua_error(L);
}
/* Unload engine module */
struct engine *engine = engine_luaget(L);
int ret = engine_unregister(engine, lua_tostring(L, 1));
if (ret != 0) {
format_error(L, kr_strerror(ret));
lua_error(L);
}
lua_pushboolean(L, 1);
return 1;
}
int lib_modules(lua_State *L)
{
static const luaL_Reg lib[] = {
{ "list", mod_list },
{ "load", mod_load },
{ "unload", mod_unload },
{ NULL, NULL }
};
register_lib(L, "modules", lib);
return 1;
}
/** Append 'addr = {port = int, udp = bool, tcp = bool}' */
static int net_list_add(const char *key, void *val, void *ext)
{
lua_State *L = (lua_State *)ext;
endpoint_array_t *ep_array = val;
lua_newtable(L);
for (size_t i = ep_array->len; i--;) {
struct endpoint *ep = ep_array->at[i];
lua_pushinteger(L, ep->port);
lua_setfield(L, -2, "port");
lua_pushboolean(L, ep->flags & NET_UDP);
lua_setfield(L, -2, "udp");
lua_pushboolean(L, ep->flags & NET_TCP);
lua_setfield(L, -2, "tcp");
lua_pushboolean(L, ep->flags & NET_TLS);
lua_setfield(L, -2, "tls");
}
lua_setfield(L, -2, key);
return kr_ok();
}
/** List active endpoints. */
static int net_list(lua_State *L)
{
struct engine *engine = engine_luaget(L);
lua_newtable(L);
map_walk(&engine->net.endpoints, net_list_add, L);
return 1;
}
/** Listen on interface address list. */
static int net_listen_iface(lua_State *L, int port, int flags)
{
/* Expand 'addr' key if exists */
lua_getfield(L, 1, "addr");
if (lua_isnil(L, -1)) {
lua_pop(L, 1);
lua_pushvalue(L, 1);
}
/* Bind to address list */
struct engine *engine = engine_luaget(L);
size_t count = lua_rawlen(L, -1);
for (size_t i = 0; i < count; ++i) {
lua_rawgeti(L, -1, i + 1);
int ret = network_listen(&engine->net, lua_tostring(L, -1),
port, flags);
if (ret != 0) {
kr_log_info("[system] bind to '%s#%d' %s\n", lua_tostring(L, -1), port, kr_strerror(ret));
}
lua_pop(L, 1);
}
lua_pushboolean(L, true);
return 1;
}
static bool table_get_flag(lua_State *L, int index, const char *key, bool def)
{
bool result = def;
lua_getfield(L, index, key);
if (lua_isboolean(L, -1)) {
result = lua_toboolean(L, -1);
}
lua_pop(L, 1);
return result;
}
/** Listen on endpoint. */
static int net_listen(lua_State *L)
{
/* Check parameters */
int n = lua_gettop(L);
int port = KR_DNS_PORT;
if (n > 1 && lua_isnumber(L, 2)) {
port = lua_tointeger(L, 2);
}
bool tls = (port == KR_DNS_TLS_PORT);
if (n > 2 && lua_istable(L, 3)) {
tls = table_get_flag(L, 3, "tls", tls);
}
/* Process interface or (address, port, flags) triple. */
int flags = tls ? (NET_TCP|NET_TLS) : (NET_TCP|NET_UDP);
if (lua_istable(L, 1)) {
return net_listen_iface(L, port, flags);
} else if (n < 1 || !lua_isstring(L, 1)) {
format_error(L, "expected 'listen(string addr, number port = 53[, bool tls = false])'");
lua_error(L);
}
/* Bind to the address */
struct engine *engine = engine_luaget(L);
int ret = network_listen(&engine->net, lua_tostring(L, 1), port, flags);
if (ret != 0) {
format_error(L, kr_strerror(ret));
lua_error(L);
}
lua_pushboolean(L, true);
return 1;
}
/** Close endpoint. */
static int net_close(lua_State *L)
{
/* Check parameters */
int n = lua_gettop(L);
if (n < 2) {
format_error(L, "expected 'close(string addr, number port)'");
lua_error(L);
}
/* Close the endpoint */
struct engine *engine = engine_luaget(L);
int ret = network_close(&engine->net, lua_tostring(L, 1), lua_tointeger(L, 2));
lua_pushboolean(L, ret == 0);
return 1;
}
/** List available interfaces. */
static int net_interfaces(lua_State *L)
{
/* Retrieve interface list */
int count = 0;
char buf[INET6_ADDRSTRLEN]; /* https://tools.ietf.org/html/rfc4291 */
uv_interface_address_t *info = NULL;
uv_interface_addresses(&info, &count);
lua_newtable(L);
for (int i = 0; i < count; ++i) {
uv_interface_address_t iface = info[i];
lua_getfield(L, -1, iface.name);
if (lua_isnil(L, -1)) {
lua_pop(L, 1);
lua_newtable(L);
}
/* Address */
lua_getfield(L, -1, "addr");
if (lua_isnil(L, -1)) {
lua_pop(L, 1);
lua_newtable(L);
}
if (iface.address.address4.sin_family == AF_INET) {
uv_ip4_name(&iface.address.address4, buf, sizeof(buf));
} else if (iface.address.address4.sin_family == AF_INET6) {
uv_ip6_name(&iface.address.address6, buf, sizeof(buf));
} else {
buf[0] = '\0';
}
lua_pushstring(L, buf);
lua_rawseti(L, -2, lua_rawlen(L, -2) + 1);
lua_setfield(L, -2, "addr");
/* Hardware address. */
char *p = buf;
memset(buf, 0, sizeof(buf));
for (unsigned k = 0; k < sizeof(iface.phys_addr); ++k) {
sprintf(p, "%.2x:", iface.phys_addr[k] & 0xff);
p += 3;
}
*(p - 1) = '\0';
lua_pushstring(L, buf);
lua_setfield(L, -2, "mac");
/* Push table */
lua_setfield(L, -2, iface.name);
}
uv_free_interface_addresses(info, count);
return 1;
}
/** Set UDP maximum payload size. */
static int net_bufsize(lua_State *L)
{
struct engine *engine = engine_luaget(L);
knot_rrset_t *opt_rr = engine->resolver.opt_rr;
if (!lua_isnumber(L, 1)) {
lua_pushnumber(L, knot_edns_get_payload(opt_rr));
return 1;
}
int bufsize = lua_tointeger(L, 1);
if (bufsize < 512 || bufsize > UINT16_MAX) {
format_error(L, "bufsize must be within <512, 65535>");
lua_error(L);
}
knot_edns_set_payload(opt_rr, (uint16_t) bufsize);
return 0;
}
/** Set TCP pipelining size. */
static int net_pipeline(lua_State *L)
{
struct worker_ctx *worker = wrk_luaget(L);
if (!worker) {
return 0;
}
if (!lua_isnumber(L, 1)) {
lua_pushnumber(L, worker->tcp_pipeline_max);
return 1;
}
int len = lua_tointeger(L, 1);
if (len < 0 || len > UINT16_MAX) {
format_error(L, "tcp_pipeline must be within <0, 65535>");
lua_error(L);
}
worker->tcp_pipeline_max = len;
lua_pushnumber(L, len);
return 1;
}
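/** Get or set the TLS certificate and key files (net.tls). */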
static int net_tls(lua_State *L)
{
struct engine *engine = engine_luaget(L);
if (!engine) {
return 0;
}
struct network *net = &engine->net;
if (!net) {
return 0;
}
/* Only return current credentials. */
if (lua_gettop(L) == 0) {
/* No credentials configured yet. */
if (!net->tls_credentials) {
return 0;
}
lua_newtable(L);
lua_pushstring(L, net->tls_credentials->tls_cert);
lua_setfield(L, -2, "cert_file");
lua_pushstring(L, net->tls_credentials->tls_key);
lua_setfield(L, -2, "key_file");
return 1;
}
if ((lua_gettop(L) != 2) || !lua_isstring(L, 1) || !lua_isstring(L, 2)) {
lua_pushstring(L, "net.tls takes two parameters: (\"cert_file\", \"key_file\")");
lua_error(L);
}
int r = tls_certificate_set(net, lua_tostring(L, 1), lua_tostring(L, 2));
if (r != 0) {
lua_pushstring(L, strerror(ENOMEM));
lua_error(L);
}
lua_pushboolean(L, true);
return 1;
}
int lib_net(lua_State *L)
{
static const luaL_Reg lib[] = {
{ "list", net_list },
{ "listen", net_listen },
{ "close", net_close },
{ "interfaces", net_interfaces },
{ "bufsize", net_bufsize },
{ "tcp_pipeline", net_pipeline },
{ "tls", net_tls },
{ NULL, NULL }
};
register_lib(L, "net", lib);
return 1;
}
/** Return available cached backends. */
static int cache_backends(lua_State *L)
{
struct engine *engine = engine_luaget(L);
lua_newtable(L);
for (unsigned i = 0; i < engine->backends.len; ++i) {
const struct kr_cdb_api *api = engine->backends.at[i];
lua_pushboolean(L, api == engine->resolver.cache.api);
lua_setfield(L, -2, api->name);
}
return 1;
}
/** Return number of cached records. */
static int cache_count(lua_State *L)
{
struct engine *engine = engine_luaget(L);
const struct kr_cdb_api *api = engine->resolver.cache.api;
/* First key is a version counter, omit it. */
struct kr_cache *cache = &engine->resolver.cache;
if (kr_cache_is_open(cache)) {
lua_pushinteger(L, api->count(cache->db) - 1);
return 1;
}
return 0;
}
/** Return cache statistics. */
static int cache_stats(lua_State *L)
{
struct engine *engine = engine_luaget(L);
struct kr_cache *cache = &engine->resolver.cache;
lua_newtable(L);
lua_pushnumber(L, cache->stats.hit);
lua_setfield(L, -2, "hit");
lua_pushnumber(L, cache->stats.miss);
lua_setfield(L, -2, "miss");
lua_pushnumber(L, cache->stats.insert);
lua_setfield(L, -2, "insert");
lua_pushnumber(L, cache->stats.delete);
lua_setfield(L, -2, "delete");
return 1;
}
static const struct kr_cdb_api *cache_select(struct engine *engine, const char **conf)
{
/* Return default backend */
if (*conf == NULL || !strstr(*conf, "://")) {
return engine->backends.at[0];
}
/* Find storage backend from config prefix */
for (unsigned i = 0; i < engine->backends.len; ++i) {
const struct kr_cdb_api *api = engine->backends.at[i];
if (strncmp(*conf, api->name, strlen(api->name)) == 0) {
*conf += strlen(api->name) + strlen("://");
return api;
}
}
return NULL;
}
/** Open cache */
static int cache_open(lua_State *L)
{
/* Check parameters */
int n = lua_gettop(L);
if (n < 1 || !lua_isnumber(L, 1)) {
format_error(L, "expected 'open(number max_size, string config = \"\")'");
lua_error(L);
}
/* Select cache storage backend */
struct engine *engine = engine_luaget(L);
unsigned cache_size = lua_tonumber(L, 1);
const char *conf = n > 1 ? lua_tostring(L, 2) : NULL;
const char *uri = conf;
const struct kr_cdb_api *api = cache_select(engine, &conf);
if (!api) {
format_error(L, "unsupported cache backend");
lua_error(L);
}
/* Close if already open */
kr_cache_close(&engine->resolver.cache);
/* Reopen cache */
struct kr_cdb_opts opts = {
(conf && strlen(conf)) ? conf : ".",
cache_size
};
int ret = kr_cache_open(&engine->resolver.cache, api, &opts, engine->pool);
if (ret != 0) {
format_error(L, "can't open cache");
lua_error(L);
}
/* Store current configuration */
lua_getglobal(L, "cache");
lua_pushstring(L, "current_size");
lua_pushnumber(L, cache_size);
lua_rawset(L, -3);
lua_pushstring(L, "current_storage");
lua_pushstring(L, uri);
lua_rawset(L, -3);
lua_pop(L, 1);
lua_pushboolean(L, 1);
return 1;
}
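/** Close cache. */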
static int cache_close(lua_State *L)
{
struct engine *engine = engine_luaget(L);
struct kr_cache *cache = &engine->resolver.cache;
if (!kr_cache_is_open(cache)) {
return 0;
}
kr_cache_close(cache);
lua_getglobal(L, "cache");
lua_pushstring(L, "current_size");
lua_pushnumber(L, 0);
lua_rawset(L, -3);
lua_pop(L, 1);
lua_pushboolean(L, 1);
return 1;
}
/** @internal Prefix walk. */
static int cache_prefixed(struct kr_cache *cache, const char *args, knot_db_val_t *results, int maxresults)
{
/* Decode parameters */
uint8_t namespace = 'R';
char *extra = (char *)strchr(args, ' ');
if (extra != NULL) {
extra[0] = '\0';
namespace = extra[1];
}
/* Convert to domain name */
uint8_t buf[KNOT_DNAME_MAXLEN];
if (!knot_dname_from_str(buf, args, sizeof(buf))) {
return kr_error(EINVAL);
}
/* Start prefix search */
return kr_cache_match(cache, namespace, buf, results, maxresults);
}
/** @internal Delete iterated key. */
static int cache_remove_prefix(struct kr_cache *cache, const char *args)
{
/* Check if we can remove */
if (!cache || !cache->api || !cache->api->remove) {
return kr_error(ENOSYS);
}
static knot_db_val_t result_set[1000];
int ret = cache_prefixed(cache, args, result_set, 1000);
if (ret < 0) {
return ret;
}
/* Duplicate the result set, as removing entries from the cache
 * would invalidate the keys it points to. */
for (int i = 0; i < ret; ++i) {
void *dst = malloc(result_set[i].len);
if (!dst) {
return kr_error(ENOMEM);
}
memcpy(dst, result_set[i].data, result_set[i].len);
result_set[i].data = dst;
}
cache->api->remove(cache->db, result_set, ret);
/* Free keys */
for (int i = 0; i < ret; ++i) {
free(result_set[i].data);
}
return ret;
}
/** Prune expired/invalid records. */
static int cache_prune(lua_State *L)
{
struct engine *engine = engine_luaget(L);
struct kr_cache *cache = &engine->resolver.cache;
if (!kr_cache_is_open(cache)) {
return 0;
}
/* Check parameters */
int prune_max = UINT16_MAX;
int n = lua_gettop(L);
if (n >= 1 && lua_isnumber(L, 1)) {
prune_max = lua_tointeger(L, 1);
}
/* Check if API supports pruning. */
int ret = kr_error(ENOSYS);
if (cache->api->prune) {
ret = cache->api->prune(cache->db, prune_max);
}
/* Commit and format result. */
if (ret < 0) {
format_error(L, kr_strerror(ret));
lua_error(L);
}
lua_pushinteger(L, ret);
return 1;
}
/** Clear all records. */
static int cache_clear(lua_State *L)
{
struct engine *engine = engine_luaget(L);
struct kr_cache *cache = &engine->resolver.cache;
if (!kr_cache_is_open(cache)) {
return 0;
}
/* Check parameters */
const char *args = NULL;
int n = lua_gettop(L);
if (n >= 1 && lua_isstring(L, 1)) {
args = lua_tostring(L, 1);
}
/* Clear a sub-tree in cache. */
if (args && strlen(args) > 0) {
int ret = cache_remove_prefix(cache, args);
if (ret < 0) {
format_error(L, kr_strerror(ret));
lua_error(L);
}
lua_pushinteger(L, ret);
return 1;
}
/* Clear cache. */
int ret = kr_cache_clear(cache);
if (ret < 0) {
format_error(L, kr_strerror(ret));
lua_error(L);
}
/* Clear reputation tables */
lru_deinit(engine->resolver.cache_rtt);
lru_deinit(engine->resolver.cache_rep);
lru_init(engine->resolver.cache_rtt, LRU_RTT_SIZE);
lru_init(engine->resolver.cache_rep, LRU_REP_SIZE);
lru_deinit(engine->resolver.cache_cookie);
lru_init(engine->resolver.cache_cookie, LRU_COOKIES_SIZE);
lua_pushboolean(L, true);
return 1;
}
/** @internal Dump cache key into table on Lua stack. */
static void cache_dump_key(lua_State *L, knot_db_val_t *key)
{
char buf[KNOT_DNAME_MAXLEN];
/* Extract type */
uint16_t type = 0;
const char *endp = (const char *)key->data + key->len - sizeof(uint16_t);
memcpy(&type, endp, sizeof(uint16_t));
endp -= 1;
/* Extract domain name */
char *dst = buf;
const char *scan = endp - 1;
while (scan > (const char *)key->data) {
if (*scan == '\0') {
const size_t lblen = endp - scan - 1;
memcpy(dst, scan + 1, lblen);
dst += lblen;
*dst++ = '.';
endp = scan;
}
--scan;
}
memcpy(dst, scan + 1, endp - scan);
/* If name typemap doesn't exist yet, create it */
lua_getfield(L, -1, buf);
if (lua_isnil(L, -1)) {
lua_pop(L, 1);
lua_newtable(L);
}
/* Append to typemap */
char type_buf[16] = { '\0' };
knot_rrtype_to_string(type, type_buf, sizeof(type_buf));
lua_pushboolean(L, true);
lua_setfield(L, -2, type_buf);
/* Set name typemap */
lua_setfield(L, -2, buf);
}
/** Query cached records. */
static int cache_get(lua_State *L)
{
struct engine *engine = engine_luaget(L);
struct kr_cache *cache = &engine->resolver.cache;
if (!kr_cache_is_open(cache)) {
return 0;
}
/* Check parameters */
int n = lua_gettop(L);
if (n < 1 || !lua_isstring(L, 1)) {
format_error(L, "expected 'cache.get(string key)'");
lua_error(L);
}
/* Retrieve the key prefix. */
const char *args = lua_tostring(L, 1);
/* Retrieve set of keys */
static knot_db_val_t result_set[100];
int ret = cache_prefixed(cache, args, result_set, 100);
if (ret < 0) {
format_error(L, kr_strerror(ret));
lua_error(L);
}
/* Format output */
lua_newtable(L);
for (int i = 0; i < ret; ++i) {
cache_dump_key(L, &result_set[i]);
}
return 1;
}
int lib_cache(lua_State *L)
{
static const luaL_Reg lib[] = {
{ "backends", cache_backends },
{ "count", cache_count },
{ "stats", cache_stats },
{ "open", cache_open },
{ "close", cache_close },
{ "prune", cache_prune },
{ "clear", cache_clear },
{ "get", cache_get },
{ NULL, NULL }
};
register_lib(L, "cache", lib);
return 1;
}
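/** @internal Release the Lua registry reference and free the timer handle. */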
static void event_free(uv_timer_t *timer)
{
struct worker_ctx *worker = timer->loop->data;
lua_State *L = worker->engine->L;
int ref = (intptr_t) timer->data;
luaL_unref(L, LUA_REGISTRYINDEX, ref);
free(timer);
}
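/** @internal Call a Lua function with @a argc arguments already on the stack; print errors and clear the stack. */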
static int execute_callback(lua_State *L, int argc)
{
int ret = engine_pcall(L, argc);
if (ret != 0) {
fprintf(stderr, "error: %s\n", lua_tostring(L, -1));
}
/* Clear the stack; the event and/or anything returned may still be on it */
lua_settop(L, 0);
return ret;
}
static void event_callback(uv_timer_t *timer)
{
struct worker_ctx *worker = timer->loop->data;
lua_State *L = worker->engine->L;
/* Retrieve callback and execute */
lua_rawgeti(L, LUA_REGISTRYINDEX, (intptr_t) timer->data);
lua_rawgeti(L, -1, 1);
lua_pushinteger(L, (intptr_t) timer->data);
int ret = execute_callback(L, 1);
/* Free callback if not recurrent or an error */
if (ret != 0 || (uv_timer_get_repeat(timer) == 0 && uv_is_active((uv_handle_t *)timer) == 0)) {
if (!uv_is_closing((uv_handle_t *)timer)) {
uv_close((uv_handle_t *)timer, (uv_close_cb) event_free);
}
}
}
static void event_fdcallback(uv_poll_t* handle, int status, int events)
{
struct worker_ctx *worker = handle->loop->data;
lua_State *L = worker->engine->L;
/* Retrieve callback and execute */
lua_rawgeti(L, LUA_REGISTRYINDEX, (intptr_t) handle->data);
lua_rawgeti(L, -1, 1);
lua_pushinteger(L, (intptr_t) handle->data);
lua_pushinteger(L, status);
lua_pushinteger(L, events);
int ret = execute_callback(L, 3);
/* Free callback if not recurrent or an error */
if (ret != 0) {
if (!uv_is_closing((uv_handle_t *)handle)) {
uv_close((uv_handle_t *)handle, (uv_close_cb) event_free);
}
}
}
static int event_sched(lua_State *L, unsigned timeout, unsigned repeat)
{
uv_timer_t *timer = malloc(sizeof(*timer));
if (!timer) {
format_error(L, "out of memory");
lua_error(L);
}
/* Start timer with the reference */
uv_loop_t *loop = uv_default_loop();
uv_timer_init(loop, timer);
int ret = uv_timer_start(timer, event_callback, timeout, repeat);
if (ret != 0) {
free(timer);
format_error(L, "couldn't start the event");
lua_error(L);
}
/* Save callback and timer in registry */
lua_newtable(L);
lua_pushvalue(L, 2);
lua_rawseti(L, -2, 1);
lua_pushlightuserdata(L, timer);
lua_rawseti(L, -2, 2);
int ref = luaL_ref(L, LUA_REGISTRYINDEX);
/* Save reference to the timer */
timer->data = (void *) (intptr_t)ref;
lua_pushinteger(L, ref);
return 1;
}
static int event_after(lua_State *L)
{
/* Check parameters */
int n = lua_gettop(L);
if (n < 2 || !lua_isnumber(L, 1) || !lua_isfunction(L, 2)) {
format_error(L, "expected 'after(number timeout, function)'");
lua_error(L);
}
return event_sched(L, lua_tonumber(L, 1), 0);
}
static int event_recurrent(lua_State *L)
{
/* Check parameters */
int n = lua_gettop(L);
if (n < 2 || !lua_isnumber(L, 1) || !lua_isfunction(L, 2)) {
format_error(L, "expected 'recurrent(number interval, function)'");
lua_error(L);
}
return event_sched(L, 0, lua_tonumber(L, 1));
}
static int event_cancel(lua_State *L)
{
int n = lua_gettop(L);
if (n < 1 || !lua_isnumber(L, 1)) {
format_error(L, "expected 'cancel(number event)'");
lua_error(L);
}
/* Fetch event if it exists */
lua_rawgeti(L, LUA_REGISTRYINDEX, lua_tointeger(L, 1));
if (!lua_istable(L, -1)) {
lua_pushboolean(L, false);
return 1;
}
/* Close the timer */
lua_rawgeti(L, -1, 2);
uv_handle_t *timer = lua_touserdata(L, -1);
if (!uv_is_closing(timer)) {
uv_close(timer, (uv_close_cb) event_free);
}
lua_pushboolean(L, true);
return 1;
}
static int event_reschedule(lua_State *L)
{
int n = lua_gettop(L);
if (n < 2 || !lua_isnumber(L, 1) || !lua_isnumber(L, 2)) {
format_error(L, "expected 'reschedule(number event, number timeout)'");
lua_error(L);
}
/* Fetch event if it exists */
lua_rawgeti(L, LUA_REGISTRYINDEX, lua_tointeger(L, 1));
if (!lua_istable(L, -1)) {
lua_pushboolean(L, false);
return 1;
}
/* Reschedule the timer */
lua_rawgeti(L, -1, 2);
uv_handle_t *timer = lua_touserdata(L, -1);
if (!uv_is_closing(timer)) {
if (uv_is_active(timer)) {
uv_timer_stop((uv_timer_t *)timer);
}
int ret = uv_timer_start((uv_timer_t *)timer, event_callback, lua_tointeger(L, 2), 0);
if (ret != 0) {
event_cancel(L);
lua_pushboolean(L, false);
return 1;
}
}
lua_pushboolean(L, true);
return 1;
}
static int event_fdwatch(lua_State *L)
{
/* Check parameters */
int n = lua_gettop(L);
if (n < 2 || !lua_isnumber(L, 1) || !lua_isfunction(L, 2)) {
format_error(L, "expected 'socket(number fd, function)'");
lua_error(L);
}
uv_poll_t *handle = malloc(sizeof(*handle));
if (!handle) {
format_error(L, "out of memory");
lua_error(L);
}
/* Start timer with the reference */
int sock = lua_tonumber(L, 1);
uv_loop_t *loop = uv_default_loop();
#if defined(__APPLE__) || defined(__FreeBSD__)
/* libuv is buggy and fails to create a poller for
 * kqueue sockets, as they can't be fcntl'd to non-blocking mode,
 * so we pass it a copy of standard input and then
 * swap in the real socket before starting the poller.
 */
int decoy_fd = dup(STDIN_FILENO);
int ret = uv_poll_init(loop, handle, decoy_fd);
if (ret == 0) {
handle->io_watcher.fd = sock;
}
close(decoy_fd);
#else
int ret = uv_poll_init(loop, handle, sock);
#endif
if (ret == 0) {
ret = uv_poll_start(handle, UV_READABLE, event_fdcallback);
}
if (ret != 0) {
free(handle);
format_error(L, "couldn't start event poller");
lua_error(L);
}
/* Save callback and timer in registry */
lua_newtable(L);
lua_pushvalue(L, 2);
lua_rawseti(L, -2, 1);
lua_pushlightuserdata(L, handle);
lua_rawseti(L, -2, 2);
int ref = luaL_ref(L, LUA_REGISTRYINDEX);
/* Save reference to the timer */
handle->data = (void *) (intptr_t)ref;
lua_pushinteger(L, ref);
return 1;
}
int lib_event(lua_State *L)
{
static const luaL_Reg lib[] = {
{ "after", event_after },
{ "recurrent", event_recurrent },
{ "cancel", event_cancel },
{ "socket", event_fdwatch },
{ "reschedule", event_reschedule },
{ NULL, NULL }
};
register_lib(L, "event", lib);
return 1;
}
/* @internal Call the Lua callback stored in baton. */
static void resolve_callback(struct worker_ctx *worker, struct kr_request *req, void *baton)
{
assert(worker);
assert(req);
assert(baton);
lua_State *L = worker->engine->L;
intptr_t cb_ref = (intptr_t) baton;
lua_rawgeti(L, LUA_REGISTRYINDEX, cb_ref);
luaL_unref(L, LUA_REGISTRYINDEX, cb_ref);
lua_pushlightuserdata(L, req->answer);
lua_pushlightuserdata(L, req);
(void) execute_callback(L, 2);
}
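/** Start asynchronous resolution of a name (worker.resolve). */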
static int wrk_resolve(lua_State *L)
{
struct worker_ctx *worker = wrk_luaget(L);
if (!worker) {
return 0;
}
/* Create query packet */
knot_pkt_t *pkt = knot_pkt_new(NULL, KNOT_EDNS_MAX_UDP_PAYLOAD, NULL);
if (!pkt) {
lua_pushstring(L, strerror(ENOMEM));
lua_error(L);
}
uint8_t dname[KNOT_DNAME_MAXLEN];
knot_dname_from_str(dname, lua_tostring(L, 1), sizeof(dname));
/* Check class and type */
uint16_t rrtype = lua_tointeger(L, 2);
if (!lua_isnumber(L, 2)) {
lua_pushstring(L, "invalid RR type");
lua_error(L);
}
uint16_t rrclass = lua_tointeger(L, 3);
if (!lua_isnumber(L, 3)) { /* Default class is IN */
rrclass = KNOT_CLASS_IN;
}
knot_pkt_put_question(pkt, dname, rrclass, rrtype);
knot_wire_set_rd(pkt->wire);
/* Add OPT RR */
pkt->opt_rr = knot_rrset_copy(worker->engine->resolver.opt_rr, NULL);
if (!pkt->opt_rr) {
return kr_error(ENOMEM);
}
/* Add completion callback */
int ret = 0;
unsigned options = lua_tointeger(L, 4);
if (lua_isfunction(L, 5)) {
/* Store callback in registry */
lua_pushvalue(L, 5);
int cb = luaL_ref(L, LUA_REGISTRYINDEX);
ret = worker_resolve(worker, pkt, options, resolve_callback, (void *) (intptr_t)cb);
} else {
ret = worker_resolve(worker, pkt, options, NULL, NULL);
}
knot_rrset_free(&pkt->opt_rr, NULL);
knot_pkt_free(&pkt);
lua_pushboolean(L, ret == 0);
return 1;
}
static inline double getseconds(uv_timeval_t *tv)
{
return (double)tv->tv_sec + 0.000001*((double)tv->tv_usec);
}
/** Return worker statistics. */
static int wrk_stats(lua_State *L)
{
struct worker_ctx *worker = wrk_luaget(L);
if (!worker) {
return 0;
}
lua_newtable(L);
lua_pushnumber(L, worker->stats.concurrent);
lua_setfield(L, -2, "concurrent");
lua_pushnumber(L, worker->stats.udp);
lua_setfield(L, -2, "udp");
lua_pushnumber(L, worker->stats.tcp);
lua_setfield(L, -2, "tcp");
lua_pushnumber(L, worker->stats.ipv6);
lua_setfield(L, -2, "ipv6");
lua_pushnumber(L, worker->stats.ipv4);
lua_setfield(L, -2, "ipv4");
lua_pushnumber(L, worker->stats.queries);
lua_setfield(L, -2, "queries");
lua_pushnumber(L, worker->stats.dropped);
lua_setfield(L, -2, "dropped");
lua_pushnumber(L, worker->stats.timeout);
lua_setfield(L, -2, "timeout");
/* Add subset of rusage that represents counters. */
uv_rusage_t rusage;
if (uv_getrusage(&rusage) == 0) {
lua_pushnumber(L, getseconds(&rusage.ru_utime));
lua_setfield(L, -2, "usertime");
lua_pushnumber(L, getseconds(&rusage.ru_stime));
lua_setfield(L, -2, "systime");
lua_pushnumber(L, rusage.ru_majflt);
lua_setfield(L, -2, "pagefaults");
lua_pushnumber(L, rusage.ru_nswap);
lua_setfield(L, -2, "swaps");
lua_pushnumber(L, rusage.ru_nvcsw + rusage.ru_nivcsw);
lua_setfield(L, -2, "csw");
}
/* Get RSS */
size_t rss = 0;
if (uv_resident_set_memory(&rss) == 0) {
lua_pushnumber(L, rss);
lua_setfield(L, -2, "rss");
}
return 1;
}
int lib_worker(lua_State *L)
{
static const luaL_Reg lib[] = {
{ "resolve", wrk_resolve },
{ "stats", wrk_stats },
{ NULL, NULL }
};
register_lib(L, "worker", lib);
return 1;
}
/* Copyright (C) 2015 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
/**
* Bindings to engine services, see \a https://www.lua.org/manual/5.2/manual.html#luaL_newlib for the reference.
*/
#pragma once
#include <lua.h>
#include <lualib.h>
#include <lauxlib.h>
#include "daemon/engine.h"
/** @internal Compatibility wrapper for Lua 5.0 - 5.2 */
#if LUA_VERSION_NUM >= 502
#define register_lib(L, name, lib) \
luaL_newlib((L), (lib))
#else
#define lua_rawlen(L, obj) \
lua_objlen((L), (obj))
#define register_lib(L, name, lib) \
luaL_openlib((L), (name), (lib), 0)
/* Adapted from Lua 5.2.0 */
static inline void luaL_setfuncs (lua_State *L, const luaL_Reg *l, int nup) {
luaL_checkstack(L, nup+1, "too many upvalues");
for (; l->name != NULL; l++) { /* fill the table with given functions */
int i;
lua_pushstring(L, l->name);
for (i = 0; i < nup; i++) /* copy upvalues to the top */
lua_pushvalue(L, -(nup+1));
lua_pushcclosure(L, l->func, nup); /* closure with those upvalues */
lua_settable(L, -(nup + 3));
}
lua_pop(L, nup); /* remove upvalues */
}
#endif
/**
* Load 'modules' package.
* @param L Lua state
* @return number of packages to load
*/
int lib_modules(lua_State *L);
/**
* Load 'net' package.
* @param L Lua state
* @return number of packages to load
*/
int lib_net(lua_State *L);
/**
* Load 'cache' package.
* @param L Lua state
* @return number of packages to load
*/
int lib_cache(lua_State *L);
/**
* Load 'event' package.
* @param L Lua state
* @return number of packages to load
*/
int lib_event(lua_State *L);
/**
* Load worker API.
* @param L Lua state
* @return number of packages to load
*/
int lib_worker(lua_State *L);
/* Copyright (C) CZ.NIC, z.s.p.o. <knot-resolver@labs.nic.cz>
* SPDX-License-Identifier: GPL-3.0-or-later
*/
#pragma once
#include <lua.h>
/** Make all the bindings accessible from the lua state,
* i.e. define those lua tables. */
void kr_bindings_register(lua_State *L);
/* Copyright (C) CZ.NIC, z.s.p.o. <knot-resolver@labs.nic.cz>
* SPDX-License-Identifier: GPL-3.0-or-later
*/
#include "daemon/bindings/impl.h"
/** @internal return cache, or throw lua error if not open */
static struct kr_cache * cache_assert_open(lua_State *L)
{
struct kr_cache *cache = &the_resolver->cache;
if (kr_fails_assert(cache) || !kr_cache_is_open(cache))
lua_error_p(L, "no cache is open yet, use cache.open() or cache.size, etc.");
return cache;
}
/** Return available cached backends. */
static int cache_backends(lua_State *L)
{
lua_newtable(L);
for (unsigned i = 0; i < the_engine->backends.len; ++i) {
const struct kr_cdb_api *api = the_engine->backends.at[i];
lua_pushboolean(L, api == the_resolver->cache.api);
lua_setfield(L, -2, api->name);
}
return 1;
}
/** Return number of cached records. */
static int cache_count(lua_State *L)
{
struct kr_cache *cache = cache_assert_open(L);
int count = cache->api->count(cache->db, &cache->stats);
if (count >= 0) {
/* First key is a version counter, omit it if nonempty. */
lua_pushinteger(L, count ? count - 1 : 0);
return 1;
}
return 0;
}
/** Return time of last checkpoint, or re-set it if passed `true`. */
static int cache_checkpoint(lua_State *L)
{
struct kr_cache *cache = cache_assert_open(L);
if (lua_gettop(L) == 0) { /* Return the current value. */
lua_newtable(L);
lua_pushnumber(L, cache->checkpoint_monotime);
lua_setfield(L, -2, "monotime");
lua_newtable(L);
lua_pushnumber(L, cache->checkpoint_walltime.tv_sec);
lua_setfield(L, -2, "sec");
lua_pushnumber(L, cache->checkpoint_walltime.tv_usec);
lua_setfield(L, -2, "usec");
lua_setfield(L, -2, "walltime");
return 1;
}
if (lua_gettop(L) != 1 || !lua_isboolean(L, 1) || !lua_toboolean(L, 1))
lua_error_p(L, "cache.checkpoint() takes no parameters or a true value");
kr_cache_make_checkpoint(cache);
return 1;
}
/** Return cache statistics. */
static int cache_stats(lua_State *L)
{
struct kr_cache *cache = cache_assert_open(L);
lua_newtable(L);
#define add_stat(name) \
lua_pushinteger(L, (cache->stats.name)); \
lua_setfield(L, -2, #name)
add_stat(open);
add_stat(close);
add_stat(count);
cache->stats.count_entries = cache->api->count(cache->db, &cache->stats);
add_stat(count_entries);
add_stat(clear);
add_stat(commit);
add_stat(read);
add_stat(read_miss);
add_stat(write);
add_stat(remove);
add_stat(remove_miss);
add_stat(match);
add_stat(match_miss);
add_stat(read_leq);
add_stat(read_leq_miss);
/* usage_percent statistics special case - double */
cache->stats.usage_percent = cache->api->usage_percent(cache->db);
lua_pushnumber(L, cache->stats.usage_percent);
lua_setfield(L, -2, "usage_percent");
#undef add_stat
return 1;
}
static const struct kr_cdb_api *cache_select(const char **conf)
{
/* Return default backend */
if (*conf == NULL || !strstr(*conf, "://")) {
return the_engine->backends.at[0];
}
/* Find storage backend from config prefix */
for (unsigned i = 0; i < the_engine->backends.len; ++i) {
const struct kr_cdb_api *api = the_engine->backends.at[i];
if (strncmp(*conf, api->name, strlen(api->name)) == 0) {
*conf += strlen(api->name) + strlen("://");
return api;
}
}
return NULL;
}
static int cache_max_ttl(lua_State *L)
{
struct kr_cache *cache = cache_assert_open(L);
int n = lua_gettop(L);
if (n > 0) {
if (!lua_isnumber(L, 1) || n > 1)
lua_error_p(L, "expected 'max_ttl(number ttl)'");
uint32_t min = cache->ttl_min;
int64_t ttl = lua_tointeger(L, 1);
if (ttl < 1 || ttl < min || ttl > TTL_MAX_MAX) {
lua_error_p(L,
"max_ttl must be larger than minimum TTL, and in range <1, "
STR(TTL_MAX_MAX) ">");
}
cache->ttl_max = ttl;
}
lua_pushinteger(L, cache->ttl_max);
return 1;
}
static int cache_min_ttl(lua_State *L)
{
struct kr_cache *cache = cache_assert_open(L);
int n = lua_gettop(L);
if (n > 0) {
if (!lua_isnumber(L, 1))
lua_error_p(L, "expected 'min_ttl(number ttl)'");
uint32_t max = cache->ttl_max;
int64_t ttl = lua_tointeger(L, 1);
if (ttl < 0 || ttl > max || ttl > TTL_MAX_MAX) {
lua_error_p(L,
"min_ttl must be smaller than maximum TTL, and in range <0, "
STR(TTL_MAX_MAX) ">");
}
cache->ttl_min = ttl;
}
lua_pushinteger(L, cache->ttl_min);
return 1;
}
/** Open cache */
static int cache_open(lua_State *L)
{
/* Check parameters */
int n = lua_gettop(L);
if (n < 1 || !lua_isnumber(L, 1))
lua_error_p(L, "expected 'open(number max_size, string config = \"\")'");
/* Select cache storage backend */
lua_Integer csize_lua = lua_tointeger(L, 1);
if (!(csize_lua >= 8192 && csize_lua < SIZE_MAX)) { /* min. is basically arbitrary */
lua_error_p(L, "invalid cache size specified, it must be in range <8192, "
STR(SIZE_MAX) ">");
}
size_t cache_size = csize_lua;
const char *conf = n > 1 ? lua_tostring(L, 2) : NULL;
const char *uri = conf;
const struct kr_cdb_api *api = cache_select(&conf);
if (!api)
lua_error_p(L, "unsupported cache backend");
/* Close if already open */
kr_cache_close(&the_resolver->cache);
/* Reopen cache */
struct kr_cdb_opts opts = {
.is_cache = true,
.path = (conf && strlen(conf)) ? conf : ".",
.maxsize = cache_size,
};
int ret = kr_cache_open(&the_resolver->cache, api, &opts, &the_engine->pool);
if (ret != 0) {
char cwd[PATH_MAX];
get_workdir(cwd, sizeof(cwd));
return luaL_error(L, "can't open cache path '%s'; working directory '%s'; %s",
opts.path, cwd, kr_strerror(ret));
}
/* Let's check_health() every five seconds to avoid keeping an old cache alive
 * even when there is no other work to do. */
ret = kr_cache_check_health(&the_resolver->cache, 5000);
if (ret != 0) {
kr_log_error(CACHE, "periodic health check failed (ignored): %s\n",
kr_strerror(ret));
}
/* Store current configuration */
lua_getglobal(L, "cache");
lua_pushstring(L, "current_size");
lua_pushnumber(L, cache_size);
lua_rawset(L, -3);
lua_pushstring(L, "current_storage");
lua_pushstring(L, uri);
lua_rawset(L, -3);
lua_pop(L, 1);
lua_pushboolean(L, 1);
return 1;
}
static int cache_close(lua_State *L)
{
struct kr_cache *cache = &the_resolver->cache;
if (!kr_cache_is_open(cache)) {
return 0;
}
kr_cache_close(cache);
lua_getglobal(L, "cache");
lua_pushstring(L, "current_size");
lua_pushnumber(L, 0);
lua_rawset(L, -3);
lua_pop(L, 1);
lua_pushboolean(L, 1);
return 1;
}
#if 0
/** @internal Prefix walk. */
static int cache_prefixed(struct kr_cache *cache, const char *prefix, bool exact_name,
knot_db_val_t keyval[][2], int maxcount)
{
/* Convert to domain name */
uint8_t buf[KNOT_DNAME_MAXLEN];
if (!knot_dname_from_str(buf, prefix, sizeof(buf))) {
return kr_error(EINVAL);
}
/* Start prefix search */
return kr_cache_match(cache, buf, exact_name, keyval, maxcount);
}
#endif
/** Clear everything. */
static int cache_clear_everything(lua_State *L)
{
struct kr_cache *cache = cache_assert_open(L);
/* Clear records and packets. */
int ret = kr_cache_clear(cache);
lua_error_maybe(L, ret);
/* Clear reputation tables */
lru_reset(the_resolver->cache_cookie);
lua_pushboolean(L, true);
return 1;
}
#if 0
/** @internal Dump cache key into table on Lua stack. */
static void cache_dump(lua_State *L, knot_db_val_t keyval[])
{
knot_dname_t dname[KNOT_DNAME_MAXLEN];
char name[KNOT_DNAME_TXT_MAXLEN];
uint16_t type;
int ret = kr_unpack_cache_key(keyval[0], dname, &type);
if (ret < 0) {
return;
}
ret = !knot_dname_to_str(name, dname, sizeof(name));
if (kr_fails_assert(!ret)) return;
/* If name typemap doesn't exist yet, create it */
lua_getfield(L, -1, name);
if (lua_isnil(L, -1)) {
lua_pop(L, 1);
lua_newtable(L);
}
/* Append to typemap */
char type_buf[KR_RRTYPE_STR_MAXLEN] = { '\0' };
knot_rrtype_to_string(type, type_buf, sizeof(type_buf));
lua_pushboolean(L, true);
lua_setfield(L, -2, type_buf);
/* Set name typemap */
lua_setfield(L, -2, name);
}
/** Query cached records. TODO: fix caveats in ./README.rst documentation? */
static int cache_get(lua_State *L)
{
//struct kr_cache *cache = cache_assert_open(L); // to be fixed soon
/* Check parameters */
int n = lua_gettop(L);
if (n < 1 || !lua_isstring(L, 1))
lua_error_p(L, "expected 'cache.get(string key)'");
/* Retrieve set of keys */
const char *prefix = lua_tostring(L, 1);
knot_db_val_t keyval[100][2];
int ret = cache_prefixed(cache, prefix, false/*FIXME*/, keyval, 100);
lua_error_maybe(L, ret);
/* Format output */
lua_newtable(L);
for (int i = 0; i < ret; ++i) {
cache_dump(L, keyval[i]);
}
return 1;
}
#endif
static int cache_get(lua_State *L)
{
lua_error_maybe(L, ENOSYS);
return kr_error(ENOSYS); /* doesn't happen */
}
/** Set the time interval for cleaning the rtt cache.
 * Servers with score >= KR_NS_TIMEOUT will be cleaned after
 * this interval has elapsed, so that they can participate
 * in NS elections again. */
static int cache_ns_tout(lua_State *L)
{
/* Check parameters */
int n = lua_gettop(L);
if (n < 1) {
lua_pushinteger(L, the_resolver->cache_rtt_tout_retry_interval);
return 1;
}
if (!lua_isnumber(L, 1))
lua_error_p(L, "expected 'cache.ns_tout(interval in ms)'");
lua_Integer interval_lua = lua_tointeger(L, 1);
if (!(interval_lua > 0 && interval_lua < UINT_MAX)) {
lua_error_p(L, "invalid interval specified, it must be in range > 0, < "
STR(UINT_MAX));
}
the_resolver->cache_rtt_tout_retry_interval = interval_lua;
lua_pushinteger(L, the_resolver->cache_rtt_tout_retry_interval);
return 1;
}
int kr_bindings_cache(lua_State *L)
{
static const luaL_Reg lib[] = {
{ "backends", cache_backends },
{ "count", cache_count },
{ "stats", cache_stats },
{ "checkpoint", cache_checkpoint },
{ "open", cache_open },
{ "close", cache_close },
{ "clear_everything", cache_clear_everything },
{ "get", cache_get },
{ "max_ttl", cache_max_ttl },
{ "min_ttl", cache_min_ttl },
{ "ns_tout", cache_ns_tout },
{ NULL, NULL }
};
luaL_register(L, "cache", lib);
return 1;
}
.. SPDX-License-Identifier: GPL-3.0-or-later
Cache
=====
Cache in Knot Resolver is stored on disk and also shared between
:ref:`systemd-multiple-instances`, so the resolver doesn't lose the cached data on
restart or crash.
To improve performance even further, the resolver implements so-called aggressive caching
of DNSSEC-validated data (:rfc:`8198`), which speeds up resolution and also protects
against some types of Random Subdomain Attacks.
.. _`cache_sizing`:
Sizing
------
For personal and small office use-cases a cache size of around 100 MB is more than enough.
For large deployments we recommend running Knot Resolver on a dedicated machine
and allocating 90% of the machine's free memory for the resolver's cache.
.. note:: Choosing a cache size that can fit into RAM is important even if the
cache is stored on disk (default). Otherwise, the extra I/O caused by disk
access for missing pages can cause performance issues.
For example, imagine you have a machine with 16 GB of memory.
After a machine restart, use the command ``free -m`` to determine
the amount of free memory (without swap):
.. code-block:: bash
$ free -m
total used free
Mem: 15907 979 14928
Now you can configure the cache size to be 90% of the free 14 928 MB, i.e. about 13 435 MB:
.. code-block:: lua
-- 90 % of free memory after machine restart
cache.size = 13435 * MB
It is also possible to set the cache size based on the file system size. This is useful
if you use a dedicated partition for cache (e.g. non-persistent tmpfs). It is recommended
to leave some free space for special files, such as locks:
.. code-block:: lua
cache.size = cache.fssize() - 10*MB
.. note:: The `cache garbage collector <../deployment-advanced-no-systemd-processes.html#garbage-collector>`_
can be used to periodically trim the cache. It is enabled and configured by
default when running kresd with systemd integration.
.. _`cache_persistence`:
Persistence
-----------
.. tip:: Using tmpfs for cache improves performance and reduces disk I/O.
By default the cache is saved on a persistent storage device,
so the content of the cache persists across system reboots.
This usually leads to lower latency after a restart etc.,
however in certain situations a non-persistent cache storage might be preferred, e.g.:
- Resolver handles high volume of queries and I/O performance to disk is too low.
- Threat model includes attacker getting access to disk content in power-off state.
- Disk has limited number of writes (e.g. flash memory in routers).
If a non-persistent cache is desired, configure the cache directory to be on a
tmpfs_ filesystem, a temporary in-memory file storage.
The cache content will then be kept in memory, which gives faster access,
but it will be lost on power-off or reboot.
.. note:: On most Unix-like systems ``/tmp`` and ``/var/run`` are
commonly mounted as tmpfs. While it is technically possible to move the
cache to an existing tmpfs filesystem, it is *not recommended*, since the
path to the cache is configured in multiple places.
Mounting the cache directory as tmpfs_ is the recommended approach. Make sure
to use an appropriate ``size=`` option and don't forget to adjust the size in the
config file as well.
.. code-block:: none
# /etc/fstab
tmpfs /var/cache/knot-resolver tmpfs rw,size=2G,uid=knot-resolver,gid=knot-resolver,nosuid,nodev,noexec,mode=0700 0 0
.. code-block:: lua
-- /etc/knot-resolver/kresd.conf
cache.size = cache.fssize() - 10*MB
.. _tmpfs: https://en.wikipedia.org/wiki/Tmpfs
Configuration reference
-----------------------
.. function:: cache.open(max_size[, config_uri])
:param number max_size: Maximum cache size in bytes.
:return: ``true`` if cache was opened
Open cache with a size limit. The cache will be reopened if already open.
Note that ``max_size`` cannot be lowered, only increased, due to how the cache is implemented.
.. tip:: Use ``kB, MB, GB`` constants as a multiplier, e.g. ``100*MB``.
The URI ``lmdb://path`` allows you to change the cache directory.
Example:
.. code-block:: lua
cache.open(100 * MB, 'lmdb:///var/cache/knot-resolver')
.. envvar:: cache.size
Set the cache maximum size in bytes. Note that this is only a hint to the backend,
which may or may not respect it. See :func:`cache.open()`.
.. code-block:: lua
cache.size = 100 * MB -- equivalent to `cache.open(100 * MB)`
.. envvar:: cache.current_size
Get the maximum size in bytes.
.. code-block:: lua
print(cache.current_size)
.. envvar:: cache.storage
Set the cache storage backend configuration, see :func:`cache.backends()` for
more information. If the new storage configuration is invalid, it is not set.
.. code-block:: lua
cache.storage = 'lmdb://.'
.. envvar:: cache.current_storage
Get the storage backend configuration.
.. code-block:: lua
print(cache.current_storage)
.. function:: cache.backends()
:return: map of backends
.. note:: For now there is only one backend implementation, even though the APIs are ready for different (synchronous) backends.
The cache supports runtime-changeable backends, using the optional :rfc:`3986` URI, where the scheme
represents the backend protocol and the rest of the URI the backend-specific configuration. By default, it
is an ``lmdb`` backend in the working directory, i.e. ``lmdb://``.
Example output:
.. code-block:: lua
[lmdb://] => true
.. function:: cache.count()
:return: Number of entries in the cache. The meaning of the number is an implementation detail and is subject to change.
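For illustration, an interactive call might look like this (the number shown is just an example):
.. code-block:: lua
> cache.count()
6187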
.. function:: cache.close()
:return: ``true`` if cache was closed
Close the cache.
.. note:: This may or may not clear the cache, depending on the cache backend.
.. function:: cache.fssize()
:return: Partition size of cache storage.
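For example, the partition size can be used to derive the cache size when a dedicated cache partition is used, as in the persistence example above:
.. code-block:: lua
cache.size = cache.fssize() - 10*MB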
.. function:: cache.stats()
Return table with low-level statistics for internal cache operation and storage.
This counts each access to cache and does not directly map to individual
DNS queries or resource records.
For query-level statistics see :ref:`stats module <mod-stats>`.
Example:
.. code-block:: lua
> cache.stats()
[clear] => 0
[close] => 0
[commit] => 117
[count] => 2
[count_entries] => 6187
[match] => 21
[match_miss] => 2
[open] => 0
[read] => 4313
[read_leq] => 9
[read_leq_miss] => 4
[read_miss] => 1143
[remove] => 17
[remove_miss] => 0
[usage_percent] => 15.625
[write] => 189
The cache operation `read_leq` (*read less or equal*, i.e. range search) was requested 9 times,
and 4 out of those 9 operations finished with a *cache miss*.
The cache contains 6187 internal entries which occupy 15.625 % of the cache size.
.. function:: cache.max_ttl([ttl])
:param number ttl: maximum TTL in seconds (default: 1 day)
.. KR_CACHE_DEFAULT_TTL_MAX ^^
:return: current maximum TTL
Get or set upper TTL bound applied to all received records.
.. note:: The `ttl` value must be in range `(min_ttl, 2147483647)`.
.. code-block:: lua
-- Get maximum TTL
cache.max_ttl()
518400
-- Set maximum TTL
cache.max_ttl(172800)
172800
.. function:: cache.min_ttl([ttl])
:param number ttl: minimum TTL in seconds (default: 5 seconds)
.. KR_CACHE_DEFAULT_TTL_MIN ^^
:return: current minimum TTL
Get or set lower TTL bound applied to all received records.
Forcing a TTL higher than the one specified in the received record violates DNS standards, so use higher values with care.
The TTL still won't be extended beyond the expiration of the corresponding DNSSEC signature.
.. note:: The `ttl` value must be in range `<0, max_ttl)`.
.. code-block:: lua
-- Get minimum TTL
cache.min_ttl()
0
-- Set minimum TTL
cache.min_ttl(5)
5
.. function:: cache.ns_tout([timeout])
:param number timeout: NS retry interval in milliseconds (default: :c:macro:`KR_NS_TIMEOUT_RETRY_INTERVAL`)
:return: current timeout
Get or set the time interval for which a nameserver address will be ignored after determining that it doesn't return (useful) answers.
The intention is to avoid waiting if there's little hope; instead, kresd can immediately SERVFAIL or immediately use stale records (with the :ref:`serve_stale <mod-serve_stale>` module).
.. warning:: This setting applies only to the current kresd process.
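For illustration, getting and then setting the interval might look like this (the value is only an example):
.. code-block:: lua
-- Get the current interval in milliseconds
cache.ns_tout()
-- Ignore unresponsive nameserver addresses for 10 seconds
cache.ns_tout(10000)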
.. function:: cache.get([domain])
This function is not implemented at this moment.
We plan to re-introduce it soon, probably with a slightly different API.
.. function:: cache.clear([name], [exact_name], [rr_type], [chunk_size], [callback], [prev_state])
Purge cache records matching the specified criteria. There are two caveats:
* To reliably remove **negative** cache entries you need to clear the subtree covering the whole zone. E.g. to clear negative cache entries for a (formerly non-existing) record `www.example.com. A` you need to flush the whole subtree starting at the zone apex, e.g. `example.com.` [#]_.
* This operation is asynchronous and might not yet be finished when the call to ``cache.clear()`` returns. The return value indicates whether clearing continues asynchronously or not.
:param string name: subtree to purge; if the name isn't provided, the whole cache is purged
(and any other parameters are disregarded).
:param bool exact_name: if set to ``true``, only records with *the same* name are removed;
default: false.
:param kres.type rr_type: you may additionally specify the type to remove,
but that is only supported with ``exact_name == true``; default: nil.
:param integer chunk_size: the number of records to remove in one round; default: 100.
The purpose is not to block the resolver for long.
The default ``callback`` repeats the command after one millisecond
until all matching data are cleared.
:param function callback: a custom function to handle the result of the underlying C call.
Its parameters are copies of those passed to `cache.clear()` with one additional
parameter ``rettable`` containing a table with the return value from the current call.
The ``count`` field contains a return code from :func:`kr_cache_remove_subtree()`.
:param table prev_state: return value from previous run (can be used by callback)
:rtype: table
:return: ``count`` key is always present. Other keys are optional and their presence indicate special conditions.
* **count** *(integer)* - number of items removed from cache by this call (can be 0 if no entry matched criteria)
* **not_apex** - cleared subtree is not cached as zone apex; proofs of non-existence were probably not removed
* **subtree** *(string)* - hint where the zone apex lies (this is an estimate based on cache content and might not be accurate)
* **chunk_limit** - more than ``chunk_size`` items need to be cleared; clearing will continue asynchronously
Examples:
.. code-block:: lua
-- Clear whole cache
> cache.clear()
[count] => 76
-- Clear records at and below 'com.'
> cache.clear('com.')
[chunk_limit] => chunk size limit reached; the default callback will continue asynchronously
[not_apex] => to clear proofs of non-existence call cache.clear('com.')
[count] => 100
[round] => 1
[subtree] => com.
> worker.sleep(0.1)
[cache] asynchronous cache.clear('com', false) finished
-- Clear only 'www.example.com.'
> cache.clear('www.example.com.', true)
[round] => 1
[count] => 1
[not_apex] => to clear proofs of non-existence call cache.clear('example.com.')
[subtree] => example.com.
.. [#] This is a consequence of DNSSEC negative cache which relies on proofs of non-existence on various owner nodes. It is impossible to efficiently flush part of DNS zones signed with NSEC3.
/* Copyright (C) CZ.NIC, z.s.p.o. <knot-resolver@labs.nic.cz>
* SPDX-License-Identifier: GPL-3.0-or-later
*/
#include "daemon/bindings/impl.h"
#include <unistd.h>
#include <uv.h>
static void event_free(uv_timer_t *timer)
{
lua_State *L = the_engine->L;
int ref = (intptr_t) timer->data;
luaL_unref(L, LUA_REGISTRYINDEX, ref);
free(timer);
}
static void event_callback(uv_timer_t *timer)
{
lua_State *L = the_engine->L;
/* Retrieve callback and execute */
lua_rawgeti(L, LUA_REGISTRYINDEX, (intptr_t) timer->data);
lua_rawgeti(L, -1, 1);
lua_pushinteger(L, (intptr_t) timer->data);
int ret = execute_callback(L, 1);
/* Free callback if not recurrent or an error */
if (ret != 0 || (uv_timer_get_repeat(timer) == 0 && uv_is_active((uv_handle_t *)timer) == 0)) {
if (!uv_is_closing((uv_handle_t *)timer)) {
uv_close((uv_handle_t *)timer, (uv_close_cb) event_free);
}
}
}
static void event_fdcallback(uv_poll_t* handle, int status, int events)
{
lua_State *L = the_engine->L;
/* Retrieve callback and execute */
lua_rawgeti(L, LUA_REGISTRYINDEX, (intptr_t) handle->data);
lua_rawgeti(L, -1, 1);
lua_pushinteger(L, (intptr_t) handle->data);
lua_pushinteger(L, status);
lua_pushinteger(L, events);
int ret = execute_callback(L, 3);
/* Free callback if not recurrent or an error */
if (ret != 0) {
if (!uv_is_closing((uv_handle_t *)handle)) {
uv_close((uv_handle_t *)handle, (uv_close_cb) event_free);
}
}
}
static int event_sched(lua_State *L, unsigned timeout, unsigned repeat)
{
uv_timer_t *timer = malloc(sizeof(*timer));
if (!timer)
lua_error_p(L, "out of memory");
/* Start timer with the reference */
uv_loop_t *loop = uv_default_loop();
int ret = uv_timer_init(loop, timer);
if (ret != 0)
goto exit_err;
ret = uv_timer_start(timer, event_callback, timeout, repeat);
if (ret != 0)
goto exit_err;
/* Save callback and timer in registry */
lua_newtable(L);
lua_pushvalue(L, 2);
lua_rawseti(L, -2, 1);
lua_pushpointer(L, timer);
lua_rawseti(L, -2, 2);
int ref = luaL_ref(L, LUA_REGISTRYINDEX);
/* Save reference to the timer */
timer->data = (void *) (intptr_t)ref;
lua_pushinteger(L, ref);
return 1;
exit_err:
free(timer);
lua_error_p(L, "couldn't start the event");
}
static int event_after(lua_State *L)
{
/* Check parameters */
int n = lua_gettop(L);
if (n < 2 || !lua_isnumber(L, 1) || !lua_isfunction(L, 2))
lua_error_p(L, "expected 'after(number timeout, function)'");
return event_sched(L, lua_tointeger(L, 1), 0);
}
static int event_recurrent(lua_State *L)
{
/* Check parameters */
int n = lua_gettop(L);
if (n < 2 || !lua_isnumber(L, 1) || lua_tointeger(L, 1) == 0
|| !lua_isfunction(L, 2))
lua_error_p(L, "expected 'recurrent(number interval, function)'");
return event_sched(L, 0, lua_tointeger(L, 1));
}
static int event_cancel(lua_State *L)
{
int n = lua_gettop(L);
if (n < 1 || !lua_isnumber(L, 1))
lua_error_p(L, "expected 'cancel(number event)'");
/* Fetch event if it exists */
lua_rawgeti(L, LUA_REGISTRYINDEX, lua_tointeger(L, 1));
bool ok = lua_istable(L, -1);
/* Close the timer */
uv_handle_t **timer_pp = NULL;
if (ok) {
lua_rawgeti(L, -1, 2);
timer_pp = lua_touserdata(L, -1);
ok = timer_pp && *timer_pp;
/* Those should be sufficient safety checks, hopefully. */
}
if (ok && !uv_is_closing(*timer_pp)) {
uv_close(*timer_pp, (uv_close_cb)event_free);
}
lua_pushboolean(L, ok);
return 1;
}
static int event_reschedule(lua_State *L)
{
int n = lua_gettop(L);
if (n < 2 || !lua_isnumber(L, 1) || !lua_isnumber(L, 2))
lua_error_p(L, "expected 'reschedule(number event, number timeout)'");
/* Fetch event if it exists */
lua_rawgeti(L, LUA_REGISTRYINDEX, lua_tointeger(L, 1));
bool ok = lua_istable(L, -1);
/* Reschedule the timer */
uv_handle_t **timer_pp = NULL;
if (ok) {
lua_rawgeti(L, -1, 2);
timer_pp = lua_touserdata(L, -1);
ok = timer_pp && *timer_pp;
/* Those should be sufficient safety checks, hopefully. */
}
if (ok && !uv_is_closing(*timer_pp)) {
int ret = uv_timer_start((uv_timer_t *)*timer_pp,
event_callback, lua_tointeger(L, 2), 0);
if (ret != 0) {
uv_close(*timer_pp, (uv_close_cb)event_free);
ok = false;
}
}
lua_pushboolean(L, ok);
return 1;
}
static int event_fdwatch(lua_State *L)
{
/* Check parameters */
int n = lua_gettop(L);
if (n < 2 || !lua_isnumber(L, 1) || !lua_isfunction(L, 2))
lua_error_p(L, "expected 'socket(number fd, function)'");
uv_poll_t *handle = malloc(sizeof(*handle));
if (!handle)
lua_error_p(L, "out of memory");
/* Start timer with the reference */
int sock = lua_tointeger(L, 1);
uv_loop_t *loop = uv_default_loop();
int ret = uv_poll_init(loop, handle, sock);
if (ret == 0)
ret = uv_poll_start(handle, UV_READABLE, event_fdcallback);
if (ret != 0) {
free(handle);
lua_error_p(L, "couldn't start event poller");
}
/* Save callback and timer in registry */
lua_newtable(L);
lua_pushvalue(L, 2);
lua_rawseti(L, -2, 1);
lua_pushpointer(L, handle);
lua_rawseti(L, -2, 2);
int ref = luaL_ref(L, LUA_REGISTRYINDEX);
/* Save reference to the timer */
handle->data = (void *) (intptr_t)ref;
lua_pushinteger(L, ref);
return 1;
}
int kr_bindings_event(lua_State *L)
{
static const luaL_Reg lib[] = {
{ "after", event_after },
{ "recurrent", event_recurrent },
{ "cancel", event_cancel },
{ "socket", event_fdwatch },
{ "reschedule", event_reschedule },
{ NULL, NULL }
};
luaL_register(L, "event", lib);
return 1;
}
.. SPDX-License-Identifier: GPL-3.0-or-later
Timers and events reference
^^^^^^^^^^^^^^^^^^^^^^^^^^^
The timer represents exactly the thing described in the examples - it allows you to execute closures_
after a specified time, or even recurrent events. Time is always described in milliseconds,
but there are convenient variables that you can use - ``sec, minute, hour``.
For example, ``5 * hour`` represents five hours, or 5*60*60*1000 milliseconds.
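For illustration, the following two calls schedule the same one-shot event (the callback body is just a placeholder):
.. code-block:: lua
-- using the convenience constant
event.after(5 * minute, function (ev) print('tick') end)
-- the same interval written in plain milliseconds
event.after(5 * 60 * 1000, function (ev) print('tick') end)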
.. function:: event.after(time, function)
:return: event id
Execute function after the specified time has passed.
The first parameter of the callback is the event itself.
Example:
.. code-block:: lua
event.after(1 * minute, function() print('Hi!') end)
.. function:: event.recurrent(interval, function)
:return: event id
Execute function immediately and then periodically after each ``interval``.
Example:
.. code-block:: lua
msg_count = 0
event.recurrent(5 * sec, function(e)
msg_count = msg_count + 1
print('Hi #'..msg_count)
end)
.. function:: event.reschedule(event_id, timeout)
Reschedule a running event; it has no effect on canceled events.
New events may reuse the event_id, so the behaviour is undefined if the function
is called after another event is started.
Example:
.. code-block:: lua
local interval = 1 * minute
event.after(1 * minute, function (ev)
print('Good morning!')
-- Halve the interval for each iteration
interval = interval / 2
event.reschedule(ev, interval)
end)
.. function:: event.cancel(event_id)
Cancel a running event; it has no effect on already canceled events.
New events may reuse the event_id, so the behaviour is undefined if the function
is called after another event is started.
Example:
.. code-block:: lua
e = event.after(1 * minute, function() print('Hi!') end)
event.cancel(e)
Watch for file descriptor activity. This allows embedding other event loops or simply
firing events when a pipe endpoint becomes active. In other words, it provides asynchronous
notifications for the daemon.
.. function:: event.socket(fd, cb)
:param number fd: file descriptor to watch
:param cb: closure or callback to execute when fd becomes active
:return: event id
Execute the function when there is activity on the file descriptor. The closure is called
with the event id as the first parameter, the status as the second and the number of events as the third.
Example:
.. code-block:: lua
e = event.socket(0, function(e, status, nevents)
print('activity detected')
end)
event.cancel(e)
Asynchronous function execution
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The `event` package provides a very basic means for non-blocking execution - it allows running code when activity on a file descriptor is detected, and when a certain amount of time passes. It doesn't, however, provide an easy-to-use abstraction for non-blocking I/O. This is instead exposed through the `worker` package (if the `cqueues` Lua package is installed in the system).
.. function:: worker.coroutine(function)
Start a new coroutine with the given function (closure). The function can do I/O or run timers without blocking the main thread. See cqueues_ for documentation of possible operations and synchronization primitives. The main limitation is that you can't wait for a coroutine to finish from processing layers, because it's not currently possible to suspend and resume execution of processing layers.
Example:
.. code-block:: lua
worker.coroutine(function ()
for i = 0, 10 do
print('executing', i)
worker.sleep(1)
end
end)
.. function:: worker.sleep(seconds)
Pause execution of the current function (asynchronously if running inside a worker coroutine).
Example:
.. code-block:: lua
function async_print(testname, sleep)
log(testname .. ': system time before sleep ' .. tostring(os.time()))
worker.sleep(sleep) -- other coroutines continue execution now
log(testname .. ': system time AFTER sleep ' .. tostring(os.time()))
end
worker.coroutine(function() async_print('call #1', 5) end)
worker.coroutine(function() async_print('call #2', 3) end)
Output from this example demonstrates that both calls to function ``async_print`` were executed asynchronously:
.. code-block:: none
call #2: system time before sleep 1578065073
call #1: system time before sleep 1578065073
call #2: system time AFTER sleep 1578065076
call #1: system time AFTER sleep 1578065078
/* Copyright (C) CZ.NIC, z.s.p.o. <knot-resolver@labs.nic.cz>
* SPDX-License-Identifier: GPL-3.0-or-later
*/
#include <dirent.h>
#include <lua.h>
#include <lauxlib.h>
#include <string.h>
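/** Check that the table on top of the stack only uses string keys listed in @a keys;
 * return the first unexpected key, "<NON-STRING_INDEX>" for a non-string key, or NULL on success. */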
const char * lua_table_checkindices(lua_State *L, const char *keys[])
{
/* Iterate over table at the top of the stack.
* http://www.lua.org/manual/5.1/manual.html#lua_next */
for (lua_pushnil(L); lua_next(L, -2); lua_pop(L, 1)) {
lua_pop(L, 1); /* we don't need the value */
/* We need to copy the key, as _tostring() confuses _next().
* https://www.lua.org/manual/5.1/manual.html#lua_tolstring */
lua_pushvalue(L, -1);
const char *key = lua_tostring(L, -1);
if (!key)
return "<NON-STRING_INDEX>";
for (const char **k = keys; ; ++k) {
if (*k == NULL)
return key;
if (strcmp(*k, key) == 0)
break;
}
}
return NULL;
}
/** Return table listing filenames in a given directory (ls -A). */
static int kluautil_list_dir(lua_State *L)
{
lua_newtable(L); // empty table even on errors
const char *path = lua_tolstring(L, 1, NULL);
if (!path) return 1;
DIR *dir = opendir(path);
if (!dir) return 1;
struct dirent *entry;
int lua_i = 1;
while ((entry = readdir(dir)) != NULL) {
if (strcmp(entry->d_name, ".") != 0 && strcmp(entry->d_name, "..") != 0) {
lua_pushstring(L, entry->d_name);
lua_rawseti(L, -2, lua_i++);
}
}
closedir(dir);
return 1;
}
/* Each of these just creates the correspondingly named lua table of functions. */
int kr_bindings_cache (lua_State *L); /* ./cache.c */
int kr_bindings_event (lua_State *L); /* ./event.c */
int kr_bindings_modules (lua_State *L); /* ./modules.c */
int kr_bindings_net (lua_State *L); /* ./net.c */
int kr_bindings_worker (lua_State *L); /* ./worker.c */
void kr_bindings_register(lua_State *L)
{
kr_bindings_cache(L);
kr_bindings_event(L);
kr_bindings_modules(L);
kr_bindings_net(L);
kr_bindings_worker(L);
/* Finally some lua utils *written in C*, not really a binding. */
lua_register(L, "kluautil_list_dir", kluautil_list_dir);
}
void lua_error_p(lua_State *L, const char *fmt, ...)
{
/* Add a stack trace and throw the result as a lua error. */
luaL_traceback(L, L, "error occurred here (config filename:lineno is at the bottom, if config is involved):", 0);
/* Push formatted custom message, prepended with "ERROR: ". */
lua_pushliteral(L, "\nERROR: ");
{
va_list args;
va_start(args, fmt);
lua_pushvfstring(L, fmt, args);
va_end(args);
}
lua_concat(L, 3);
lua_error(L);
/* TODO: we might construct a little more friendly trace by using luaL_where().
* In particular, in case the error happens in a function that was called
* directly from a config file (the most common case), there isn't much need
* to format the trace in this heavy way. */
}