diff --git a/Knot.files b/Knot.files
index 613f302748008d6923e4cbad06aebf36d947eba3..2449661075f2dd07d118e79716fe108e4bb6cc97 100644
--- a/Knot.files
+++ b/Knot.files
@@ -1,3 +1,5 @@
+Doxyfile
+KNOWN_ISSUES
 Makefile.am
 README
 configure.ac
@@ -45,9 +47,6 @@ src/common-knot/print.c
 src/common-knot/print.h
 src/common-knot/ref.c
 src/common-knot/ref.h
-src/common-knot/slab/alloc-common.h
-src/common-knot/slab/slab.c
-src/common-knot/slab/slab.h
 src/common-knot/sockaddr.c
 src/common-knot/sockaddr.h
 src/common-knot/strlcat.c
@@ -179,10 +178,18 @@ src/knot/worker/queue.c
 src/knot/worker/queue.h
 src/knot/zone/contents.c
 src/knot/zone/contents.h
+src/knot/zone/events/events.c
+src/knot/zone/events/events.h
+src/knot/zone/events/handlers.c
+src/knot/zone/events/handlers.h
+src/knot/zone/events/replan.c
+src/knot/zone/events/replan.h
 src/knot/zone/node.c
 src/knot/zone/node.h
 src/knot/zone/semantic-check.c
 src/knot/zone/semantic-check.h
+src/knot/zone/timers.c
+src/knot/zone/timers.h
 src/knot/zone/zone-diff.c
 src/knot/zone/zone-diff.h
 src/knot/zone/zone-dump.c
@@ -199,14 +206,6 @@ src/knot/zone/zonedb.c
 src/knot/zone/zonedb.h
 src/knot/zone/zonefile.c
 src/knot/zone/zonefile.h
-src/knot/zone/events/events.c
-src/knot/zone/events/events.h
-src/knot/zone/events/handlers.h
-src/knot/zone/events/handlers.c
-src/knot/zone/events/replan.c
-src/knot/zone/events/replan.h
-src/knot/zone/timers.c
-src/knot/zone/timers.h
 src/libknot/binary.c
 src/libknot/binary.h
 src/libknot/common.h
@@ -350,7 +349,7 @@ tests/wire.c
 tests/worker_pool.c
 tests/worker_queue.c
 tests/zone_events.c
+tests/zone_timers.c
 tests/zone_update.c
 tests/zonedb.c
 tests/ztree.c
-tests/zone_timers.c
diff --git a/src/Makefile.am b/src/Makefile.am
index 32e2a9c85a5845b6cab98779d5c9ab8c314988ad..a3166df85e7706349a4dc676308437446fb8dc94 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -85,9 +85,6 @@ libknots_la_SOURCES =				\
 	common-knot/print.h			\
 	common-knot/ref.c			\
 	common-knot/ref.h			\
-	common-knot/slab/alloc-common.h		\
-	common-knot/slab/slab.c			\
-	common-knot/slab/slab.h			\
 	common-knot/sockaddr.c			\
 	common-knot/sockaddr.h			\
 	common-knot/strlcat.c			\
diff --git a/src/common-knot/slab/alloc-common.h b/src/common-knot/slab/alloc-common.h
deleted file mode 100644
index 84b7a4e580dc24a88e383e48a964ed6b7f57011a..0000000000000000000000000000000000000000
--- a/src/common-knot/slab/alloc-common.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*  Copyright (C) 2011 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
-
-    This program is free software: you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation, either version 3 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-/*!
- * \file alloc-common.h
- *
- * \author Lubos Slovak <lubos.slovak@nic.cz>
- *
- * \brief Common macros for alloc.
- *
- * \addtogroup alloc
- * @{
- */
-
-#pragma once
-
-#include <stdio.h>
-
-//#define MEM_DEBUG
-//#define MEM_NOSLAB
-//#define MEM_POISON
-#define MEM_SLAB_CAP 5   // Cap slab_cache empty slab count (undefined = inf)
-#define MEM_COLORING       // Slab cache coloring
-//#define MEM_SLAB_DEPOT     // Use slab depot for slab caching (not thread-safe)
-
-/* Eliminate compiler warnings about unused parameters. */
-#ifndef UNUSED
-#define UNUSED(param) (void)(param)
-#endif
-
-/* Optimisation macros. */
-#ifndef knot_likely
-#define knot_likely(x)       __builtin_expect((x),1)
-#endif
-#ifndef knot_unlikely
-#define knot_unlikely(x)     __builtin_expect((x),0)
-#endif
-
-#ifdef MEM_DEBUG
-#define dbg_mem(msg...) fprintf(stderr, msg)
-#else
-#define dbg_mem(msg...)
-#endif
-
-/*! @} */
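
For context, the header removed above carried the allocator's branch-prediction hints alongside the slab tuning knobs. A minimal sketch of what the knot_unlikely macro bought in practice (the macro body is from the deleted header; checked_alloc is an illustrative name, not from the tree):

#include <stdio.h>
#include <stdlib.h>

/* From the deleted header: tell the compiler the condition is
 * almost always false, keeping the error path off the hot path. */
#define knot_unlikely(x) __builtin_expect((x), 0)

static void *checked_alloc(size_t size)
{
	void *p = malloc(size);
	if (knot_unlikely(p == NULL)) {
		fprintf(stderr, "allocation of %zu bytes failed\n", size);
	}
	return p;
}

int main(void)
{
	free(checked_alloc(64));
	return 0;
}
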
diff --git a/src/common-knot/slab/slab.c b/src/common-knot/slab/slab.c
deleted file mode 100644
index 5b1f20bc465cac2e8464054a6110e1fc7a6c8da0..0000000000000000000000000000000000000000
--- a/src/common-knot/slab/slab.c
+++ /dev/null
@@ -1,513 +0,0 @@
-/*  Copyright (C) 2011 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
-
-    This program is free software: you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation, either version 3 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program.  If not, see <http://www.gnu.org/licenses/>.
-*/
-
-#include <stdio.h>
-#include <unistd.h>
-#include <string.h>
-#include <assert.h>
-#include <stdlib.h>
-#include <sys/mman.h>
-
-#include "common-knot/slab/alloc-common.h"
-#include "common-knot/slab/slab.h"
-
-/*
- * Magic constants.
- */
-#define SLAB_MAGIC    0x51  /*!< "Sl" magic byte (slab type). */
-#define POISON_DWORD  0xdeadbeef /*!< Memory boundary guard magic. */
-#define SLAB_MINCOLOR 64 /*!< Minimum space reserved for cache coloring. */
-
-
-/*! \brief Return binary logarithm of a number, which is a power of 2. */
-static inline unsigned fastlog2(unsigned v)
-{
-	// Works if we know the size is a power of 2
-	register unsigned int r = (v & 0xAAAAAAAA) != 0;
-	r |= ((v & 0xFFFF0000) != 0) << 4;
-	r |= ((v & 0xFF00FF00) != 0) << 3;
-	r |= ((v & 0xF0F0F0F0) != 0) << 2;
-	r |= ((v & 0xCCCCCCCC) != 0) << 1;
-	return r;
-}
-
-/*
- * Slab run-time constants.
- */
-
-size_t SLAB_SZ = 0; /*!< Slab size. */
-size_t SLAB_MASK = 0; /*!< \brief Slab address mask (for computing offsets). */
-
-/*!
- * Depot is a caching sub-allocator of slabs.
- * It mitigates performance impact of sequentially allocating and freeing
- * from a slab with just a few slab items by caching N slabs before returning
- * them to the system.
- *
- */
-#ifdef MEM_SLAB_DEPOT
-static slab_depot_t _depot_g; /*! \brief Global slab depot. */
-#endif // MEM_SLAB_DEPOT
-
-/*!
- * \brief Allocate a slab of given bufsize from depot.
- *
- * \retval Reserved memory for slab on success.
- * \retval NULL on errors.
- */
-static void* slab_depot_alloc(size_t bufsize)
-{
-	void *page = 0;
-#ifdef MEM_SLAB_DEPOT
-	if (_depot_g.available) {
-		for (int i = _depot_g.available - 1; i > -1 ; --i) {
-			if(_depot_g.cache[i]->bufsize == bufsize) {
-				page = _depot_g.cache[i];
-				_depot_g.cache[i] = _depot_g.cache[--_depot_g.available];
-				return page;
-			}
-		}
-		page = _depot_g.cache[--_depot_g.available];
-	} else {
-		if(posix_memalign(&page, SLAB_SIZE, SLAB_SIZE) == 0) {
-			((slab_t*)page)->bufsize = 0;
-		} else {
-			page = 0;
-		}
-
-	}
-#else // MEM_SLAB_DEPOT
-	UNUSED(bufsize);
-	if(posix_memalign(&page, SLAB_SZ, SLAB_SZ) == 0) {
-		((slab_t*)page)->bufsize = 0;
-	} else {
-		page = 0;
-	}
-#endif // MEM_SLAB_DEPOT
-	UNUSED(bufsize);
-
-	return page;
-}
-
-/*!
- * \brief Return a slab to the depot.
- *
- * \note If the depot is full, slab gets immediately freed.
- */
-static inline void slab_depot_free(void* slab)
-{
-#ifdef MEM_SLAB_DEPOT
-	if (_depot_g.available < SLAB_DEPOT_SIZE) {
-		_depot_g.cache[_depot_g.available++] = slab;
-	} else {
-		free(slab);
-	}
-#else // MEM_SLAB_DEPOT
-    free(slab);
-#endif // MEM_SLAB_DEPOT
-}
-
-/*! \brief Initialize slab depot. */
-static void slab_depot_init()
-{
-#ifdef MEM_SLAB_DEPOT
-	_depot_g.available = 0;
-#endif // MEM_SLAB_DEPOT
-}
-
-/*! \brief Destroy slab depot. */
-static void slab_depot_destroy()
-{
-#ifdef MEM_SLAB_DEPOT
-	while(_depot_g.available) {
-		free(_depot_g.cache[--_depot_g.available]);
-	}
-#endif // MEM_SLAB_DEPOT
-}
-
-/*
- * Initializers.
- */
-
-/*! \brief Initializes slab subsystem (it is called automatically). */
-void __attribute__ ((constructor)) slab_init()
-{
-	long slab_size = sysconf(_SC_PAGESIZE);
-	if (slab_size < 0) {
-		slab_size = SLAB_MINSIZE;
-	}
-
-	// Fetch page size
-	SLAB_SZ = (size_t)slab_size;
-	unsigned slab_logsz = fastlog2(SLAB_SZ);
-
-	// Compute slab page mask
-	SLAB_MASK = 0;
-	for (unsigned i = 0; i < slab_logsz; ++i) {
-		SLAB_MASK |= 1 << i;
-	}
-	SLAB_MASK = ~SLAB_MASK;
-
-	// Initialize depot
-	slab_depot_init();
-}
-
-/*! \brief Deinitializes slab subsystem (it is called automatically). */
-void __attribute__ ((destructor)) slab_deinit()
-{
-	// Deinitialize depot
-	if (SLAB_MASK) {
-		slab_depot_destroy();
-	}
-}
-
-/*
- * Cache helper functions.
- */
-
-/* \note Not used right now.
-static void slab_dump(slab_t* slab) {
-
-	printf("%s: buffers (bufsize=%zuB, %u/%u free): \n",
-	       __func__, slab->cache->bufsize, slab->bufs_free,
-	       slab->bufs_count);
-
-	void** buf = slab->head;
-	int i = 0, n = 0;
-	while(buf != 0) {
-		size_t diff = (size_t)((char*)buf - (char*)slab->base);
-		printf("-> %lu", diff / slab->cache->bufsize);
-		buf = (void**)(*buf);
-		if (++i == 10) {
-			printf("\n");
-			i = 0;
-		}
-		++n;
-	}
-
-	printf("\n");
-}
-*/
-
-/*!
- * \brief Free all slabs from a slab cache.
- * \return Number of freed slabs.
- */
-static inline int slab_cache_free_slabs(slab_t* slab)
-{
-	int count = 0;
-	while (slab) {
-		slab_t* next = slab->next;
-		slab_destroy(&slab);
-		++count;
-		slab = next;
-
-	}
-	return count;
-}
-
-/*
- * Slab helper functions.
- */
-
-/*! \brief Remove slab from a linked list. */
-static void slab_list_remove(slab_t* slab)
-{
-	// Disconnect from list
-	if (slab->prev) {
-		slab->prev->next = slab->next;
-	}
-	if(slab->next) {
-		slab->next->prev = slab->prev;
-	}
-
-	// Disconnect from cache
-	slab_cache_t* cache = slab->cache;
-	{
-		if (cache->slabs_free == slab) {
-			cache->slabs_free = slab->next;
-		} else if (cache->slabs_full == slab) {
-			cache->slabs_full = slab->next;
-		}
-	}
-}
-
-/*! \brief Insert slab into a linked list. */
-static void slab_list_insert(slab_t** list, slab_t* item)
-{
-	// If list exists, push to the top
-	item->prev = 0;
-	item->next = *list;
-	if(*list) {
-		(*list)->prev = item;
-	}
-	*list = item;
-}
-
-/*! \brief Move slab from one linked list to another. */
-static inline void slab_list_move(slab_t** target, slab_t* slab)
-{
-	slab_list_remove(slab);
-	slab_list_insert(target, slab);
-}
-
-/*
- * API functions.
- */
-
-slab_t* slab_create(slab_cache_t* cache)
-{
-	const size_t size = SLAB_SZ;
-
-	slab_t* slab = slab_depot_alloc(cache->bufsize);
-
-	if (knot_unlikely(slab == 0)) {
-		dbg_mem("%s: failed to allocate aligned memory block\n",
-		          __func__);
-		return 0;
-	}
-
-	/* Initialize slab. */
-	slab->magic = SLAB_MAGIC;
-	slab->cache = cache;
-	slab_list_insert(&cache->slabs_free, slab);
-#ifdef MEM_SLAB_CAP
-	++cache->empty;
-#endif
-
-	/* Already initialized? */
-	if (slab->bufsize == cache->bufsize) {
-		return slab;
-	} else {
-		slab->bufsize = cache->bufsize;
-	}
-
-	/* Ensure the item size can hold at least a size of ptr. */
-	size_t item_size = slab->bufsize;
-	if (knot_unlikely(item_size < SLAB_MIN_BUFLEN)) {
-		item_size = SLAB_MIN_BUFLEN;
-	}
-
-	/* Ensure at least some space for coloring */
-	size_t data_size = size - sizeof(slab_t);
-#ifdef MEM_COLORING
-	size_t free_space = data_size % item_size;
-	if (knot_unlikely(free_space < SLAB_MINCOLOR)) {
-		free_space = SLAB_MINCOLOR;
-	}
-
-
-	/// unsigned short color = __sync_fetch_and_add(&cache->color, 1);
-	unsigned short color = (cache->color += sizeof(void*));
-	color = color % free_space;
-#else
-	const unsigned short color = 0;
-#endif
-
-	/* Calculate useable data size */
-	data_size -= color;
-	slab->bufs_count = data_size / item_size;
-	slab->bufs_free = slab->bufs_count;
-
-	// Save first item as next free
-	slab->base = (char*)slab + sizeof(slab_t) + color;
-	slab->head = (void**)slab->base;
-
-	// Create freelist, skip last member, which is set to NULL
-	char* item = (char*)slab->head;
-	for(unsigned i = 0; i < slab->bufs_count - 1; ++i) {
-		*((void**)item) = item + item_size;
-		item += item_size;
-	}
-
-	// Set last buf to NULL (tail)
-	*((void**)item) = (void*)0;
-
-	// Ensure the last item has a NULL next
-	dbg_mem("%s: created slab (%p, %p) (%zu B)\n",
-	          __func__, slab, slab + size, size);
-	return slab;
-}
-
-void slab_destroy(slab_t** slab)
-{
-	/* Disconnect from the list */
-	slab_list_remove(*slab);
-
-	/* Free slab */
-	slab_depot_free(*slab);
-
-	/* Invalidate pointer. */
-	dbg_mem("%s: deleted slab %p\n", __func__, *slab);
-	*slab = 0;
-}
-
-void* slab_alloc(slab_t* slab)
-{
-	// Fetch first free item
-	void **item = 0;
-	{
-		if((item = slab->head)) {
-			slab->head = (void**)*item;
-			--slab->bufs_free;
-		} else {
-			// No more free items
-			return 0;
-		}
-	}
-
-#ifdef MEM_DEBUG
-	// Increment statistics
-	__sync_add_and_fetch(&slab->cache->stat_allocs, 1);
-#endif
-
-	// Move to full?
-	if (knot_unlikely(slab->bufs_free == 0)) {
-		slab_list_move(&slab->cache->slabs_full, slab);
-	} else {
-#ifdef MEM_SLAB_CAP
-		// Mark not empty?
-		if (knot_unlikely(slab->bufs_free == slab->bufs_count - 1)) {
-			--slab->cache->empty;
-		}
-#endif
-	}
-
-	return item;
-}
-
-void slab_free(void* ptr)
-{
-	// Null pointer check
-	if (knot_unlikely(!ptr)) {
-		return;
-	}
-
-	// Get slab start address
-	slab_t* slab = slab_from_ptr(ptr);
-	assert(slab);
-
-	// Check if it exists in directory
-	if (slab->magic == SLAB_MAGIC) {
-
-		// Return buf to slab
-		*((void**)ptr) = (void*)slab->head;
-		slab->head = (void**)ptr;
-		++slab->bufs_free;
-
-#ifdef MEM_DEBUG
-		// Increment statistics
-		__sync_add_and_fetch(&slab->cache->stat_frees, 1);
-#endif
-
-		// Return to partial
-		if(knot_unlikely(slab->bufs_free == 1)) {
-			slab_list_move(&slab->cache->slabs_free, slab);
-		} else {
-#ifdef MEM_SLAB_CAP
-		// Recycle if empty
-			if(knot_unlikely(slab_isempty(slab))) {
-				if(slab->cache->empty == MEM_SLAB_CAP) {
-					slab_destroy(&slab);
-				} else {
-					++slab->cache->empty;
-				}
-			}
-#endif
-		}
-
-	} else {
-
-		// Pointer is not a slab
-		// Presuming it's a large block
-		slab_obj_t* bs = (slab_obj_t*)ptr - 1;
-
-#ifdef MEM_POISON
-		// Remove memory barrier
-		mprotect(ptr + bs->size, sizeof(int), PROT_READ|PROT_WRITE);
-#endif
-
-		// Unmap
-		dbg_mem("%s: unmapping large block of %zu bytes at %p\n",
-		          __func__, bs->size, ptr);
-		free(bs);
-	}
-}
-
-int slab_cache_init(slab_cache_t* cache, size_t bufsize)
-{
-	if (knot_unlikely(!bufsize)) {
-		return -1;
-	}
-
-	memset(cache, 0, sizeof(slab_cache_t));
-	cache->bufsize = bufsize;
-	dbg_mem("%s: created cache of size %zu\n",
-	          __func__, bufsize);
-
-	return 0;
-}
-
-void slab_cache_destroy(slab_cache_t* cache) {
-
-	// Free slabs
-	unsigned free_s = slab_cache_free_slabs(cache->slabs_free);
-	unsigned full_s = slab_cache_free_slabs(cache->slabs_full);
-#ifndef MEM_DEBUG
-	UNUSED(free_s);
-	UNUSED(full_s);
-#else
-	dbg_mem("%s: %u empty/partial, %u full caches\n",
-	          __func__, free_s, full_s);
-#endif
-
-	// Invalidate cache
-	cache->bufsize = 0;
-	cache->slabs_free = cache->slabs_full = 0;
-}
-
-void* slab_cache_alloc(slab_cache_t* cache)
-{
-	slab_t* slab = cache->slabs_free;
-	if(!cache->slabs_free) {
-		slab = slab_create(cache);
-		if (slab == NULL) {
-			return NULL;
-		}
-	}
-
-
-	return slab_alloc(slab);
-}
-
-int slab_cache_reap(slab_cache_t* cache)
-{
-	// For now, just free empty slabs
-	slab_t* slab = cache->slabs_free;
-	int count = 0;
-	while (slab) {
-		slab_t* next = slab->next;
-		if (slab_isempty(slab)) {
-			slab_destroy(&slab);
-			++count;
-		}
-		slab = next;
-
-	}
-
-	cache->empty = 0;
-	return count;
-}
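
The file above leans on one invariant: slab_depot_alloc() obtains each slab via posix_memalign() with the alignment equal to the slab size, so any buf pointer maps back to its slab header in O(1) by masking the low address bits (the slab_from_ptr()/SLAB_MASK machinery, with SLAB_MASK computed in slab_init()). A standalone sketch of that trick, with BLOCK_SIZE standing in for the page-sized SLAB_SZ:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define BLOCK_SIZE 4096 /* stands in for SLAB_SZ (one page) */

int main(void)
{
	void *block = NULL;
	/* Alignment equal to size, as in slab_depot_alloc(). */
	if (posix_memalign(&block, BLOCK_SIZE, BLOCK_SIZE) != 0) {
		return 1;
	}

	/* Any pointer into the block recovers the base by masking,
	 * which is what slab_from_ptr() does with SLAB_MASK. */
	char *item = (char *)block + 1234;
	void *base = (void *)((uintptr_t)item & ~(uintptr_t)(BLOCK_SIZE - 1));

	printf("recovered base matches: %s\n", base == block ? "yes" : "no");
	free(block);
	return 0;
}
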
diff --git a/src/common-knot/slab/slab.h b/src/common-knot/slab/slab.h
deleted file mode 100644
index 582a3f44bf02237e1d423626179ba55fb2fd2587..0000000000000000000000000000000000000000
--- a/src/common-knot/slab/slab.h
+++ /dev/null
@@ -1,275 +0,0 @@
-/*  Copyright (C) 2011 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
-
-    This program is free software: you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation, either version 3 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-/*!
- * \file slab.h
- *
- * \author Marek Vavrusa <marek.vavusa@nic.cz>
- *
- * \brief SLAB allocator.
- *
- * The SLAB cache works with either custom SLAB sizes or
- * Next-Highest-Power-Of-2 sizes.
- *
- * The slab size is a multiple of PAGE_SIZE; the system allocator
- * is used for larger blocks.
- *
- * Allocated SLABs are PAGE_SIZE aligned for a fast O(1)
- * address-from-item lookup. This results in nearly no memory
- * overhead for very small blocks (<64B), but it requires the
- * underlying allocator to be efficient at allocating page-aligned memory
- * as well. The major disadvantage is that each slab must be aligned to its
- * size, as opposed to boundary tags.
- *
- * Slab implements simple coloring mechanism to improve
- * cache line utilisation.
- *
- * \ref SLAB_SIZE is the fixed size of a slab. As a rule of thumb, a slab is
- * effective when the maximum allocated block size is below 1/4 of SLAB_SIZE,
- * e.g. a 16kB SLAB is most effective up to a 4kB block size.
- *
- * The \ref MEM_POISON flag guards the memory just past each allocation, so
- * stray reads/writes trigger a segmentation fault. This poses a significant
- * time and space overhead. Enable it only when debugging.
- *
- * \ref MEM_SLAB_CAP defines the maximum number of empty slabs that a cache
- * can keep at a time. This results in a slight performance regression,
- * but actively recycles unused memory.
- *
- * \ref MEM_DEPOT_COUNT defines how many recycled slabs are cached for later
- * use instead of being returned immediately to the OS. This significantly
- * reduces the number of syscalls in some cases,
- * e.g. 16 means a 16 * SLAB_SIZE cache; for 16kB slabs, a 256kB cache.
- *
- * \ref MEM_COLORING enables simple cache coloring. This is generally a useful
- *      feature since all slabs are page size aligned and
- *      (depending on architecture) this slightly improves performance
- *      and cacheline usage at the cost of a minimum of 64 bytes per slab of
- *      overhead. Undefine MEM_COLORING in alloc-common.h to disable coloring.
- *
- * Optimal usage for a specific behavior (similar allocation sizes):
- * \code
- * slab_cache_t cache;
- * slab_cache_init(&cache, N); // Initialize, N means cache chunk size
- * ...
- * void* mem = slab_cache_alloc(&cache); // Allocate N bytes
- * ...
- * slab_free(mem); // Recycle memory
- * ...
- * slab_cache_destroy(&cache); // Deinitialize cache
- * \endcode
- *
- *
- * \todo Allocate slab headers elsewhere and use just first sizeof(void*) bytes
- *       in each slab as a pointer to slab header. This could improve the
- *       performance (issue #1583).
- *
- * \note Slab allocation is not thread safe for performance reasons.
- *
- * \addtogroup alloc
- * @{
- */
-
-#pragma once
-
-#include <pthread.h>
-#include <stdint.h>
-
-/* Constants. */
-#define SLAB_MINSIZE 4096  //!< Slab minimal size (4K blocks)
-#define SLAB_MIN_BUFLEN 8  //!< Minimal allocation block size is 8B.
-#define SLAB_DEPOT_SIZE 16 //!< N slabs cached = N*SLAB_SIZE byte cap
-struct slab_cache_t;
-extern size_t SLAB_MASK;
-
-/* Macros. */
-
-/*! \brief Return slab base address from pointer. */
-#define slab_from_ptr(p) ((void*)((size_t)(p) & SLAB_MASK))
-
-/*! \brief Return true if slab is empty. */
-#define slab_isempty(s) ((s)->bufs_free == (s)->bufs_count)
-
-/*!
- * \brief Slab descriptor.
- *
- * Slab is a block of memory used for an allocation of
- * smaller objects (bufs) later on.
- * Each slab is currently aligned to page size to easily
- * determine slab address from buf pointer.
- *
- * \warning Do not use slab_t directly as it cannot grow, see slab_cache_t.
- */
-typedef struct slab_t {
-	char magic;                 /*!< Identifies memory block type. */
-	unsigned short bufsize;     /*!< Slab bufsize. */
-	struct slab_cache_t *cache; /*!< Owner cache. */
-	struct slab_t *prev, *next; /*!< Neighbours in slab lists. */
-	unsigned bufs_count;        /*!< Number of bufs in slab. */
-	unsigned bufs_free;         /*!< Number of available bufs. */
-	void **head;                /*!< Pointer to first available buf. */
-	char* base;                 /*!< Base address for bufs. */
-} slab_t;
-
-/*!
- * \brief Slab depot.
- *
- * To mitigate slab initialization costs, depot keeps a finite number of
- * stacked slabs before returning them to the system.
- */
-typedef struct slab_depot_t {
-	size_t available;               /*!< Number of available pages. */
-	slab_t* cache[SLAB_DEPOT_SIZE]; /*!< Stack of free slabs. */
-} slab_depot_t;
-
-/*!
- * \brief Large object descriptor.
- *
- * A large object differs from a slab by its magic byte and
- * contains the object size.
- *
- * Magic needs to be first to overlap with slab_t magic byte.
- */
-typedef struct slab_obj_t {
-	char magic;  /*!< Identifies memory block type. */
-	size_t size; /*!< Object size. */
-} slab_obj_t;
-
-/*!
- * \brief Slab cache descriptor.
- *
- * Slab cache is a list of 0..n slabs with the same buf size.
- * It is responsible for slab state keeping.
- *
- * Once a slab is created, it is moved to free list.
- * When it is full, it is moved to full list.
- * Once a buf from full slab is freed, the slab is moved to
- * free list again (there may be some hysteresis for mitigating
- * a sequential alloc/free).
- *
- * Allocation of new slabs is on-demand, empty slabs are reused if possible.
- *
- * \note Slab implementation is different from Bonwick (Usenix 2001)
- *       http://www.usenix.org/event/usenix01/bonwick.html
- *       as it doesn't feature the empty and partial lists.
- *       This is due to the fact that a user-space allocator rarely
- *       needs to count free slabs; there is no way the OS could
- *       notify the application that memory is scarce.
- *       A slight performance increase was measured in benchmarks.
- *
- * \note Statistics are only available if MEM_DEBUG is enabled.
- */
-typedef struct slab_cache_t {
-	unsigned short color;    /*!< Current cache color. */
-	unsigned short empty;    /*!< Number of empty slabs. */
-	size_t bufsize;          /*!< Cache object (buf) size. */
-	slab_t *slabs_free;      /*!< List of free slabs. */
-	slab_t *slabs_full;      /*!< List of full slabs. */
-
-	/* Statistics. */
-	unsigned long stat_allocs; /*!< Allocation count. */
-	unsigned long stat_frees;  /*!< Free count. */
-} slab_cache_t;
-
-/*!
- * \brief Create a slab of predefined size.
- *
- * At the moment, slabs are equal to the page size and are page-aligned.
- * This enables quick and efficient buf-to-slab lookup by pointer arithmetic.
- *
- * Slab uses a simple coloring scheme, and the memory block is always
- * sizeof(void*) aligned.
- *
- * \param cache Parent cache.
- * \retval Slab instance on success.
- * \retval NULL on error.
- */
-slab_t* slab_create(slab_cache_t* cache);
-
-/*!
- * \brief Destroy slab instance.
- *
- * Slab is disconnected from any list and freed.
- * Dereferenced slab parameter is set to NULL.
- *
- * \param slab Pointer to given slab.
- */
-void slab_destroy(slab_t** slab);
-
-/*!
- * \brief Allocate a buf from slab.
- *
- * Returns a pointer to allocated memory or NULL on error.
- *
- * \param slab Given slab instance.
- * \retval Pointer to allocated memory.
- * \retval NULL on error.
- */
-void* slab_alloc(slab_t* slab);
-
-/*!
- * \brief Recycle memory.
- *
- * Given memory is returned to owner slab.
- * Memory content may be rewritten.
- *
- * \param ptr Returned memory.
- */
-void slab_free(void* ptr);
-
-/*!
- * \brief Create a slab cache.
- *
- * Create a slab cache with no allocated slabs.
- * Slabs are allocated on-demand.
- *
- * \param cache Pointer to uninitialized cache.
- * \param bufsize Single item size for later allocs.
- * \retval 0 on success.
- * \retval -1 on error.
- */
-int slab_cache_init(slab_cache_t* cache, size_t bufsize);
-
-/*!
- * \brief Destroy a slab cache.
- *
- * Destroy a slab cache and all associated slabs.
- *
- * \param cache Pointer to slab cache.
- */
-void slab_cache_destroy(slab_cache_t* cache);
-
-/*!
- * \brief Allocate from the cache.
- *
- * It tries to use partially free caches first,
- * empty caches second and allocates a new cache
- * as a last resort.
- *
- * \param cache Given slab cache.
- * \retval Pointer to allocated memory.
- * \retval NULL on error.
- */
-void* slab_cache_alloc(slab_cache_t* cache);
-
-/*!
- * \brief Free unused slabs from cache.
- *
- * \param cache Given slab cache.
- * \return Number of freed slabs.
- */
-int slab_cache_reap(slab_cache_t* cache);
-
-/*! @} */
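
For the record, the removed allocator's intended usage, expanded from the \code example in the header above into a self-contained unit (the 64-byte bufsize and the demo function name are illustrative; nothing builds against this API after this commit):

#include <string.h>

#include "common-knot/slab/slab.h"

int slab_demo(void)
{
	slab_cache_t cache;
	if (slab_cache_init(&cache, 64) != 0) { /* 64 B objects */
		return -1;
	}

	void *mem = slab_cache_alloc(&cache); /* allocate one 64 B buf */
	if (mem != NULL) {
		memset(mem, 0, 64);
		slab_free(mem); /* recycle: buf returns to its owner slab */
	}

	slab_cache_destroy(&cache); /* frees all remaining slabs */
	return 0;
}
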
diff --git a/src/knot/zone/events/events.c b/src/knot/zone/events/events.c
index 0a6821002a72ed66a9991c39b4db7979b4c72423..0a1968033e2aad24766ce859a4418412bf5d223b 100644
--- a/src/knot/zone/events/events.c
+++ b/src/knot/zone/events/events.c
@@ -398,7 +398,7 @@ time_t zone_events_get_next(const struct zone_t *zone, zone_event_type_t *type)
 	return next_time;
 }
 
-void zone_events_update(zone_t *zone, const zone_t *old_zone)
+void zone_events_update(zone_t *zone, zone_t *old_zone)
 {
 	replan_events(zone, old_zone);
 }
diff --git a/src/knot/zone/events/events.h b/src/knot/zone/events/events.h
index 71cf7845f6aa3d039e6de3a7133d4b2285569c13..5bb27fe9acfd05169481b99306665e3f22855b64 100644
--- a/src/knot/zone/events/events.h
+++ b/src/knot/zone/events/events.h
@@ -176,7 +176,7 @@ time_t zone_events_get_next(const struct zone_t *zone, zone_event_type_t *type);
  * \param zone      Zone with new config.
  * \param old_zone  Zone with old config.
  */
-void zone_events_update(struct zone_t *zone, const struct zone_t *old_zone);
+void zone_events_update(struct zone_t *zone, struct zone_t *old_zone);
 
 /*!
  * \brief Replans DDNS processing event if DDNS queue is not empty.
diff --git a/src/knot/zone/events/replan.c b/src/knot/zone/events/replan.c
index 1aecf49d907bb6ff7f95a2fa618ad23595f4cbd2..6039f9b24a9546eb35b19df90f20ae159a7045f6 100644
--- a/src/knot/zone/events/replan.c
+++ b/src/knot/zone/events/replan.c
@@ -114,11 +114,11 @@ static void replan_dnssec(zone_t *zone)
 }
 
 /*!< Replans DDNS event. */
-void replan_update(zone_t *zone, const zone_t *old_zone)
+void replan_update(zone_t *zone, zone_t *old_zone)
 {
 	const bool have_updates = old_zone->ddns_queue_size > 0;
 	if (have_updates) {
-		duplicate_ddns_q(zone, (zone_t *)old_zone);
+		duplicate_ddns_q(zone, old_zone);
 	}
 
 	if (have_updates) {
@@ -126,12 +126,12 @@ void replan_update(zone_t *zone, const zone_t *old_zone)
 	}
 }
 
-void replan_events(zone_t *zone, const zone_t *old_zone)
+void replan_events(zone_t *zone, zone_t *old_zone)
 {
 	replan_soa_events(zone, old_zone);
 	replan_xfer(zone, old_zone);
 	replan_flush(zone, old_zone);
 	replan_event(zone, old_zone, ZONE_EVENT_NOTIFY);
-	replan_update(zone, (zone_t *)old_zone);
+	replan_update(zone, old_zone);
 	replan_dnssec(zone);
 }
diff --git a/src/knot/zone/events/replan.h b/src/knot/zone/events/replan.h
index b7174cedbd428cd937cb55529279ef9975c23097..25b3e0bc4303d8ccdcd4b4cb949ba90dfec51bbe 100644
--- a/src/knot/zone/events/replan.h
+++ b/src/knot/zone/events/replan.h
@@ -20,7 +20,7 @@
 #include "knot/zone/zone.h"
 
 /*! \brief Replans zone's events using old zone. */
-void replan_events(zone_t *zone, const zone_t *old_zone);
+void replan_events(zone_t *zone, zone_t *old_zone);
 
 /*! \brief Replans zone's DDNS events using old zone's DDNS queue. */
-void replan_update(zone_t *zone, const zone_t *old_zone);
+void replan_update(zone_t *zone, zone_t *old_zone);
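
The const removals in events.c, events.h, replan.c, and replan.h above are one logical change. replan_update() hands old_zone to duplicate_ddns_q(), which presumably mutates the old zone's DDNS queue; the old const-qualified signatures could only be satisfied by casting the qualifier away, as the removed lines in replan.c show. A reduced sketch of why the new signatures are preferable (zone_t is opaque here):

typedef struct zone zone_t; /* opaque for this sketch */

void duplicate_ddns_q(zone_t *zone, zone_t *old_zone);

/* The previous prototype read (zone_t *zone, const zone_t *old_zone),
 * forcing the call below to be written as
 *     duplicate_ddns_q(zone, (zone_t *)old_zone);
 * Writing through such a cast is undefined behavior if the object
 * really is const, and the cast hides the mutation from every caller
 * reading the header. The new signature states the mutation up front. */
void replan_update(zone_t *zone, zone_t *old_zone)
{
	duplicate_ddns_q(zone, old_zone);
}
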
diff --git a/src/zscanner/scanner.c.g2 b/src/zscanner/scanner.c.g2
index 342a717bd2292853042121dfb39d569e898df65b..8072cdf55207ce23514097c5eaa9e76247966f61 100644
--- a/src/zscanner/scanner.c.g2
+++ b/src/zscanner/scanner.c.g2
@@ -73128,8 +73128,8 @@ int zs_scanner_parse_file(zs_scanner_t *s,
 	}
 
 	// Get absolute path of the zone file.
-	if (realpath(file_name, (char*)(s->buffer)) != NULL) {
-		char *full_name = strdup((char*)(s->buffer));
+	char *full_name = realpath(file_name, NULL);
+	if (full_name != NULL) {
 		free(s->path);
 		s->path = strdup(dirname(full_name));
 		free(full_name);
diff --git a/src/zscanner/scanner.c.t0 b/src/zscanner/scanner.c.t0
index e793fb8330e85af8212c52e85423ec437f6a7158..684d05d524068acb402da7050b49d95e3d92c2cc 100644
--- a/src/zscanner/scanner.c.t0
+++ b/src/zscanner/scanner.c.t0
@@ -7449,8 +7449,8 @@ int zs_scanner_parse_file(zs_scanner_t *s,
 	}
 
 	// Get absolute path of the zone file.
-	if (realpath(file_name, (char*)(s->buffer)) != NULL) {
-		char *full_name = strdup((char*)(s->buffer));
+	char *full_name = realpath(file_name, NULL);
+	if (full_name != NULL) {
 		free(s->path);
 		s->path = strdup(dirname(full_name));
 		free(full_name);
diff --git a/src/zscanner/scanner.rl b/src/zscanner/scanner.rl
index e53e041c0c5e3ae520e21972d3269e72a1c51a93..506854f52e18715afc735cb990e3477f974ca5ff 100644
--- a/src/zscanner/scanner.rl
+++ b/src/zscanner/scanner.rl
@@ -296,8 +296,8 @@ int zs_scanner_parse_file(zs_scanner_t *s,
 	}
 
 	// Get absolute path of the zone file.
-	if (realpath(file_name, (char*)(s->buffer)) != NULL) {
-		char *full_name = strdup((char*)(s->buffer));
+	char *full_name = realpath(file_name, NULL);
+	if (full_name != NULL) {
 		free(s->path);
 		s->path = strdup(dirname(full_name));
 		free(full_name);
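
scanner.c.g2 and scanner.c.t0 appear to be generated variants of scanner.rl, hence the same hunk three times. The fix itself: resolving the path into s->buffer is only safe if that buffer holds at least PATH_MAX bytes, whereas realpath() with a NULL second argument (POSIX.1-2008) allocates a buffer of exactly the right size. A self-contained sketch of the new pattern (get_zone_dir is an illustrative name; feature-test macros may be needed on some systems):

#include <libgen.h> /* dirname */
#include <stdio.h>
#include <stdlib.h> /* realpath, free */
#include <string.h> /* strdup */

/* Return the directory of file_name as a fresh allocation, or NULL. */
static char *get_zone_dir(const char *file_name)
{
	/* POSIX.1-2008: a NULL buffer makes realpath() allocate one
	 * large enough for the resolved path; no PATH_MAX guesswork. */
	char *full_name = realpath(file_name, NULL);
	if (full_name == NULL) {
		return NULL;
	}

	/* dirname() may modify its argument and may return a pointer
	 * into it, so duplicate the result before freeing full_name. */
	char *dir = strdup(dirname(full_name));
	free(full_name);
	return dir;
}

int main(void)
{
	char *dir = get_zone_dir("/etc/hosts");
	printf("%s\n", dir ? dir : "(failed)");
	free(dir);
	return 0;
}
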
diff --git a/tests/.gitignore b/tests/.gitignore
index 1dc26d1ba89200dd5922b1c110c54396418851f3..3ef75e8bd5cac0492e09ea94c9ad3d6169fe224a 100644
--- a/tests/.gitignore
+++ b/tests/.gitignore
@@ -34,7 +34,6 @@ rrl
 rrset
 rrset_wire
 server
-slab
 wire
 worker_pool
 worker_queue
diff --git a/tests/Makefile.am b/tests/Makefile.am
index 80beba65e8a70804d78b8931fb641b00256d83c4..2db0910e0bf96f4f34d663e82731afaa10515469 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -9,45 +9,44 @@ LDADD = \
 	$(top_builddir)/src/libknots.la
 
 check_PROGRAMS = \
-	journal				\
-	slab				\
-	hattrie				\
-	hhash				\
-	dthreads			\
 	acl				\
-	fdset				\
-	base64				\
 	base32hex			\
-	descriptor			\
-	server				\
+	base64				\
+	changeset			\
 	conf				\
-	rrl				\
-	wire				\
+	descriptor			\
 	dname				\
-	ztree				\
-	zonedb				\
-	changeset			\
 	dnssec_keys			\
 	dnssec_nsec3			\
 	dnssec_sign			\
 	dnssec_zone_nsec		\
-	rdata				\
-	rdataset			\
-	rrset				\
-	rrset_wire			\
-	node				\
+	dthreads			\
 	edns				\
+	fdset				\
+	hattrie				\
+	hhash				\
+	journal				\
+	namedb				\
+	node				\
 	pkt				\
-	process_query			\
 	process_answer			\
-	requestor			\
+	process_query			\
 	query_module			\
+	rdata				\
+	rdataset			\
+	requestor			\
+	rrl				\
+	rrset				\
+	rrset_wire			\
+	server				\
+	wire				\
 	worker_pool			\
 	worker_queue			\
 	zone_events			\
-	zone_update			\
 	zone_timers			\
-	namedb
+	zone_update			\
+	zonedb				\
+	ztree
 
 check-compile-only: $(check_PROGRAMS)