diff --git a/src/common/fdset.c b/src/common/fdset.c
index 00e57048a39fc9443a3f6904134588e8f78ccd27..5f93d278ff31992b01c8546592c7a8bf2d23092b 100644
--- a/src/common/fdset.c
+++ b/src/common/fdset.c
@@ -44,7 +44,7 @@ static int fdset_resize(fdset_t *set, unsigned size)
 	void *tmp = NULL;
 	MEM_RESIZE(tmp, set->ctx, size * sizeof(void*));
 	MEM_RESIZE(tmp, set->pfd, size * sizeof(struct pollfd));
-	MEM_RESIZE(tmp, set->tmout, size * sizeof(timev_t));
+	MEM_RESIZE(tmp, set->timeout, size * sizeof(timev_t));
 	set->size = size;
 	return KNOT_EOK;
 }
@@ -67,7 +67,7 @@ int fdset_clear(fdset_t* set)
 
 	free(set->ctx);
 	free(set->pfd);
-	free(set->tmout);
+	free(set->timeout);
 	memset(set, 0, sizeof(fdset_t));
 	return KNOT_EOK;
 }
@@ -88,7 +88,7 @@ int fdset_add(fdset_t *set, int fd, unsigned events, void *ctx)
 	set->pfd[i].events = events;
 	set->pfd[i].revents = 0;
 	set->ctx[i] = ctx;
-	set->tmout[i] = 0;
+	set->timeout[i] = 0;
 
 	/* Return index to this descriptor. */
 	return i;
@@ -108,14 +108,14 @@ int fdset_remove(fdset_t *set, unsigned i)
 	unsigned last = set->n; /* Already decremented */
 	if (i < last) {
 		set->pfd[i] = set->pfd[last];
-		set->tmout[i] = set->tmout[last];
+		set->timeout[i] = set->timeout[last];
 		set->ctx[i] = set->ctx[last];
 	}
 
 	return KNOT_EOK;
 }
 
-int fdset_set_tmout(fdset_t* set, int i, int interval)
+int fdset_set_watchdog(fdset_t* set, int i, int interval)
 {
 	if (set == NULL || i >= set->n) {
 		return KNOT_EINVAL;
@@ -123,7 +123,7 @@ int fdset_set_tmout(fdset_t* set, int i, int interval)
 
 	/* Lift watchdog if interval is negative. */
 	if (interval < 0) {
-		set->tmout[i] = 0;
+		set->timeout[i] = 0;
 		return KNOT_EOK;
 	}
 
@@ -132,11 +132,11 @@ int fdset_set_tmout(fdset_t* set, int i, int interval)
 	if (time_now(&now) < 0)
 		return KNOT_ERROR;
 
-	set->tmout[i] = now.tv_sec + interval; /* Only seconds precision. */
+	set->timeout[i] = now.tv_sec + interval; /* Only seconds precision. */
 	return KNOT_EOK;
 }
 
-int fdset_sweep(fdset_t* set, fdset_sweep_f cb, void *data)
+int fdset_sweep(fdset_t* set, fdset_sweep_cb_t cb, void *data)
 {
 	if (set == NULL || cb == NULL) {
 		return KNOT_EINVAL;
@@ -152,7 +152,7 @@ int fdset_sweep(fdset_t* set, fdset_sweep_f cb, void *data)
 	while (i < set->n) {
 
 		/* Check sweep state, remove if requested. */
-		if (set->tmout[i] > 0 && set->tmout[i] <= now.tv_sec) {
+		if (set->timeout[i] > 0 && set->timeout[i] <= now.tv_sec) {
 			if (cb(set, i, data) == FDSET_SWEEP) {
 				if (fdset_remove(set, i) == KNOT_EOK)
 					continue; /* Stay on the index. */
diff --git a/src/common/fdset.h b/src/common/fdset.h
index d74765ad2ef7b5123ddd43b09b5258c22d5f8de2..f0ef84928ea2742831e8b4b075093b1275ab1b56 100644
--- a/src/common/fdset.h
+++ b/src/common/fdset.h
@@ -41,7 +41,7 @@ typedef struct fdset {
 	unsigned size;       /*!< Array size (allocated). */
 	void* *ctx;          /*!< Context for each fd. */
 	struct pollfd *pfd;  /*!< poll state for each fd */
-	time_t *tmout;       /*!< Timeout for each fd (seconds precision). */
+	time_t *timeout;     /*!< Timeout for each fd (seconds precision). */
 } fdset_t;
 
 /*! \brief Mark-and-sweep state. */
@@ -51,7 +51,7 @@ enum fdset_sweep_state {
 };
 
 /*! \brief Sweep callback (set, index, data) */
-typedef enum fdset_sweep_state (*fdset_sweep_f)(fdset_t*, int, void*);
+typedef enum fdset_sweep_state (*fdset_sweep_cb_t)(fdset_t*, int, void*);
 
 /*!
  * \brief Initialize fdset to given size.
@@ -93,18 +93,20 @@ int fdset_remove(fdset_t *set, unsigned i);
 /*!
  * \brief Set file descriptor watchdog interval.
  *
- * Descriptors without activity in given interval
- * can be disposed with fdset_sweep().
+ * Set time (interval from now) after which the associated file descriptor
+ * should be swept (see fdset_sweep). A good example is setting a grace period
+ * of N seconds between socket activity. If the socket is not active within
+ * <now, now + interval>, it is swept and potentially closed.
  *
  * \param set Target set.
  * \param i Index for the file descriptor.
  * \param interval Allowed interval without activity (seconds).
- *                 <0 removes watchdog interval.
+ *                 A negative interval disables the watchdog timer.
  *
  * \retval 0 if successful.
  * \retval -1 on errors.
  */
-int fdset_set_tmout(fdset_t* set, int i, int interval);
+int fdset_set_watchdog(fdset_t* set, int i, int interval);
 
 /*!
  * \brief Sweep file descriptors with exceeding inactivity period.
@@ -113,12 +115,10 @@ int fdset_set_tmout(fdset_t* set, int i, int interval);
  * \param cb Callback for sweeped descriptors.
  * \param data Pointer to extra data.
  *
- * \note See
- *
  * \retval number of sweeped descriptors.
  * \retval -1 on errors.
  */
-int fdset_sweep(fdset_t* set, fdset_sweep_f cb, void *data);
+int fdset_sweep(fdset_t* set, fdset_sweep_cb_t cb, void *data);
 
 /*!
  * \brief pselect(2) compatibility wrapper.
diff --git a/src/knot/server/tcp-handler.c b/src/knot/server/tcp-handler.c
index 58cc1bfebe9371489da055bf689da055dece9c34..92eb40209cae1b35f0f8a72d2309fe0f7e69e07c 100644
--- a/src/knot/server/tcp-handler.c
+++ b/src/knot/server/tcp-handler.c
@@ -327,27 +327,25 @@ tcp_worker_t* tcp_worker_create()
 {
 	tcp_worker_t *w = malloc(sizeof(tcp_worker_t));
 	if (w == NULL)
-		goto cleanup;
+		return NULL;
 
 	/* Create signal pipes. */
 	memset(w, 0, sizeof(tcp_worker_t));
-	if (pipe(w->pipe) < 0)
-		goto cleanup;
+	if (pipe(w->pipe) < 0) {
+		free(w);
+		return NULL;
+	}
 
 	/* Create fdset. */
 	if (fdset_init(&w->set, FDSET_INIT_SIZE) != KNOT_EOK) {
 		close(w->pipe[0]);
 		close(w->pipe[1]);
-		goto cleanup;
+		free(w);
+		return NULL;
 	}
 
 	fdset_add(&w->set, w->pipe[0], POLLIN, NULL);
 	return w;
-
-	/* Cleanup */
-cleanup:
-	free(w);
-	return NULL;
 }
 
 void tcp_worker_free(tcp_worker_t* w)
@@ -606,14 +604,14 @@ int tcp_loop_worker(dthread_t *thread)
 				int client, next_id;
 				if (read(fd, &client, sizeof(int)) == sizeof(int)) {
 					next_id = fdset_add(set, client, POLLIN, NULL);
-					fdset_set_tmout(set, next_id, max_hs);
+					fdset_set_watchdog(set, next_id, max_hs);
 				}
 			} else {
 				/* Process query over TCP. */
 				int ret = tcp_handle(w, fd, qbuf, SOCKET_MTU_SZ);
 				if (ret == KNOT_EOK) {
 					/* Update socket activity timer. */
-					fdset_set_tmout(set, i, max_idle);
+					fdset_set_watchdog(set, i, max_idle);
 				}
 				if (ret == KNOT_ECONNREFUSED) {
 					fdset_remove(set, i);
diff --git a/src/knot/server/xfr-handler.c b/src/knot/server/xfr-handler.c
index 26f2f2c4f09aff964c4aa792df2f086798da167d..39d012e9ad2a9488765513ca1c83189807dc9cbd 100644
--- a/src/knot/server/xfr-handler.c
+++ b/src/knot/server/xfr-handler.c
@@ -75,10 +75,12 @@ static bool xfr_pending_incr(xfrhandler_t *xfr)
 {
 	bool ret = false;
 	pthread_mutex_lock(&xfr->pending_mx);
+	rcu_read_lock();
 	if (xfr->pending < conf()->xfers) {
 		++xfr->pending;
 		ret = true;
 	}
+	rcu_read_unlock();
 	pthread_mutex_unlock(&xfr->pending_mx);
 
 	return ret;
@@ -347,7 +349,7 @@ static int xfr_task_expire(fdset_t *set, int i, knot_ns_xfr_t *rq)
 	case XFR_TYPE_NOTIFY:
 		if ((long)--rq->data > 0) { /* Retries */
 			notify_create_request(contents, rq->wire, &rq->wire_size);
-			fdset_set_tmout(set, i, NOTIFY_TIMEOUT);
+			fdset_set_watchdog(set, i, NOTIFY_TIMEOUT);
 			rq->send(rq->session, &rq->addr, rq->wire, rq->wire_size);
 			log_zone_info("%s Query issued (serial %u).\n",
 			              rq->msg, knot_zone_serial(contents));
@@ -443,10 +445,10 @@ static int xfr_task_start(knot_ns_xfr_t *rq)
 
 static int xfr_task_is_transfer(knot_ns_xfr_t *rq)
 {
-	return rq->type <= XFR_TYPE_IIN;
+	return rq->type == XFR_TYPE_AIN || rq->type == XFR_TYPE_IIN;
 }
 
-static void xfr_async_setbuf(knot_ns_xfr_t* rq, uint8_t *buf, size_t buflen)
+static void xfr_async_setbuf(knot_ns_xfr_t *rq, uint8_t *buf, size_t buflen)
 {
 	/* Update request. */
 	rq->wire = buf;
@@ -460,7 +462,7 @@ static void xfr_async_setbuf(knot_ns_xfr_t* rq, uint8_t *buf, size_t buflen)
 	}
 }
 
-static int xfr_async_start(fdset_t *set, knot_ns_xfr_t* rq)
+static int xfr_async_start(fdset_t *set, knot_ns_xfr_t *rq)
 {
 	/* Update XFR message prefix. */
 	int ret = KNOT_EOK;
@@ -479,7 +481,7 @@ static int xfr_async_start(fdset_t *set, knot_ns_xfr_t* rq)
 		if (next_id >= 0) {
 			/* Set default connection timeout. */
 			rcu_read_lock();
-			fdset_set_tmout(set, next_id, conf()->max_conn_reply);
+			fdset_set_watchdog(set, next_id, conf()->max_conn_reply);
 			rcu_read_unlock();
 		} else {
 			/* Or refuse if failed. */
@@ -490,7 +492,7 @@ static int xfr_async_start(fdset_t *set, knot_ns_xfr_t* rq)
 	return ret;
 }
 
-static int xfr_async_state(knot_ns_xfr_t* rq)
+static int xfr_async_state(knot_ns_xfr_t *rq)
 {
 	/* Check socket status. */
 	int err = EINVAL;
@@ -540,12 +542,17 @@ static int xfr_async_finish(fdset_t *set, unsigned id)
 			pthread_mutex_unlock(&zd->lock);
 		}
 		break;
-	case XFR_TYPE_NOTIFY: /* Send on first timeout <0,5>s. */
-		fdset_set_tmout(set, id, (int)(tls_rand() * 5));
+	case XFR_TYPE_NOTIFY:
+		/* This is a bit of a hack to adapt NOTIFY lifetime tracking.
+		 * When NOTIFY event enters handler, it shouldn't be sent immediately.
+		 * To accommodate this, a <0, 5>s random delay is set on
+		 * event startup, so the first query fires when this timer
+		 * expires. */
+		fdset_set_watchdog(set, id, (int)(tls_rand() * 5));
 		return KNOT_EOK;
 	case XFR_TYPE_SOA:
 	case XFR_TYPE_FORWARD:
-		fdset_set_tmout(set, id, conf()->max_conn_reply);
+		fdset_set_watchdog(set, id, conf()->max_conn_reply);
 		break;
 	default:
 		break;
@@ -790,6 +797,10 @@ static int xfr_task_xfer(xfrworker_t *w, knot_ns_xfr_t *rq)
 	/* Only for successful xfers. */
 	if (ret > 0) {
 		ret = xfr_task_finalize(w, rq);
+
+		/* EBUSY on an incremental transfer has a special meaning: the
+		 * journal was unable to free up space for the incoming transfer,
+		 * forcing a fall back to a full zone (AXFR) transfer. */
 		if (ret == KNOT_EBUSY && rq->type == XFR_TYPE_IIN) {
 			return xfr_start_axfr(w, rq, diff_nospace_msg);
 		} else {
@@ -1016,11 +1027,9 @@ int xfr_worker(dthread_t *thread)
 	time_now(&next_sweep);
 	next_sweep.tv_sec += XFR_SWEEP_INTERVAL;
 
-	/* Capacity limits. */
-	rcu_read_lock();
+	/* Approximate thread capacity limits. */
 	unsigned threads = w->master->unit->size;
 	unsigned thread_capacity = XFR_MAX_TASKS / threads;
-	rcu_read_unlock();
 
 	/* Set of connections. */
 	fdset_t set;
diff --git a/src/knot/server/zones.c b/src/knot/server/zones.c
index 17b0c7ceb22d275ff4bd99866fb91a8ea7961828..eba8fc516e5fb1f46d48a3e0e73ffbd6803904a0 100644
--- a/src/knot/server/zones.c
+++ b/src/knot/server/zones.c
@@ -3176,7 +3176,7 @@ int zones_schedule_notify(knot_zone_t *zone)
 	return KNOT_EOK;
 }
 
-int zones_schedule_refresh(knot_zone_t *zone, int time)
+int zones_schedule_refresh(knot_zone_t *zone, int64_t time)
 {
 	if (!zone || !zone->data) {
 		return KNOT_EINVAL;
diff --git a/src/knot/server/zones.h b/src/knot/server/zones.h
index 64086c423b01181ca250e1589bcb93cd0ffb0d8b..2260e01a0819ae13f9db183cd41be66ecb0dc889 100644
--- a/src/knot/server/zones.h
+++ b/src/knot/server/zones.h
@@ -312,7 +312,7 @@ int zones_store_and_apply_chgsets(knot_changesets_t *chs,
  * \retval KNOT_EINVAL
  * \retval KNOT_ERROR
  */
-int zones_schedule_refresh(knot_zone_t *zone, int time);
+int zones_schedule_refresh(knot_zone_t *zone, int64_t time);
 
 /*!
  * \brief Schedule NOTIFY after zone update.