/*  Copyright (C) 2014 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include <assert.h>
#include <time.h>

#include "common/evsched.h"
#include "common/mempool.h"
#include "knot/server/server.h"
#include "knot/updates/changesets.h"
#include "knot/dnssec/zone-events.h"
#include "knot/worker/pool.h"
#include "knot/worker/task.h"
#include "knot/zone/events.h"
#include "knot/zone/zone.h"
#include "knot/zone/zone-load.h"
#include "knot/zone/zonefile.h"
#include "libknot/rdata/soa.h"
#include "libknot/dnssec/random.h"
#include "knot/nameserver/internet.h"
#include "knot/nameserver/update.h"
#include "knot/nameserver/notify.h"
#include "knot/nameserver/requestor.h"
#include "knot/nameserver/tsig_ctx.h"
#include "knot/nameserver/process_answer.h"

/* ------------------------- bootstrap timer logic -------------------------- */

#define BOOTSTRAP_RETRY (30) /*!< Interval between AXFR bootstrap retries. */
#define BOOTSTRAP_MAXTIME (24*60*60) /*!< Cap on the AXFR bootstrap retry interval (24 hours). */

/*! \brief Progressive bootstrap retry timer. */
static void bootstrap_next(uint32_t *timer)
{
	*timer *= 2;
	*timer += knot_random_uint32_t() % BOOTSTRAP_RETRY;
	if (*timer > BOOTSTRAP_MAXTIME) {
		*timer = BOOTSTRAP_MAXTIME;
	}
}
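
/*
 * Worked example (the jitter is random, so the numbers are approximate):
 *
 *   uint32_t timer = BOOTSTRAP_RETRY;  // 30 s
 *   bootstrap_next(&timer);            // ~60..89 s
 *   bootstrap_next(&timer);            // ~120..207 s
 *   ...                                // eventually clamped at BOOTSTRAP_MAXTIME
 */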

/* ------------------------- zone query requesting -------------------------- */

/*! \brief Zone event logging. */
#define ZONE_QUERY_LOG(severity, zone, remote, what, msg...) \
	NS_PROC_LOG(severity, LOG_ZONE, &remote->addr, zone->conf->name, \
	            what " of '%s' with '%s': " msg)
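
/*
 * Illustrative expansion: a call such as
 *
 *   ZONE_QUERY_LOG(LOG_ERR, zone, master, "SOA query", "%s", knot_strerror(ret));
 *
 * concatenates the string literals into the format
 * "SOA query of '%s' with '%s': %s" and hands it to NS_PROC_LOG together with
 * the zone name and the remote address.
 */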

/*! \brief Create zone query packet. */
static knot_pkt_t *zone_query(const zone_t *zone, uint16_t qtype, mm_ctx_t *mm)
{
	knot_pkt_t *pkt = knot_pkt_new(NULL, KNOT_WIRE_MAX_PKTSIZE, mm);
	if (pkt == NULL) {
		return NULL;
	}

	knot_wire_set_aa(pkt->wire);
	knot_pkt_put_question(pkt, zone->name, KNOT_CLASS_IN, qtype);

	return pkt;
}

/*!
 * \brief Create a zone event query, send it, wait for the response and process it.
 *
 * \note Everything in this function is executed synchronously; it returns
 *       once the query processing is complete or an error occurs.
 */
static int zone_query_execute(zone_t *zone, uint16_t pkt_type, const conf_iface_t *remote)
{
	uint16_t query_type = KNOT_RRTYPE_SOA;
	uint16_t opcode = KNOT_OPCODE_QUERY;
	switch(pkt_type) {
	case KNOT_QUERY_AXFR: query_type = KNOT_RRTYPE_AXFR; break;
	case KNOT_QUERY_IXFR: query_type = KNOT_RRTYPE_IXFR; break;
	case KNOT_QUERY_NOTIFY: opcode = KNOT_OPCODE_NOTIFY; break;
	}

	/* Create a memory pool for this task. */
	int ret = KNOT_EOK;
	mm_ctx_t mm;
	mm_ctx_mempool(&mm, 4096);

	/* Create a query message. */
	knot_pkt_t *query = zone_query(zone, query_type, &mm);
	if (query == NULL) {
		return KNOT_ENOMEM;
	}
	knot_wire_set_opcode(query->wire, opcode);

	/* Put current SOA in authority (optional). */
	zone_contents_t *contents = zone->contents;
	if (pkt_type == KNOT_QUERY_IXFR || pkt_type == KNOT_QUERY_NOTIFY) {
		knot_pkt_begin(query, KNOT_AUTHORITY);
		knot_rrset_t soa_rr = node_rrset(contents->apex, KNOT_RRTYPE_SOA);
		knot_pkt_put(query, COMPR_HINT_QNAME, &soa_rr, 0);
	}

	/* Answer processing parameters. */
	struct process_answer_param param = { 0 };
	param.zone = zone;
	param.query = query;
	param.remote = &remote->addr;
	tsig_init(&param.tsig_ctx, remote->key);

	ret = tsig_sign_packet(&param.tsig_ctx, query);
	if (ret != KNOT_EOK) {
		mp_delete(mm.ctx);
		return ret;
	}

	/* Create requestor instance. */
	struct requestor re;
	requestor_init(&re, NS_PROC_ANSWER, &mm);

	/* Create a request. */
	struct request *req = requestor_make(&re, remote, query);
	if (req == NULL) {
		mp_delete(mm.ctx);
		return KNOT_ENOMEM;
	}

	requestor_enqueue(&re, req, &param);

	/* Send the queries and process responses. */
	struct timeval tv = { conf()->max_conn_hs, 0 };
	ret = requestor_exec(&re, &tv);

	/* Cleanup. */
	requestor_clear(&re);
	mp_delete(mm.ctx);

	return ret;
}
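
/*
 * Sketch of a typical invocation (event_refresh() and event_notify() below
 * are the real call sites):
 *
 *   const conf_iface_t *master = zone_master(zone);
 *   if (master != NULL) {
 *           int ret = zone_query_execute(zone, KNOT_QUERY_NORMAL, master);
 *           // KNOT_EOK on success, an error code otherwise
 *   }
 */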

/*!
 * \todo Separate signing from zone loading and drop this function.
 *
 * DNSSEC signing is scheduled from two places: after zone loading and after
 * a successful resign. This function just logs a message and reschedules the
 * DNSSEC timer.
 *
 * I would rather see the signing invoked from the event_dnssec() function.
 * That would require splitting the refresh event into zone load and zone
 * publishing.
 */
static void schedule_dnssec(zone_t *zone)
{
	// log a message

	char time_str[64] = { 0 };
	struct tm time_gm = { 0 };
	time_t unixtime = zone->dnssec.refresh_at;
	gmtime_r(&unixtime, &time_gm);
	strftime(time_str, sizeof(time_str), KNOT_LOG_TIME_FORMAT, &time_gm);
	log_zone_info("DNSSEC: Zone %s - Next event on %s.\n",
	              zone->conf->name, time_str);

	// schedule

	zone_events_schedule_at(zone, ZONE_EVENT_DNSSEC, unixtime);
}

/* -- zone events handling callbacks --------------------------------------- */

typedef int (*zone_event_cb)(zone_t *zone);

static int event_reload(zone_t *zone)
{
	assert(zone);
	fprintf(stderr, "RELOAD of '%s'\n", zone->conf->name);

	/* Take zone file mtime and load it. */
	time_t mtime = zonefile_mtime(zone->conf->file);
	conf_zone_t *zone_config = zone->conf;
	zone_contents_t *contents = zone_load_contents(zone_config);
	if (!contents) {
		return KNOT_ERROR; // TODO: specific error code
	}

	/* Apply changes in journal. */
	int result = zone_load_journal(contents, zone_config);
	if (result != KNOT_EOK) {
		goto fail;
	}

	/* Post load actions - calculate delta, sign with DNSSEC... */
	result = zone_load_post(contents, zone);
	if (result != KNOT_EOK) {
		if (result == KNOT_ESPACE) {
			log_zone_error("Zone '%s' journal size is too small to fit the changes.\n",
			               zone_config->name);
		} else {
			log_zone_error("Zone '%s' failed to store changes in the journal - %s\n",
			               zone_config->name, knot_strerror(result));
		}
		goto fail;
	}

	/* Check zone contents consistency. */
	result = zone_load_check(contents, zone_config);
	if (result != KNOT_EOK) {
		goto fail;
	}

	/* Everything went alright, switch the contents. */
	zone->zonefile_mtime = mtime;
	zone_contents_t *old = zone_switch_contents(zone, contents);
	if (old != NULL) {
		synchronize_rcu();
		zone_contents_deep_free(&old);
	}

	/* Schedule notify and refresh after load. */
	if (zone_master(zone)) {
		zone_events_schedule(zone, ZONE_EVENT_REFRESH, ZONE_EVENT_NOW);
	}
	if (!zone_contents_is_empty(contents)) {
		zone->xfr_in.bootstrap_retry = ZONE_EVENT_NOW;
		zone_events_schedule(zone, ZONE_EVENT_NOTIFY,  zone->xfr_in.bootstrap_retry);
	}
	if (zone->conf->dnssec_enable) {
		schedule_dnssec(zone);
	}

	/* Periodic execution. */
	zone_events_schedule(zone, ZONE_EVENT_FLUSH, zone_config->dbsync_timeout);

	log_zone_info("Zone '%s' loaded.\n", zone_config->name);
	return KNOT_EOK;

fail:
	zone_contents_deep_free(&contents);
	return result;
}

static int event_refresh(zone_t *zone)
{
	assert(zone);
	fprintf(stderr, "REFRESH of '%s'\n", zone->conf->name);

	zone_contents_t *contents = zone->contents;
	if (zone_contents_is_empty(contents)) {
		/* No contents, schedule retransfer now. */
		zone_events_schedule(zone, ZONE_EVENT_XFER, ZONE_EVENT_NOW);
		return KNOT_EOK;
	}

	const conf_iface_t *master = zone_master(zone);
	assert(master);

	int ret = zone_query_execute(zone, KNOT_QUERY_NORMAL, master);

	knot_rdataset_t *soa = node_rdataset(contents->apex, KNOT_RRTYPE_SOA);
	if (ret != KNOT_EOK) {
		/* Log connection errors. */
		ZONE_QUERY_LOG(LOG_ERR, zone, master, "SOA query", "%s", knot_strerror(ret));
		/* Rotate masters if current failed. */
		zone_master_rotate(zone);
		/* Schedule next retry. */
		zone_events_schedule(zone, ZONE_EVENT_REFRESH, knot_soa_retry(soa));
	} else {
		/* SOA query answered, reschedule refresh and expire timers. */
		zone_events_schedule(zone, ZONE_EVENT_REFRESH, knot_soa_refresh(soa));
		zone_events_schedule(zone, ZONE_EVENT_EXPIRE,  knot_soa_expire(soa));
	}
	return ret;
}

static int event_xfer(zone_t *zone)
{
	assert(zone);
	fprintf(stderr, "XFER of '%s'\n", zone->conf->name);

	const conf_iface_t *master = zone_master(zone);
	assert(master);

	uint16_t pkt_type = KNOT_QUERY_IXFR;
	if (zone_contents_is_empty(zone->contents) || zone->flags & ZONE_FORCE_AXFR) {
		pkt_type = KNOT_QUERY_AXFR;
	}

	int ret = zone_query_execute(zone, pkt_type, master);

	/* IXFR failed, revert to AXFR. */
	if (pkt_type == KNOT_QUERY_IXFR && ret != KNOT_EOK) {
		ZONE_QUERY_LOG(LOG_WARNING, zone, master, "IXFR", "Fallback to AXFR");
		zone->flags |= ZONE_FORCE_AXFR;
		ret = event_xfer(zone);
		zone->flags &= ~ZONE_FORCE_AXFR;
		return ret;
	}

	if (zone_contents_is_empty(zone->contents)) {
		/* Log connection errors. */
		ZONE_QUERY_LOG(LOG_ERR, zone, master, "AXFR", "%s", knot_strerror(ret));
		/* The zone contents are still empty: increase the bootstrap
		 * retry timer and try again. */
		bootstrap_next(&zone->xfr_in.bootstrap_retry);
		zone_events_schedule(zone, ZONE_EVENT_XFER, zone->xfr_in.bootstrap_retry);
		return ret;
	} else {
		/* New zone transferred, reschedule zone expiration and refresh
		 * timers and send notifications to slaves. */
		knot_rdataset_t *soa = node_rdataset(zone->contents->apex, KNOT_RRTYPE_SOA);
		zone_events_schedule(zone, ZONE_EVENT_EXPIRE,  knot_soa_expire(soa));
		zone_events_schedule(zone, ZONE_EVENT_REFRESH, knot_soa_refresh(soa));
		zone_events_schedule(zone, ZONE_EVENT_NOTIFY,  ZONE_EVENT_NOW);
		zone->xfr_in.bootstrap_retry = ZONE_EVENT_NOW;
	}

	return ret;
}

static int event_update(zone_t *zone)
{
	assert(zone);

	knot_pkt_t *resp = knot_pkt_new(NULL, KNOT_WIRE_MAX_PKTSIZE, NULL);
	if (resp == NULL) {
		return KNOT_ENOMEM;
	}

	struct request_data *update = zone_update_dequeue(zone);

	/* Initialize query response. */
	assert(update);
	assert(update->query);
	knot_pkt_init_response(resp, update->query);

	/* Create minimal query data context. */
	struct process_query_param param = {0};
	param.remote = &update->remote;
	struct query_data qdata = {0};
	qdata.param = &param;
	qdata.query = update->query;
	qdata.zone  = zone;

	/* Process the update query. */
	int ret = update_process_query(resp, &qdata);

	/* Send response. */
#warning TODO: proper API for this
	sendto(update->fd, resp->wire, resp->size, 0,
	       (struct sockaddr *)param.remote, sockaddr_len(param.remote));

	/* Cleanup. */
	knot_pkt_free(&resp);
	knot_pkt_free(&update->query);
	free(update);

	return ret;
}

static int event_expire(zone_t *zone)
{
	assert(zone);
	fprintf(stderr, "EXPIRE of '%s'\n", zone->conf->name);

	zone_contents_t *expired = zone_switch_contents(zone, NULL);
	synchronize_rcu();
	zone_contents_deep_free(&expired);

	log_zone_info("Zone '%s' expired.\n", zone->conf->name);

	return KNOT_EOK;
}

static int event_flush(zone_t *zone)
{
	assert(zone);
	fprintf(stderr, "FLUSH of '%s'\n", zone->conf->name);

	/* Reschedule. */
	int next_timeout = zone->conf->dbsync_timeout;
	if (next_timeout > 0) {
		zone_events_schedule(zone, ZONE_EVENT_FLUSH, next_timeout);
	}

	if (zone_contents_is_empty(zone->contents)) {
		return KNOT_EOK;
	}

	return zone_flush_journal(zone);
}

static int event_notify(zone_t *zone)
{
	assert(zone);
	fprintf(stderr, "NOTIFY of '%s'\n", zone->conf->name);

	/* Walk through configured remotes and send messages. */
	conf_remote_t *remote = 0;
	WALK_LIST(remote, zone->conf->acl.notify_out) {
		conf_iface_t *iface = remote->remote;

		int ret = zone_query_execute(zone, KNOT_QUERY_NOTIFY, iface);
		if (ret != KNOT_EOK) {
			ZONE_QUERY_LOG(LOG_ERR, zone, iface, "NOTIFY", "%s", knot_strerror(ret));
		}
	}

	return KNOT_EOK;
}

static int event_dnssec(zone_t *zone)
{
	assert(zone);
	fprintf(stderr, "DNSSEC of '%s'\n", zone->conf->name);

	changesets_t *chs = changesets_create(1);
	if (chs == NULL) {
		return KNOT_ENOMEM;
	}

	changeset_t *ch = changesets_get_last(chs);
	assert(ch);

	int ret = KNOT_ERROR;
	char *zname = knot_dname_to_str(zone->name);
	char *msgpref = sprintf_alloc("DNSSEC: Zone %s -", zname);
	free(zname);
	if (msgpref == NULL) {
		ret = KNOT_ENOMEM;
		goto done;
	}

	uint32_t refresh_at = 0;
	if (zone->dnssec.next_force) {
		log_zone_info("%s Complete resign started (dropping all "
			      "previous signatures)...\n", msgpref);

		zone->dnssec.next_force = false;
		ret = knot_dnssec_zone_sign_force(zone->contents, zone->conf,
		                                  ch, &refresh_at);
	} else {
		log_zone_info("%s Signing zone...\n", msgpref);
		ret = knot_dnssec_zone_sign(zone->contents, zone->conf,
		                            ch, KNOT_SOA_SERIAL_UPDATE,
		                            &refresh_at);
	}
	if (ret != KNOT_EOK) {
		goto done;
	}

	if (!changesets_empty(chs)) {
		ret = zone_change_apply_and_store(&chs, zone, "DNSSEC", NULL);
		if (ret != KNOT_EOK) {
			log_zone_error("%s Could not sign zone (%s).\n",
				       msgpref, knot_strerror(ret));
			goto done;
		}
	}

	// Schedule dependent events.

	zone->dnssec.refresh_at = refresh_at;
	schedule_dnssec(zone);
	zone_events_schedule(zone, ZONE_EVENT_NOTIFY, ZONE_EVENT_NOW);
	if (zone->conf->dbsync_timeout == 0) {
		zone_events_schedule(zone, ZONE_EVENT_FLUSH, ZONE_EVENT_NOW);
	}

done:
	changesets_free(&chs, NULL);
	free(msgpref);
	return ret;
}

#undef ZONE_QUERY_LOG

/* -- internal API --------------------------------------------------------- */

static bool valid_event(zone_event_type_t type)
{
	return (type >= 0 && type < ZONE_EVENT_COUNT);
}

/*! \brief Return remaining time to planned event (seconds). */
static time_t time_until(time_t planned)
{
	time_t now = time(NULL);
	return now < planned ? (planned - now) : 0;
}

/*!
 * \brief Find next scheduled zone event.
 *
 * \param events  Zone events.
 *
 * \return Zone event type, or ZONE_EVENT_INVALID if no event is scheduled.
 */
static zone_event_type_t get_next_event(zone_events_t *events)
{
	if (!events) {
		return ZONE_EVENT_INVALID;
	}

	zone_event_type_t next_type = ZONE_EVENT_INVALID;
	time_t next = 0;

	for (int i = 0; i < ZONE_EVENT_COUNT; i++) {
		time_t current = events->time[i];
		if (current == 0) {
			continue;
		}

		if (next == 0 || current < next) {
			next = current;
			next_type = i;
		}
	}

	return next_type;
}

/*!
 * \brief Set time of a given event type.
 */
static void event_set_time(zone_events_t *events, zone_event_type_t type, time_t time)
{
	assert(events);
	assert(valid_event(type));

	events->time[type] = time;
}

/*!
 * \brief Get time of a given event type.
 */
static time_t event_get_time(zone_events_t *events, zone_event_type_t type)
{
	assert(events);
	assert(valid_event(type));

	return events->time[type];
}

/*!
 * \brief Re-arm the scheduler event for the nearest pending zone event,
 *        or cancel it if nothing is planned.
 *
 * The events mutex must be locked when calling this function.
 */
static void reschedule(zone_events_t *events)
{
	assert(events);
	assert(pthread_mutex_trylock(&events->mx) == EBUSY);

	if (!events->event || events->running) {
		return;
	}

	zone_event_type_t type = get_next_event(events);
	if (!valid_event(type)) {
		evsched_cancel(events->event);
		return;
	}

	time_t diff = time_until(events->time[type]);

	evsched_schedule(events->event, diff * 1000);
}
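
/*
 * Sketch of the locking pattern expected by reschedule() (it mirrors the
 * callers in the public API below):
 *
 *   pthread_mutex_lock(&events->mx);
 *   reschedule(events);
 *   pthread_mutex_unlock(&events->mx);
 */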

/* -- callbacks control ---------------------------------------------------- */

typedef struct event_info_t {
	zone_event_type_t type;
	const zone_event_cb callback;
	const char *name;
} event_info_t;

static const event_info_t EVENT_INFO[] = {
        { ZONE_EVENT_RELOAD,  event_reload,  "reload" },
        { ZONE_EVENT_REFRESH, event_refresh, "refresh" },
        { ZONE_EVENT_XFER,    event_xfer,    "transfer" },
        { ZONE_EVENT_UPDATE,  event_update,  "UPDATE" },
        { ZONE_EVENT_EXPIRE,  event_expire,  "expiration" },
        { ZONE_EVENT_FLUSH,   event_flush,   "journal flush" },
        { ZONE_EVENT_NOTIFY,  event_notify,  "notify" },
        { ZONE_EVENT_DNSSEC,  event_dnssec,  "DNSSEC resign" },
        { 0 }
};

static const event_info_t *get_event_info(zone_event_type_t type)
{
	const event_info_t *info;
	for (info = EVENT_INFO; info->callback != NULL; info++) {
		if (info->type == type) {
			return info;
		}
	}

	assert(0);
	return NULL;
}

/*!
 * \brief Zone event wrapper, expected to be called from a worker thread.
 *
 * 1. Takes the next planned event.
 * 2. Resets the event's scheduled time.
 * 3. Performs the event's callback.
 * 4. Schedules the next planned event.
 */
static void event_wrap(task_t *task)
{
	assert(task);
	assert(task->ctx);

	zone_t *zone = task->ctx;
	zone_events_t *events = &zone->events;

	pthread_mutex_lock(&events->mx);
	zone_event_type_t type = get_next_event(events);
	if (!valid_event(type)) {
		events->running = false;
		pthread_mutex_unlock(&events->mx);
		return;
	}
	event_set_time(events, type, 0);
	pthread_mutex_unlock(&events->mx);

	const event_info_t *info = get_event_info(type);
	int result = info->callback(zone);
	if (result != KNOT_EOK) {
		log_zone_error("[%s] %s failed - %s\n", zone->conf->name,
		               info->name, knot_strerror(result));
	}

	pthread_mutex_lock(&events->mx);
	events->running = false;
	reschedule(events);
	pthread_mutex_unlock(&events->mx);
}

/*!
 * \brief Called by scheduler thread if the event occurs.
 */
static int event_dispatch(event_t *event)
{
	assert(event);
	assert(event->data);

	zone_events_t *events = event->data;

	pthread_mutex_lock(&events->mx);
	if (!events->running) {
		events->running = true;
		worker_pool_assign(events->pool, &events->task);
	}
	pthread_mutex_unlock(&events->mx);

	return KNOT_EOK;
}

/* -- public API ----------------------------------------------------------- */
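
/*
 * A rough sketch of the expected call order, as suggested by the functions
 * below (how the server actually wires this up is outside this file):
 *
 *   zone_events_init(zone);                       // at zone creation
 *   zone_events_setup(zone, workers, scheduler);  // attach worker pool and scheduler
 *   zone_events_schedule(zone, ZONE_EVENT_RELOAD, ZONE_EVENT_NOW);
 *   zone_events_start(zone);                      // arm the scheduler event
 *   ...
 *   zone_events_cancel_all(zone);
 *   zone_events_deinit(zone);                     // at zone teardown
 */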

int zone_events_init(zone_t *zone)
{
	if (!zone) {
		return KNOT_EINVAL;
	}

	zone_events_t *events = &zone->events;

	memset(&zone->events, 0, sizeof(zone->events));
	pthread_mutex_init(&events->mx, NULL);
	events->task.ctx = zone;
	events->task.run = event_wrap;

	return KNOT_EOK;
}

int zone_events_setup(zone_t *zone, worker_pool_t *workers, evsched_t *scheduler)
{
	if (!zone || !workers || !scheduler) {
		return KNOT_EINVAL;
	}

	event_t *event;
	event = evsched_event_create(scheduler, event_dispatch, &zone->events);
	if (!event) {
		return KNOT_ENOMEM;
	}

	zone->events.event = event;
	zone->events.pool = workers;

	return KNOT_EOK;
}

void zone_events_deinit(zone_t *zone)
{
	if (!zone) {
		return;
	}

	evsched_cancel(zone->events.event);
	evsched_event_free(zone->events.event);

	assert(zone->events.running == false);
	pthread_mutex_destroy(&zone->events.mx);

	memset(&zone->events, 0, sizeof(zone->events));
}

void zone_events_schedule_at(zone_t *zone, zone_event_type_t type, time_t time)
{
	if (!zone || !valid_event(type)) {
		return;
	}

	zone_events_t *events = &zone->events;

	pthread_mutex_lock(&events->mx);

	time_t current = event_get_time(events, type);
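	/* Plan the event only if it is not planned yet, is being cancelled
	 * (time == 0), or would run earlier; a later time never postpones
	 * an already planned event. */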
	if (current == 0 || time == 0 || time < current) {
		event_set_time(events, type, time);
		reschedule(events);
	}

	pthread_mutex_unlock(&events->mx);
}

void zone_events_schedule(zone_t *zone, zone_event_type_t type, unsigned dt)
{
	fprintf(stderr, "%s: %s '%s' in '%u' seconds\n",
	        __func__, zone->conf->name, get_event_info(type)->name, dt);
	time_t abstime = time(NULL) + dt;
	return zone_events_schedule_at(zone, type, abstime);
}

void zone_events_cancel(zone_t *zone, zone_event_type_t type)
{
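	/* A zero time marks the event as not planned (see get_next_event()). */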
	zone_events_schedule_at(zone, type, 0);
}

void zone_events_cancel_all(zone_t *zone)
{
	if (!zone) {
		return;
	}

	zone_events_t *events = &zone->events;

	pthread_mutex_lock(&events->mx);
	for (int i = 0; i < ZONE_EVENT_COUNT; i++) {
		event_set_time(events, i, 0);
	}
	reschedule(events);
	pthread_mutex_unlock(&events->mx);
}

void zone_events_start(zone_t *zone)
{
	if (!zone) {
		return;
	}

	pthread_mutex_lock(&zone->events.mx);
	reschedule(&zone->events);
	pthread_mutex_unlock(&zone->events.mx);
}