/*  Copyright (C) 2014 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "knot/nameserver/axfr.h"
#include "knot/nameserver/internet.h"
#include "knot/nameserver/process_query.h"
#include "knot/nameserver/process_answer.h"
#include "knot/updates/apply.h"
#include "knot/zone/zonefile.h"
#include "common/debug.h"
#include "libknot/descriptor.h"
#include "common/lists.h"

/* AXFR context. @note aliasing the generic xfr_proc */
struct axfr_proc {
	struct xfr_proc proc;
	hattrie_iter_t *i;
	unsigned cur_rrset;
};

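/*
 * The answer is generated incrementally: the hattrie iterator and 'cur_rrset'
 * record how far axfr_put_rrsets() got, so when a packet fills up the next
 * call resumes from the same node and RRSet.
 */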
static int axfr_put_rrsets(knot_pkt_t *pkt, zone_node_t *node,
                           struct axfr_proc *state)
{
	assert(node != NULL);

	int ret = KNOT_EOK;
	int i = state->cur_rrset;
	uint16_t rrset_count = node->rrset_count;
	unsigned flags = KNOT_PF_NOTRUNC;

	/* Append all RRs. */
	for (; i < rrset_count; ++i) {
		knot_rrset_t rrset = node_rrset_at(node, i);
		if (rrset.type == KNOT_RRTYPE_SOA) {
			continue;
		}
		ret = knot_pkt_put(pkt, 0, &rrset, flags);

		/* If something failed, remember the current RR for later. */
		if (ret != KNOT_EOK) {
			state->cur_rrset = i;
			return ret;
		}
	}

	state->cur_rrset = 0;
	return ret;
}

static int axfr_process_node_tree(knot_pkt_t *pkt, const void *item,
                                  struct xfr_proc *state)
{
	assert(item != NULL);

	struct axfr_proc *axfr = (struct axfr_proc*)state;

	if (axfr->i == NULL) {
		axfr->i = hattrie_iter_begin(item, true);
	}

	/* Put responses. */
	int ret = KNOT_EOK;
	zone_node_t *node = NULL;
	while (!hattrie_iter_finished(axfr->i)) {
		node = (zone_node_t *)*hattrie_iter_val(axfr->i);
		ret = axfr_put_rrsets(pkt, node, axfr);
		if (ret != KNOT_EOK) {
			break;
		}
		hattrie_iter_next(axfr->i);
	}

	/* Finished all nodes. */
	if (ret == KNOT_EOK) {
		hattrie_iter_free(axfr->i);
		axfr->i = NULL;
	}
	return ret;
}

static void axfr_query_cleanup(struct query_data *qdata)
{
	struct axfr_proc *axfr = (struct axfr_proc *)qdata->ext;

	hattrie_iter_free(axfr->i);
	ptrlist_free(&axfr->proc.nodes, qdata->mm);
	mm_free(qdata->mm, axfr);

	/* Allow zone changes (finished). */
	rcu_read_unlock();
}

static int axfr_query_check(struct query_data *qdata)
{
	/* Check valid zone, transaction security and contents. */
	NS_NEED_ZONE(qdata, KNOT_RCODE_NOTAUTH);
	NS_NEED_AUTH(&qdata->zone->conf->acl.xfr_out, qdata);
	/* Check expiration. */
	NS_NEED_ZONE_CONTENTS(qdata, KNOT_RCODE_SERVFAIL);

	return NS_PROC_DONE;
}

static int axfr_query_init(struct query_data *qdata)
{
	assert(qdata);

	/* Check AXFR query validity. */
	int state = axfr_query_check(qdata);
	if (state == NS_PROC_FAIL) {
		if (qdata->rcode == KNOT_RCODE_FORMERR) {
			return KNOT_EMALF;
		} else {
			return KNOT_EDENIED;
		}
	}

	/* Create transfer processing context. */
	mm_ctx_t *mm = qdata->mm;

	zone_contents_t *zone = qdata->zone->contents;
	struct axfr_proc *axfr = mm_alloc(mm, sizeof(struct axfr_proc));
	if (axfr == NULL) {
		return KNOT_ENOMEM;
	}
	memset(axfr, 0, sizeof(struct axfr_proc));
	init_list(&axfr->proc.nodes);

	/* Put data to process. */
	gettimeofday(&axfr->proc.tstamp, NULL);
	ptrlist_add(&axfr->proc.nodes, zone->nodes, mm);
	/* Put NSEC3 nodes if they exist. */
	if (!zone_tree_is_empty(zone->nsec3_nodes)) {
		ptrlist_add(&axfr->proc.nodes, zone->nsec3_nodes, mm);
	}

	/* Set up cleanup callback. */
	qdata->ext = axfr;
	qdata->ext_cleanup = &axfr_query_cleanup;

	/* No zone changes during the multi-packet answer
	   (unlocked in axfr_query_cleanup). */
	rcu_read_lock();

	return KNOT_EOK;
}

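/*
 * Generic transfer driver: consumes items from proc.nodes with the given
 * callback until the packet is full. The first packet is prefixed and the
 * last one suffixed with the zone SOA, as an AXFR-style answer requires.
 */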
int xfr_process_list(knot_pkt_t *pkt, xfr_put_cb process_item,
                     struct query_data *qdata)
{
	if (pkt == NULL || qdata == NULL || qdata->ext == NULL) {
		return KNOT_EINVAL;
	}

	int ret = KNOT_EOK;
	mm_ctx_t *mm = qdata->mm;
	struct xfr_proc *xfer = qdata->ext;

	zone_contents_t *zone = qdata->zone->contents;
	knot_rrset_t soa_rr = node_rrset(zone->apex, KNOT_RRTYPE_SOA);

	/* Prepend SOA on first packet. */
	if (xfer->npkts == 0) {
		ret = knot_pkt_put(pkt, 0, &soa_rr, KNOT_PF_NOTRUNC);
		if (ret != KNOT_EOK) {
			return ret;
		}
	}

	/* Process all items in the list. */
	while (!EMPTY_LIST(xfer->nodes)) {
		ptrnode_t *head = HEAD(xfer->nodes);
		ret = process_item(pkt, head->d, xfer);
		if (ret == KNOT_EOK) { /* Finished. */
			/* Item fully processed, remove it from the list. */
			rem_node((node_t *)head);
			mm_free(mm, head);
		} else { /* Packet full or other error. */
			break;
		}
	}

	/* Append SOA on last packet. */
	if (ret == KNOT_EOK) {
		ret = knot_pkt_put(pkt, 0, &soa_rr, KNOT_PF_NOTRUNC);
	}

	/* Update counters. */
	xfer->npkts  += 1;
	xfer->nbytes += pkt->size;

	return ret;
}

/* AXFR-specific logging (internal, expects 'qdata' variable set). */
#define AXFROUT_LOG(severity, msg...) \
	QUERY_LOG(severity, qdata, "AXFR, outgoing", msg)

int axfr_query_process(knot_pkt_t *pkt, struct query_data *qdata)
{
	if (pkt == NULL || qdata == NULL) {
		return NS_PROC_FAIL;
	}

	int ret = KNOT_EOK;
	struct timeval now = {0};

	/* If AXFR is disabled, respond with NOTIMPL. */
	if (qdata->param->proc_flags & NS_QUERY_NO_AXFR) {
		qdata->rcode = KNOT_RCODE_NOTIMPL;
		return NS_PROC_FAIL;
	}

	/* Initialize on first call. */
	if (qdata->ext == NULL) {
		ret = axfr_query_init(qdata);
		if (ret != KNOT_EOK) {
			AXFROUT_LOG(LOG_ERR, "failed to start (%s)",
			            knot_strerror(ret));
			return NS_PROC_FAIL;
		} else {
			AXFROUT_LOG(LOG_INFO, "started, serial %u",
			            zone_contents_serial(qdata->zone->contents));
		}
	}

	/* Reserve space for TSIG. */
	knot_pkt_reserve(pkt, tsig_wire_maxsize(qdata->sign.tsig_key));

	/* Answer current packet (or continue). */
	struct axfr_proc *axfr = (struct axfr_proc *)qdata->ext;
	ret = xfr_process_list(pkt, &axfr_process_node_tree, qdata);
	switch (ret) {
	case KNOT_ESPACE: /* Couldn't write more, send packet and continue. */
		return NS_PROC_FULL; /* Check for more. */
	case KNOT_EOK:    /* Last response. */
		gettimeofday(&now, NULL);
		AXFROUT_LOG(LOG_INFO,
		            "finished, %.02f seconds, %u messages, %u bytes",
		            time_diff(&axfr->proc.tstamp, &now) / 1000.0,
		            axfr->proc.npkts, axfr->proc.nbytes);
		return NS_PROC_DONE;
	default:          /* Generic error. */
		AXFROUT_LOG(LOG_ERR, "failed (%s)", knot_strerror(ret));
		return NS_PROC_FAIL;
	}
}
#undef AXFROUT_LOG

static void axfr_answer_cleanup(struct answer_data *data)
{
	assert(data != NULL);

	struct xfr_proc *proc = data->ext;
	if (proc) {
		zone_contents_deep_free(&proc->contents);
		mm_free(data->mm, proc);
		data->ext = NULL;
	}
}

static int axfr_answer_init(struct answer_data *data)
{
	assert(data);

	/* Create new zone contents. */
	zone_t *zone = data->param->zone;
	zone_contents_t *new_contents = zone_contents_new(zone->name);
	if (new_contents == NULL) {
		return KNOT_ENOMEM;
	}

	/* Create new processing context. */
	struct xfr_proc *proc = mm_alloc(data->mm, sizeof(struct xfr_proc));
	if (proc == NULL) {
		zone_contents_deep_free(&new_contents);
		return KNOT_ENOMEM;
	}

	memset(proc, 0, sizeof(struct xfr_proc));
	proc->contents = new_contents;
	gettimeofday(&proc->tstamp, NULL);

	/* Set up cleanup callback. */
	data->ext = proc;
	data->ext_cleanup = &axfr_answer_cleanup;

	return KNOT_EOK;
}

/* AXFR-specific logging (internal, expects 'adata' variable set). */
#define AXFRIN_LOG(severity, msg...) \
	ANSWER_LOG(severity, adata, "AXFR, incoming", msg)

static int axfr_answer_finalize(struct answer_data *adata)
{
	struct timeval now;
	gettimeofday(&now, NULL);

	/*
	 * Adjust zone so that node count is set properly and nodes are
	 * marked authoritative / delegation point.
	 */
	struct xfr_proc *proc = adata->ext;
	int rc = zone_contents_adjust_full(proc->contents, NULL, NULL);
	if (rc != KNOT_EOK) {
		return rc;
	}

	/* Switch contents. */
	zone_t *zone = adata->param->zone;
	zone_contents_t *old_contents =
	                zone_switch_contents(zone, proc->contents);
	synchronize_rcu();

	AXFRIN_LOG(LOG_INFO, "finished, "
	           "serial %u -> %u, %.02f seconds, %u messages, %u bytes",
	           zone_contents_serial(old_contents),
	           zone_contents_serial(proc->contents),
	           time_diff(&proc->tstamp, &now) / 1000.0,
	           proc->npkts, proc->nbytes);

	/* Free the old contents; detach the new ones so the cleanup callback
	   does not free them. */
	zone_contents_deep_free(&old_contents);
	proc->contents = NULL;

	return KNOT_EOK;
}

static int axfr_answer_packet(knot_pkt_t *pkt, struct xfr_proc *proc)
{
	assert(pkt != NULL);
	assert(proc != NULL);

	/* Update counters. */
	proc->npkts  += 1;
	proc->nbytes += pkt->size;

	/* Init zone creator. */
	zcreator_t zc = {.z = proc->contents, .master = false, .ret = KNOT_EOK };

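	/* An AXFR stream is bounded by the zone SOA: once the apex already
	   holds a SOA, another SOA record marks the final message. */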
	const knot_pktsection_t *answer = knot_pkt_section(pkt, KNOT_ANSWER);
	for (uint16_t i = 0; i < answer->count; ++i) {
		const knot_rrset_t *rr = &answer->rr[i];
		if (rr->type == KNOT_RRTYPE_SOA &&
		    node_rrtype_exists(zc.z->apex, KNOT_RRTYPE_SOA)) {
			return NS_PROC_DONE;
		} else {
			int ret = zcreator_step(&zc, rr);
			if (ret != KNOT_EOK) {
				return NS_PROC_FAIL;
			}
		}
	}

	return NS_PROC_MORE;
}

int axfr_answer_process(knot_pkt_t *pkt, struct answer_data *adata)
{
	if (pkt == NULL || adata == NULL) {
		return NS_PROC_FAIL;
	}

	/* Check RCODE. */
	uint8_t rcode = knot_wire_get_rcode(pkt->wire);
	if (rcode != KNOT_RCODE_NOERROR) {
		knot_lookup_table_t *lut = knot_lookup_by_id(knot_rcode_names, rcode);
		if (lut != NULL) {
			AXFRIN_LOG(LOG_ERR, "server responded with %s", lut->name);
		}
		return NS_PROC_FAIL;
	}

	/* Initialize processing with first packet. */
	if (adata->ext == NULL) {
		NS_NEED_TSIG_SIGNED(&adata->param->tsig_ctx, 0);
		if (!zone_transfer_needed(adata->param->zone, pkt)) {
			AXFRIN_LOG(LOG_INFO, "zone is up-to-date");
			return NS_PROC_DONE;
		}
		AXFRIN_LOG(LOG_INFO, "starting");

		int ret = axfr_answer_init(adata);
		if (ret != KNOT_EOK) {
			AXFRIN_LOG(LOG_ERR, "failed (%s)", knot_strerror(ret));
			return NS_PROC_FAIL;
		}
	} else {
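		/* Intermediate messages are verified with a relaxed limit (100)
		   compared to the strict limit (0) applied to the first and
		   last message. */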
		NS_NEED_TSIG_SIGNED(&adata->param->tsig_ctx, 100);
	}

	/* Process answer packet. */
	int ret = axfr_answer_packet(pkt, (struct xfr_proc *)adata->ext);
	if (ret == NS_PROC_DONE) {
		NS_NEED_TSIG_SIGNED(&adata->param->tsig_ctx, 0);
		/* This was the last packet, finalize zone and publish it. */
		int fret = axfr_answer_finalize(adata);
		if (fret != KNOT_EOK) {
			ret = NS_PROC_FAIL;
		}
	}

	return ret;
}

#undef AXFRIN_LOG