/*  Copyright (C) 2014 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <uv.h>
#include <lua.h>
#include <libknot/packet/pkt.h>
#include <contrib/ucw/lib.h>
#include <contrib/ucw/mempool.h>
#if defined(__GLIBC__) && defined(_GNU_SOURCE)
#include <malloc.h>
#endif
#include <assert.h>

#include "daemon/worker.h"
#include "daemon/engine.h"
#include "daemon/io.h"

/* @internal IO request entry. */
struct ioreq
{
	union {
		uv_udp_t      udp;
		uv_tcp_t      tcp;
		uv_udp_send_t send;
		uv_write_t    write;
		uv_connect_t  connect;
	} as;
};

/** @internal Number of requests within the timeout window. */
#define MAX_PENDING (KR_NSREP_MAXADDR + (KR_NSREP_MAXADDR / 2))

/** @internal Query resolution task. */
struct qr_task
{
	struct kr_request req;
	struct worker_ctx *worker;
	knot_pkt_t *pktbuf;
	uv_handle_t *pending[MAX_PENDING];
	uint16_t pending_count;
	uint16_t addrlist_count;
	uint16_t addrlist_turn;
	struct sockaddr *addrlist;
	uv_timer_t retry, timeout;
	worker_cb_t on_complete;
	void *baton;
	struct {
		union {
			struct sockaddr_in ip4;
			struct sockaddr_in6 ip6;
		} addr;
		uv_handle_t *handle;
	} source;
	uint16_t iter_count;
	uint16_t refs;
	uint16_t bytes_remaining;
};

/* Convenience macros */
#define qr_task_ref(task) \
	do { ++(task)->refs; } while(0)
#define qr_task_unref(task) \
	do { if (--(task)->refs == 0) { qr_task_free(task); } } while (0)
#define qr_valid_handle(task, checked) \
	(!uv_is_closing((checked)) || (task)->source.handle == (checked))

/* Forward decls */
static int qr_task_step(struct qr_task *task, const struct sockaddr *packet_source, knot_pkt_t *packet);

/** @internal Get singleton worker. */
static inline struct worker_ctx *get_worker(void)
{
	return uv_default_loop()->data;
}

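/** @internal Take an I/O request from the freelist, or heap-allocate a new one. */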
static inline struct ioreq *ioreq_take(struct worker_ctx *worker)
{
	struct ioreq *req = NULL;
	if (worker->ioreqs.len > 0) {
		req = array_tail(worker->ioreqs);
		array_pop(worker->ioreqs);
	} else {
		req = malloc(sizeof(*req));
	}
	if (req) {
		kr_asan_unpoison(req, sizeof(*req));
	}
	return req;
}

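/** @internal Return an I/O request to the freelist, or free it when the freelist is full. */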
static inline void ioreq_release(struct worker_ctx *worker, struct ioreq *req)
{
	if (!req) {
		return;
	}
	if (worker->ioreqs.len < 4 * MP_FREELIST_SIZE) {
		kr_asan_poison(req, sizeof(*req));
		array_push(worker->ioreqs, req);
	} else {
		free(req);
	}
}

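/** @internal Create an outbound UDP/TCP handle for a subrequest and track it among the task's pending I/O. */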
static uv_handle_t *ioreq_spawn(struct qr_task *task, int socktype)
{
	if (task->pending_count >= MAX_PENDING) {
		return NULL;
	}
	/* Create connection for iterative query */
	uv_handle_t *req = (uv_handle_t *)ioreq_take(task->worker);
	if (!req) {
		return NULL;
	}
	io_create(task->worker->loop, req, socktype);
	req->data = task;
	/* Connect or issue query datagram */
	task->pending[task->pending_count] = req;
	task->pending_count += 1;
	return req;
}

static void ioreq_on_close(uv_handle_t *handle)
{
	struct worker_ctx *worker = get_worker();
	ioreq_release(worker, (struct ioreq *)handle);
}

static void ioreq_kill(uv_handle_t *req)
{
	assert(req);
	if (!uv_is_closing(req)) {
		io_stop_read(req);
		uv_close(req, ioreq_on_close);
	}
}

static void ioreq_killall(struct qr_task *task)
{
	for (size_t i = 0; i < task->pending_count; ++i) {
		ioreq_kill(task->pending[i]);
	}
	task->pending_count = 0;
}

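/** @internal Take a memory pool from the freelist, or create a fresh one. */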
static inline struct mempool *pool_take(struct worker_ctx *worker)
{
	/* Recycle available mempool if possible */
	struct mempool *mp = NULL;
	if (worker->pools.len > 0) {
		mp = array_tail(worker->pools);
		array_pop(worker->pools);
	} else { /* No mempool on the freelist, create new one */
		mp = mp_new (4 * CPU_PAGE_SIZE);
	}
	kr_asan_unpoison(mp, sizeof(*mp));
	return mp;
}

static inline void pool_release(struct worker_ctx *worker, struct mempool *mp)
{
	/* Return mempool to ring or free it if it's full */
	if (worker->pools.len < MP_FREELIST_SIZE) {
		mp_flush(mp);
		array_push(worker->pools, mp);
		kr_asan_poison(mp, sizeof(*mp));
	} else {
		mp_delete(mp);
	}
}

static struct qr_task *qr_task_create(struct worker_ctx *worker, uv_handle_t *handle, knot_pkt_t *query, const struct sockaddr *addr)
{
	/* How much can the client handle? */
	struct engine *engine = worker->engine;
	size_t answer_max = KNOT_WIRE_MIN_PKTSIZE;
	size_t pktbuf_max = KR_EDNS_PAYLOAD;
	if (engine->resolver.opt_rr) {
		pktbuf_max = MAX(knot_edns_get_payload(engine->resolver.opt_rr), pktbuf_max);
	}
	if (!addr && handle) { /* TCP */
		answer_max = KNOT_WIRE_MAX_PKTSIZE;
		pktbuf_max = KNOT_WIRE_MAX_PKTSIZE;
	} else if (knot_pkt_has_edns(query)) { /* EDNS */
		answer_max = MAX(knot_edns_get_payload(query->opt_rr), KNOT_WIRE_MIN_PKTSIZE);
	}

	/* Recycle available mempool if possible */
	mm_ctx_t pool = {
		.ctx = pool_take(worker),
		.alloc = (mm_alloc_t) mp_alloc
	};

	/* Create resolution task */
	struct qr_task *task = mm_alloc(&pool, sizeof(*task));
	if (!task) {
		mp_delete(pool.ctx);
		return NULL;
	}
	/* Create packet buffers for answer and subrequests */
	task->req.pool = pool;
	knot_pkt_t *pktbuf = knot_pkt_new(NULL, pktbuf_max, &task->req.pool);
	knot_pkt_t *answer = knot_pkt_new(NULL, answer_max, &task->req.pool);
	if (!pktbuf || !answer) {
		mp_delete(pool.ctx);
		return NULL;
	}
	task->req.answer = answer;
	task->pktbuf = pktbuf;
	task->addrlist = NULL;
	task->pending_count = 0;
	task->bytes_remaining = 0;
	task->iter_count = 0;
	task->refs = 1;
	task->worker = worker;
	task->source.handle = handle;
	uv_timer_init(worker->loop, &task->retry);
	uv_timer_init(worker->loop, &task->timeout);
	task->retry.data = task;
	task->timeout.data = task;
	task->on_complete = NULL;
	/* Remember query source addr */
	if (addr) {
		memcpy(&task->source.addr, addr, sockaddr_len(addr));
		task->req.qsource.addr = (const struct sockaddr *)&task->source.addr;
	} else {
		task->source.addr.ip4.sin_family = AF_UNSPEC;
	}
	/* Remember query source TSIG key */
	if (query->tsig_rr) {
		task->req.qsource.key = knot_rrset_copy(query->tsig_rr, &task->req.pool);
	}

	/* Start resolution */
	kr_resolve_begin(&task->req, &engine->resolver, answer);
	worker->stats.concurrent += 1;
	return task;
}

/* This is called when the task refcount reaches zero: release its memory. */
static void qr_task_free(struct qr_task *task)
{
	/* Return mempool to ring or free it if it's full */
	struct worker_ctx *worker = task->worker;
	pool_release(worker, task->req.pool.ctx);
	/* @note The 'task' is invalidated from now on. */
	/* Decommit memory every once in a while */
	static int mp_delete_count = 0;
	if (++mp_delete_count == 100000) {
		lua_gc(worker->engine->L, LUA_GCCOLLECT, 0);
#if defined(__GLIBC__) && defined(_GNU_SOURCE)
		malloc_trim(0);
#endif
		mp_delete_count = 0;
	}
}

/* This is called when retry timer closes */
static void retransmit_close(uv_handle_t *handle)
{
	struct qr_task *task = handle->data;
	qr_task_unref(task);
}

/* This is called when task completes and timeout timer is closed. */
static void qr_task_complete(uv_handle_t *handle)
{
	struct qr_task *task = handle->data;
	struct worker_ctx *worker = task->worker;
	/* Kill pending I/O requests */
	ioreq_killall(task);
	/* Run the completion callback. */
	if (task->on_complete) {
		task->on_complete(worker, &task->req, task->baton);
	}
	/* Return handle to the event loop in case
	 * it was exclusively taken by this task. */
	if (task->source.handle && !uv_has_ref(task->source.handle)) {
		uv_ref(task->source.handle);
		io_start_read(task->source.handle);
	}
	/* Release task */
	qr_task_unref(task);
	/* Update stats */
	worker->stats.concurrent -= 1;
}

/* This is called when the I/O times out */
static void on_timeout(uv_timer_t *req)
{
	struct qr_task *task = req->data;
	if (!uv_is_closing((uv_handle_t *)req)) {
		qr_task_step(task, NULL, NULL);
	}
}

/* This is called when we send a subrequest or the answer */
static int qr_task_on_send(struct qr_task *task, uv_handle_t *handle, int status)
{
	/* When the state is NOOP it means we have sent the final answer to the
	 * originator, so we close the timers and finalize the task. */
	if (task->req.state != KNOT_STATE_NOOP) {
		if (status == 0 && handle) {
			io_start_read(handle); /* Start reading answer */
		}
	} else {
		/* Close retry timer (borrows task) */
		qr_task_ref(task);
		uv_timer_stop(&task->retry);
		uv_close((uv_handle_t *)&task->retry, retransmit_close);
		/* Close timeout timer (finishes task) */
		uv_timer_stop(&task->timeout);
		uv_close((uv_handle_t *)&task->timeout, qr_task_complete);
	}
	return status;
}

static void on_send(uv_udp_send_t *req, int status)
{
	struct worker_ctx *worker = get_worker();
	struct qr_task *task = req->data;
	if (qr_valid_handle(task, (uv_handle_t *)req->handle)) {
		qr_task_on_send(task, (uv_handle_t *)req->handle, status);
	}
	qr_task_unref(task);
	ioreq_release(worker, (struct ioreq *)req);
}

static void on_write(uv_write_t *req, int status)
{
	struct worker_ctx *worker = get_worker();
	struct qr_task *task = req->data;
	if (qr_valid_handle(task, (uv_handle_t *)req->handle)) {
		qr_task_on_send(task, (uv_handle_t *)req->handle, status);
	}
	qr_task_unref(task);
	ioreq_release(worker, (struct ioreq *)req);
}

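/** @internal Send the packet over the given handle; UDP datagrams go out as-is, TCP writes are prefixed with the two-byte message length. */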
static int qr_task_send(struct qr_task *task, uv_handle_t *handle, struct sockaddr *addr, knot_pkt_t *pkt)
{
	if (!handle) {
		return qr_task_on_send(task, handle, kr_error(EIO));
	}
	struct ioreq *send_req = ioreq_take(task->worker);
	if (!send_req) {
		return qr_task_on_send(task, handle, kr_error(ENOMEM));
	}

	/* Send using given protocol */
	int ret = 0;
	if (handle->type == UV_UDP) {
		uv_buf_t buf = { (char *)pkt->wire, pkt->size };
		send_req->as.send.data = task;
		ret = uv_udp_send(&send_req->as.send, (uv_udp_t *)handle, &buf, 1, addr, &on_send);
	} else {
		uint16_t pkt_size = htons(pkt->size);
		uv_buf_t buf[2] = {
			{ (char *)&pkt_size, sizeof(pkt_size) },
			{ (char *)pkt->wire, pkt->size }
		};
		send_req->as.write.data = task;
		ret = uv_write(&send_req->as.write, (uv_stream_t *)handle, buf, 2, &on_write);
	}
	if (ret == 0) {
		qr_task_ref(task); /* Pending ioreq on current task */
	} else {
		ioreq_release(task->worker, send_req);
	}

	/* Update statistics */
	if (handle != task->source.handle && addr) {
		if (handle->type == UV_UDP)
			task->worker->stats.udp += 1;
		else
			task->worker->stats.tcp += 1;
		if (addr->sa_family == AF_INET6)
			task->worker->stats.ipv6 += 1;
		else
			task->worker->stats.ipv4 += 1;
	}
	return ret;
}

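/* This is called when the TCP connect to an upstream address finishes */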
static void on_connect(uv_connect_t *req, int status)
{
	struct worker_ctx *worker = get_worker();
	struct qr_task *task = req->data;
	uv_stream_t *handle = req->handle;
	if (qr_valid_handle(task, (uv_handle_t *)req->handle)) {
		struct sockaddr_in6 addr;
		int addrlen = sizeof(addr); /* Retrieve endpoint IP for statistics */
		uv_tcp_getpeername((uv_tcp_t *)handle, (struct sockaddr *)&addr, &addrlen);
		if (status == 0) {
			qr_task_send(task, (uv_handle_t *)handle, (struct sockaddr *)&addr, task->pktbuf);
		} else {
			qr_task_step(task, (struct sockaddr *)&addr, NULL);
		}
	}
	qr_task_unref(task);
	ioreq_release(worker, (struct ioreq *)req);
}

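/* This is called by the retry timer: retransmit the query datagram to the next address in the list (round robin) */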
static void on_retransmit(uv_timer_t *req)
{
	struct qr_task *task = req->data;
	/* Create connection for iterative query */
	if (!uv_is_closing((uv_handle_t *)req) && task->addrlist) {
		uv_handle_t *subreq = ioreq_spawn(task, SOCK_DGRAM);
		if (subreq) {
			struct sockaddr_in6 *choice = &((struct sockaddr_in6 *)task->addrlist)[task->addrlist_turn];
			if (qr_task_send(task, subreq, (struct sockaddr *)choice, task->pktbuf) == 0) {
				task->addrlist_turn = (task->addrlist_turn + 1) % task->addrlist_count; /* Round robin */
				return;
			}
		}
	}
	/* Not possible to spawn request, stop trying */
	uv_timer_stop(req);
}

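/** @internal Finish the resolution and send the final answer back to the query source. */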
static int qr_task_finalize(struct qr_task *task, int state)
{
	kr_resolve_finish(&task->req, state);
	task->req.state = KNOT_STATE_NOOP;
	/* Send back answer */
	(void) qr_task_send(task, task->source.handle, (struct sockaddr *)&task->source.addr, task->req.answer);
	return state == KNOT_STATE_DONE ? 0 : kr_error(EIO);
}

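/** @internal Consume the incoming packet and advance the resolution: finalize, retransmit over UDP, or connect over TCP. */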
static int qr_task_step(struct qr_task *task, const struct sockaddr *packet_source, knot_pkt_t *packet)
{
	/* Close pending I/O requests */
	uv_timer_stop(&task->retry);
	uv_timer_stop(&task->timeout);
	ioreq_killall(task);

	/* Consume input and produce next query */
	int sock_type = -1;
	task->addrlist = NULL;
	task->addrlist_count = 0;
	task->addrlist_turn = 0;
	int state = kr_resolve_consume(&task->req, packet_source, packet);
	while (state == KNOT_STATE_PRODUCE) {
		state = kr_resolve_produce(&task->req, &task->addrlist, &sock_type, task->pktbuf);
		if (unlikely(++task->iter_count > KR_ITER_LIMIT)) {
			return qr_task_finalize(task, KNOT_STATE_FAIL);
		}
	}

	/* We're done, no more iterations needed */
	if (state & (KNOT_STATE_DONE|KNOT_STATE_FAIL)) {
		return qr_task_finalize(task, state);
	} else if (!task->addrlist || sock_type < 0) {
		return qr_task_step(task, NULL, NULL);
	}

	/* Count available address choices */
	struct sockaddr_in6 *choice = (struct sockaddr_in6 *)task->addrlist;
	for (size_t i = 0; i < KR_NSREP_MAXADDR && choice->sin6_family != AF_UNSPEC; ++i) {
		task->addrlist_count += 1;
		choice += 1;
	}

	/* Start fast retransmit with UDP, otherwise connect. */
	if (sock_type == SOCK_DGRAM) {
		uv_timer_start(&task->retry, on_retransmit, 0, KR_CONN_RETRY);
	} else {
		struct ioreq *conn = ioreq_take(task->worker);
		if (!conn) {
			return qr_task_step(task, NULL, NULL);
		}
		uv_handle_t *client = ioreq_spawn(task, sock_type);
		if (!client) {
			ioreq_release(task->worker, conn);
			return qr_task_step(task, NULL, NULL);
		}
		conn->as.connect.data = task;
		if (uv_tcp_connect(&conn->as.connect, (uv_tcp_t *)client, task->addrlist, on_connect) != 0) {
			ioreq_release(task->worker, conn);
			return qr_task_step(task, NULL, NULL);
		}
		/* Connect request borrows task */
		qr_task_ref(task);
	}

	/* Start next step with timeout */
	uv_timer_start(&task->timeout, on_timeout, KR_CONN_RTT_MAX, 0);
	return kr_ok();
}

static int parse_query(knot_pkt_t *query)
{
	/* Parse query packet. */
	int ret = knot_pkt_parse(query, 0);
	if (ret != KNOT_EOK) {
		return kr_error(EPROTO); /* Ignore malformed query. */
	}

	/* Check that the whole packet was parsed. */
	if (query->parsed < query->size) {
		return kr_error(EMSGSIZE);
	}

	return kr_ok();
}

int worker_exec(struct worker_ctx *worker, uv_handle_t *handle, knot_pkt_t *query, const struct sockaddr* addr)
{
	if (!worker) {
		return kr_error(EINVAL);
	}

	/* Parse query */
	int ret = parse_query(query);

	/* Start new task on master sockets, or resume existing */
	struct qr_task *task = handle->data;
	bool is_master_socket = (!task);
	if (is_master_socket) {
		/* Ignore badly formed queries or responses. */
		if (ret != 0 || knot_wire_get_qr(query->wire)) {
			return kr_error(EINVAL); /* Ignore. */
		}
		task = qr_task_create(worker, handle, query, addr);
		if (!task) {
			return kr_error(ENOMEM);
		}
	}

	/* Consume input and produce next query */
	return qr_task_step(task, addr, query);
}

/* Return DNS/TCP message size. */
static int msg_size(const uint8_t *msg, size_t len)
{
	if (len < 2) {
		return kr_error(EMSGSIZE);
	}
	uint16_t nbytes = wire_read_u16(msg);
	if (nbytes > len - 2) {
		return kr_error(EMSGSIZE);
	}
	return nbytes;
}

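/* Process a chunk of a DNS/TCP stream; length-prefixed messages may arrive split across reads, so partial messages are buffered in the task until complete. */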
int worker_process_tcp(struct worker_ctx *worker, uv_handle_t *handle, const uint8_t *msg, size_t len)
{
	if (!worker || !handle || !msg) {
		return kr_error(EINVAL);
	}

	int nbytes = msg_size(msg, len);
	struct qr_task *task = handle->data;
	const bool start_assembly = (task && task->bytes_remaining == 0);

	/* Message is a query (we have no context to buffer it) or complete. */
	if (!task || (start_assembly && nbytes == len - 2)) {
		if (nbytes <= 0) {
			return worker_exec(worker, (uv_handle_t *)handle, NULL, NULL);
		}
		knot_pkt_t *pkt_nocopy = knot_pkt_new((void *)(msg + 2), nbytes, &worker->pkt_pool);
		return worker_exec(worker, handle, pkt_nocopy, NULL);
	}
	/* Starting a new message assembly */
	knot_pkt_t *pkt_buf = task->pktbuf;
	if (start_assembly) {
		if (nbytes <= 0) {
			return worker_exec(worker, (uv_handle_t *)handle, NULL, NULL);
		}
		knot_pkt_clear(pkt_buf);
		pkt_buf->size = 0;
		/* Cut off message length */
		task->bytes_remaining = nbytes;
		len -= 2;
		msg += 2;
	}
	/* Message is too long, can't process it. */
	if (len > pkt_buf->max_size - pkt_buf->size) {
		task->bytes_remaining = 0;
		return worker_exec(worker, handle, NULL, NULL);
	}
	/* Buffer message and check if it's complete */
	memcpy(pkt_buf->wire + pkt_buf->size, msg, len);
	pkt_buf->size += len;
	if (len >= task->bytes_remaining) {
		task->bytes_remaining = 0;
		return worker_exec(worker, handle, pkt_buf, NULL);
	}
	/* Return number of bytes remaining to receive. */
	task->bytes_remaining -= len;
	return task->bytes_remaining;
}

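/* Resolve a query internally (without a client socket); on_complete is invoked with the baton when the request finishes. */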
int worker_resolve(struct worker_ctx *worker, knot_pkt_t *query, unsigned options, worker_cb_t on_complete, void *baton)
{
	if (!worker || !query) {
		return kr_error(EINVAL);
	}

	/* Create task */
	struct qr_task *task = qr_task_create(worker, NULL, query, NULL);
	if (!task) {
		return kr_error(ENOMEM);
	}
	task->baton = baton;
	task->on_complete = on_complete;
	task->req.options |= options;
	return qr_task_step(task, NULL, query);
}

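/* Pre-allocate the worker freelists and the shared packet mempool. */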
int worker_reserve(struct worker_ctx *worker, size_t ring_maxlen)
{
	array_init(worker->pools);
	array_init(worker->ioreqs);
	array_reserve(worker->pools, ring_maxlen);
	array_reserve(worker->ioreqs, ring_maxlen);
	memset(&worker->pkt_pool, 0, sizeof(worker->pkt_pool));
	worker->pkt_pool.ctx = mp_new (4 * sizeof(knot_pkt_t));
	worker->pkt_pool.alloc = (mm_alloc_t) mp_alloc;
	return kr_ok();
}

#define reclaim_freelist(list, type, cb) \
	for (unsigned i = 0; i < list.len; ++i) { \
		type *elm = list.at[i]; \
		kr_asan_unpoison(elm, sizeof(type)); \
		cb(elm); \
	} \
	array_clear(list)

void worker_reclaim(struct worker_ctx *worker)
{
	reclaim_freelist(worker->pools, struct mempool, mp_delete);
	reclaim_freelist(worker->ioreqs, struct ioreq, free);
	mp_delete(worker->pkt_pool.ctx);
	worker->pkt_pool.ctx = NULL;
}