Commit 91a2945e authored by Marek Vavruša
daemon/worker: ring buffer to recycle mempools

This avoids repetitive allocation/free cycles under load.
parent 73ff103f
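
For context, the pattern this commit introduces is a small fixed-capacity cache of spare memory pools: a new task pops a pool from the ring if one is available, and on teardown the pool is flushed and pushed back instead of being destroyed, unless the ring is already full. The standalone C sketch below illustrates the same idea with plain malloc/free rather than the mempool and array APIs used in the patch; every name and size in it is illustrative only.

    #include <stdlib.h>

    #define RING_CAP  16           /* illustrative; mirrors DEFAULT_RING_SIZE */
    #define POOL_SIZE (64 * 1024)  /* illustrative block size */

    /* Fixed-capacity stack of spare buffers, standing in for recycled mempools. */
    static void *ring[RING_CAP];
    static size_t ring_len = 0;

    /* Take a cached buffer if one exists, otherwise allocate a fresh one
     * (the patch does the same with array_pop() vs. mm_ctx_mempool()). */
    static void *pool_acquire(void)
    {
        if (ring_len > 0)
            return ring[--ring_len];
        return malloc(POOL_SIZE);
    }

    /* Return the buffer to the cache, or free it when the cache is full
     * (the patch additionally resets the pool with mp_flush() before caching). */
    static void pool_release(void *pool)
    {
        if (ring_len < RING_CAP)
            ring[ring_len++] = pool;
        else
            free(pool);
    }

    /* Destroy everything still cached, e.g. on shutdown. */
    static void pool_reclaim(void)
    {
        while (ring_len > 0)
            free(ring[--ring_len]);
    }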
@@ -20,6 +20,9 @@
#ifndef DEFAULT_NSREP_SIZE
#define DEFAULT_NSREP_SIZE 4096 /**< Default NS reputation cache size */
#endif
#ifndef DEFAULT_RING_SIZE
#define DEFAULT_RING_SIZE 16 /**< Maximum number of mempools cached in the worker ring */
#endif
/*
* @internal These are forward decls to allow building modules with engine but without Lua.
@@ -150,6 +150,8 @@ int main(int argc, char **argv)
.mm = NULL
};
loop->data = &worker;
array_init(worker.bufs.ring);
worker_reserve(&worker, DEFAULT_RING_SIZE);
/* Bind to sockets. */
if (addr != NULL) {
@@ -182,6 +184,7 @@
/* Cleanup. */
fprintf(stderr, "\n[system] quitting\n");
engine_deinit(&engine);
worker_reclaim(&worker);
if (ret != 0) {
ret = EXIT_FAILURE;
@@ -27,6 +27,7 @@
struct qr_task
{
struct kr_request req;
struct worker_ctx *worker;
knot_pkt_t *next_query;
uv_handle_t *next_handle;
uv_timer_t timeout;
@@ -62,8 +63,15 @@ static int parse_query(knot_pkt_t *query)
static struct qr_task *qr_task_create(struct worker_ctx *worker, uv_handle_t *handle, knot_pkt_t *query, const struct sockaddr *addr)
{
/* Recycle mempool from ring or create it */
mm_ctx_t pool;
mm_ctx_mempool(&pool, MM_DEFAULT_BLKSIZE);
mempool_ring_t *ring = &worker->bufs.ring;
if (ring->len > 0) {
pool = array_tail(*ring);
array_pop(*ring);
} else {
mm_ctx_mempool(&pool, KNOT_WIRE_MAX_PKTSIZE);
}
/* Create worker task */
struct engine *engine = worker->engine;
@@ -73,6 +81,7 @@ static struct qr_task *qr_task_create(struct worker_ctx *worker, uv_handle_t *ha
mp_delete(pool.ctx);
return NULL;
}
task->worker = worker;
task->req.pool = pool;
task->source.handle = handle;
if (addr) {
@@ -118,7 +127,15 @@ static void qr_task_free(uv_handle_t *handle)
uv_ref(task->source.handle);
io_start_read(task->source.handle);
}
mp_delete(task->req.pool.ctx);
/* Return mempool to ring or free it if it's full */
struct worker_ctx *worker = task->worker;
mempool_ring_t *ring = &worker->bufs.ring;
if (ring->len < ring->cap) {
mp_flush(task->req.pool.ctx);
array_push(*ring, task->req.pool);
} else {
mp_delete(task->req.pool.ctx);
}
}
static void qr_task_timeout(uv_timer_t *req)
@@ -262,3 +279,17 @@ int worker_exec(struct worker_ctx *worker, uv_handle_t *handle, knot_pkt_t *quer
/* Consume input and produce next query */
return qr_task_step(task, query);
}
int worker_reserve(struct worker_ctx *worker, size_t ring_maxlen)
{
return array_reserve(worker->bufs.ring, ring_maxlen);
}
void worker_reclaim(struct worker_ctx *worker)
{
mempool_ring_t *ring = &worker->bufs.ring;
for (unsigned i = 0; i < ring->len; ++i) {
mp_delete(ring->at[i].ctx);
}
array_clear(*ring);
}
@@ -19,6 +19,9 @@
#include <libknot/internal/mempattern.h>
#include "daemon/engine.h"
#include "lib/generic/array.h"
typedef array_t(mm_ctx_t) mempool_ring_t;
/**
* Query resolution worker.
@@ -29,6 +32,7 @@ struct worker_ctx {
mm_ctx_t *mm;
struct {
uint8_t wire[KNOT_WIRE_MAX_PKTSIZE];
mempool_ring_t ring;
} bufs;
};
@@ -43,3 +47,9 @@ struct worker_ctx {
* @return 0, error code
*/
int worker_exec(struct worker_ctx *worker, uv_handle_t *handle, knot_pkt_t *query, const struct sockaddr* addr);
/** Reserve worker buffers */
int worker_reserve(struct worker_ctx *worker, size_t ring_maxlen);
/** Collect worker mempools */
void worker_reclaim(struct worker_ctx *worker);
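
Pieced together from the hunks above, the daemon-side use of the new API is roughly the following (sketch only; error handling and the rest of main() are omitted):

    struct worker_ctx worker = { /* ... */ };
    array_init(worker.bufs.ring);                /* start with an empty mempool ring */
    worker_reserve(&worker, DEFAULT_RING_SIZE);  /* pre-allocate the ring's slots */

    /* ... the event loop runs; each qr_task pops a mempool from the ring in
     * qr_task_create() and pushes it back (flushed) in qr_task_free() ... */

    worker_reclaim(&worker);                     /* mp_delete() every pool still cached */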