Commit c88d10de authored by Marek Vavruša

Merge branch 'uv-closefail-cache-errors'

parents 39f7e080 dd9cf499
@@ -117,6 +117,7 @@ static void qr_task_timeout(uv_timer_t *req)
 {
 	struct qr_task *task = req->data;
 	if (task->next_handle) {
+		uv_cancel((uv_req_t *)&task->ioreq);
 		io_stop_read(task->next_handle);
 		qr_task_step(task, NULL);
 	}
@@ -182,7 +183,9 @@ static int qr_task_step(struct qr_task *task, knot_pkt_t *packet)
 {
 	/* Cancel timeout if active, close handle. */
 	if (task->next_handle) {
-		uv_close(task->next_handle, (uv_close_cb) free);
+		if (!uv_is_closing(task->next_handle)) {
+			uv_close(task->next_handle, (uv_close_cb) free);
+		}
 		uv_timer_stop(&task->timeout);
 		task->next_handle = NULL;
 	}
@@ -214,10 +217,10 @@ static int qr_task_step(struct qr_task *task, knot_pkt_t *packet)
 	task->next_handle->data = task;
 	if (sock_type == SOCK_STREAM) {
 		uv_connect_t *connect = &task->ioreq.connect;
+		connect->data = task;
 		if (uv_tcp_connect(connect, (uv_tcp_t *)task->next_handle, addr, qr_task_on_connect) != 0) {
 			return qr_task_step(task, NULL);
 		}
-		connect->data = task;
 	} else {
 		if (qr_task_send(task, task->next_handle, addr, next_query) != 0) {
 			return qr_task_step(task, NULL);
...
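The hunks above harden the libuv handle lifecycle: the outstanding I/O request is cancelled on timeout, a handle is closed only when it is not already closing (closing twice is an error in libuv), and the connect request's data pointer is set before uv_tcp_connect() rather than after a call that may fail. Below is a minimal standalone sketch of the guarded-close idiom, assuming only libuv; the helper name, the file name in the build hint, and the use of a timer handle are illustrative and not part of the patch.

/* Sketch: close a libuv handle at most once. Build with: cc demo.c -luv */
#include <stdlib.h>
#include <uv.h>

static void on_close(uv_handle_t *handle)
{
	free(handle); /* handle memory may only be released from the close callback */
}

/* Close the handle only if it is not already being closed. */
static void close_handle_once(uv_handle_t *handle)
{
	if (handle && !uv_is_closing(handle)) {
		uv_close(handle, on_close);
	}
}

int main(void)
{
	uv_loop_t *loop = uv_default_loop();
	uv_timer_t *timer = malloc(sizeof(*timer));
	uv_timer_init(loop, timer);

	close_handle_once((uv_handle_t *)timer);
	close_handle_once((uv_handle_t *)timer); /* second call is a no-op */

	uv_run(loop, UV_RUN_DEFAULT); /* lets the close callback run */
	return 0;
}

The second call sees uv_is_closing() return non-zero and does nothing, which is exactly the situation the patch guards against when a task is stepped while its handle is already shutting down.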
@@ -24,6 +24,7 @@
 #define DEBUG_MSG(fmt...) QRDEBUG(kr_rplan_current(rplan), " pc ", fmt)
 #define DEFAULT_MAXTTL (15 * 60)
+#define DEFAULT_NOTTL (5) /* Short-time "no data" retention to avoid bursts */
 
 static inline uint8_t get_tag(knot_pkt_t *pkt)
 {
@@ -47,7 +48,9 @@ static void adjust_ttl(knot_rrset_t *rr, uint32_t drift)
 	for (uint16_t i = 0; i < rr->rrs.rr_count; ++i) {
 		knot_rdata_t *rd = knot_rdataset_at(&rr->rrs, i);
 		uint32_t ttl = knot_rdata_ttl(rd);
-		knot_rdata_set_ttl(rd, ttl - drift);
+		if (ttl >= drift) {
+			knot_rdata_set_ttl(rd, ttl - drift);
+		}
 	}
 }
@@ -96,6 +99,9 @@ static int peek(knot_layer_t *ctx, knot_pkt_t *pkt)
 	if (!qry || ctx->state & (KNOT_STATE_DONE|KNOT_STATE_FAIL)) {
 		return ctx->state;
 	}
+	if (knot_pkt_qclass(pkt) != KNOT_CLASS_IN) {
+		return ctx->state; /* Only IN class */
+	}
 
 	/* Fetch packet from cache */
 	namedb_txn_t txn;
@@ -119,7 +125,7 @@ static int peek(knot_layer_t *ctx, knot_pkt_t *pkt)
 static uint32_t packet_ttl(knot_pkt_t *pkt)
 {
-	uint32_t ttl = 0;
+	uint32_t ttl = DEFAULT_NOTTL;
 	/* Fetch SOA from authority. */
 	const knot_pktsection_t *ns = knot_pkt_section(pkt, KNOT_AUTHORITY);
 	for (unsigned i = 0; i < ns->count; ++i) {
@@ -134,6 +140,10 @@ static uint32_t packet_ttl(knot_pkt_t *pkt)
 	const knot_pktsection_t *sec = knot_pkt_section(pkt, i);
 	for (unsigned k = 0; k < sec->count; ++k) {
 		const knot_rrset_t *rr = knot_pkt_rr(sec, k);
+		/* Skip OPT and TSIG */
+		if (rr->type == KNOT_RRTYPE_OPT || rr->type == KNOT_RRTYPE_TSIG) {
+			continue;
+		}
 		for (uint16_t i = 0; i < rr->rrs.rr_count; ++i) {
 			knot_rdata_t *rd = knot_rdataset_at(&rr->rrs, i);
 			if (knot_rdata_ttl(rd) < ttl) {
@@ -149,12 +159,16 @@ static int stash(knot_layer_t *ctx)
 {
 	struct kr_request *req = ctx->data;
 	struct kr_rplan *rplan = &req->rplan;
-	if (EMPTY_LIST(rplan->resolved) || ctx->state == KNOT_STATE_FAIL) {
+	if (EMPTY_LIST(rplan->resolved) || ctx->state & KNOT_STATE_FAIL) {
 		return ctx->state; /* Don't cache anything if failed. */
 	}
-	knot_pkt_t *pkt = req->answer;
 	struct kr_query *qry = TAIL(rplan->resolved);
-	if (qry->flags & QUERY_CACHED || !(kr_response_classify(pkt) & (PKT_NODATA|PKT_NXDOMAIN))) {
+	knot_pkt_t *pkt = req->answer;
+	if (knot_pkt_qclass(pkt) != KNOT_CLASS_IN) {
+		return ctx->state; /* Only IN class */
+	}
+	int pkt_class = kr_response_classify(pkt);
+	if (qry->flags & QUERY_CACHED || !(pkt_class & (PKT_NODATA|PKT_NXDOMAIN))) {
 		return ctx->state; /* Cache only negative, not-cached answers. */
 	}
 	uint32_t ttl = packet_ttl(pkt);
...
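The cache-layer hunks above clamp the TTL adjustment so it cannot underflow, restrict the packet cache to class IN, and compute a negative-answer retention time that starts from DEFAULT_NOTTL and ignores OPT/TSIG pseudo-records. Here is a simplified plain-C sketch of that TTL computation; the record layout and type constants are invented for illustration, while the real code walks libknot packet sections.

/* Sketch: minimum packet TTL with a short default and pseudo-records skipped. */
#include <stdint.h>
#include <stdio.h>

#define DEFAULT_NOTTL 5 /* short "no data" retention, as in the patch */

enum { TYPE_SOA = 6, TYPE_OPT = 41, TYPE_TSIG = 250 };

struct rec {
	uint16_t type;
	uint32_t ttl;
};

static uint32_t packet_ttl_sketch(const struct rec *rrs, unsigned count)
{
	uint32_t ttl = DEFAULT_NOTTL;
	for (unsigned i = 0; i < count; ++i) {
		if (rrs[i].type == TYPE_OPT || rrs[i].type == TYPE_TSIG) {
			continue; /* pseudo-records carry no cacheable TTL */
		}
		if (rrs[i].ttl < ttl) {
			ttl = rrs[i].ttl; /* keep the smallest real TTL */
		}
	}
	return ttl;
}

int main(void)
{
	struct rec rrs[] = { { TYPE_SOA, 3600 }, { TYPE_OPT, 0 } };
	printf("ttl = %u\n", (unsigned)packet_ttl_sketch(rrs, 2)); /* prints 5 */
	return 0;
}

With an OPT record carrying TTL 0, skipping it keeps the short default instead of collapsing the retention to zero, which is the burst the DEFAULT_NOTTL comment refers to.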
@@ -347,6 +347,8 @@ int kr_resolve_consume(struct kr_request *request, knot_pkt_t *packet)
 	/* Pop query if resolved. */
 	if (qry->flags & QUERY_RESOLVED) {
 		kr_rplan_pop(rplan, qry);
+	} else { /* Clear query flags for next attempt */
+		qry->flags &= ~QUERY_CACHED;
 	}
 	knot_overlay_reset(&request->overlay);
...
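The last hunk keeps an unresolved query in the plan but drops its QUERY_CACHED flag, so the next attempt queries the network instead of being treated as already answered from cache. A tiny sketch of that flag reset follows; the flag values are illustrative, not the resolver's real constants.

/* Sketch: reset the cached-answer flag when a query must be retried. */
#include <stdint.h>
#include <stdio.h>

#define QUERY_RESOLVED (1 << 0)
#define QUERY_CACHED   (1 << 1)

int main(void)
{
	uint32_t flags = QUERY_CACHED; /* answered from cache, but not resolved */
	if (flags & QUERY_RESOLVED) {
		/* would pop the query from the resolution plan here */
	} else {
		flags &= ~QUERY_CACHED; /* retry without the cached-answer flag */
	}
	printf("flags = %u\n", (unsigned)flags); /* prints 0 */
	return 0;
}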