diff --git a/NEWS b/NEWS
index 91e79cecc52def98ea00444811426c807579f141..33ecc593f48c8dbf10087e34fc7d197c0ee18c0d 100644
--- a/NEWS
+++ b/NEWS
@@ -1,8 +1,32 @@
+Knot Resolver 2.3.0 (2018-04-23)
+================================
+
+Security
+--------
+- fix CVE-2018-1110: denial of service triggered by malformed DNS messages
+  (!550, !558, security!2, security!4)
+- increase resilience against Slowloris attack (security!5)
+
+Incompatible changes
+--------------------
+- rename serve_stale configuration option cache_touted_ns_clean_interval
+  to cache_ns_tout (!537)
+
 Bugfixes
 --------
-- validation: fix SERVFAIL in case of CNAME to NXDOMAIN in a single zone
-- validation: fix SERVFAIL for DS . query
+- validation: fix SERVFAIL in case of CNAME to NXDOMAIN in a single zone (!538)
+- validation: fix SERVFAIL for DS . query (!544)
+- lib/resolve: don't send unnecessary queries to parent zone (!513)
+- iterate: fix validation for zones where parent and child share NS (!543)
+- TLS: improve error handling and documentation (!536, !555, !559)
+
+Improvements
+------------
+- prefill: new module to periodically import root zone into cache
+  (replacement for RFC 7706, !511)
+- network_listen_fd: always create endpoint for supervisor-supplied file descriptor
+- daemon: improved TLS error handling
+- use CPPFLAGS build environment variable if set (!547)
 
 
 Knot Resolver 2.2.0 (2018-03-28)
diff --git a/config.mk b/config.mk
index 5a09a86668f61495ba7d1b15976f10f8c329d820..a4f5137582f418f30203f09446283c27655b3a4c 100644
--- a/config.mk
+++ b/config.mk
@@ -1,6 +1,6 @@
 # Project
 MAJOR := 2
-MINOR := 2
+MINOR := 3
 PATCH := 0
 EXTRA :=
 ABIVER := 7
diff --git a/daemon/io.c b/daemon/io.c
index fa1ac7f4f3b4b2cbad484cfa45305e24bbdf8cee..63bca9f71716aa88cde0759426f795fa7a938a1d 100644
--- a/daemon/io.c
+++ b/daemon/io.c
@@ -252,7 +252,8 @@ static void tcp_recv(uv_stream_t *handle, ssize_t nread, const uv_buf_t *buf)
 		if (s->tasks.len == 0) {
 			worker_session_close(s);
 		} else { /* If there are tasks running, defer until they finish. */
-			uv_timer_start(&s->timeout, tcp_timeout_trigger, 1, KR_CONN_RTT_MAX/2);
+			uv_timer_start(&s->timeout, tcp_timeout_trigger,
+				       MAX_TCP_INACTIVITY, MAX_TCP_INACTIVITY);
 		}
 	}
 	/* Connection spawned at least one request, reset its deadline for next query.
@@ -294,14 +295,18 @@ static void _tcp_accept(uv_stream_t *master, int status, bool tls)
 		return;
 	}
 
+	uint64_t timeout = KR_CONN_RTT_MAX / 2;
 	session->has_tls = tls;
-	if (tls && !session->tls_ctx) {
-		session->tls_ctx = tls_new(master->loop->data);
-		session->tls_ctx->c.session = session;
-		session->tls_ctx->c.handshake_state = TLS_HS_IN_PROGRESS;
+	if (tls) {
+		timeout += KR_CONN_RTT_MAX * 3;
+		if (!session->tls_ctx) {
+			session->tls_ctx = tls_new(master->loop->data);
+			session->tls_ctx->c.session = session;
+			session->tls_ctx->c.handshake_state = TLS_HS_IN_PROGRESS;
+		}
 	}
 	uv_timer_t *timer = &session->timeout;
-	uv_timer_start(timer, tcp_timeout_trigger, KR_CONN_RTT_MAX/2, KR_CONN_RTT_MAX/2);
+	uv_timer_start(timer, tcp_timeout_trigger, timeout, timeout);
 	io_start_read((uv_handle_t *)client);
 }
diff --git a/daemon/tls.c b/daemon/tls.c
index 77ab64644988fe306c1e488289fb4147d23c8c69..4e7b457b649fe31681c4fe594b36ab45f269d5a9 100644
--- a/daemon/tls.c
+++ b/daemon/tls.c
@@ -890,7 +890,7 @@ int tls_client_connect_start(struct tls_client_ctx_t *client_ctx,
 	struct tls_common_ctx *ctx = &client_ctx->c;
 
 	gnutls_session_set_ptr(ctx->tls_session, client_ctx);
-	gnutls_handshake_set_timeout(ctx->tls_session, 5000);
+	gnutls_handshake_set_timeout(ctx->tls_session, KR_CONN_RTT_MAX * 3);
 	session->tls_client_ctx = client_ctx;
 	ctx->handshake_cb = handshake_cb;
 	ctx->handshake_state = TLS_HS_IN_PROGRESS;
diff --git a/daemon/worker.c b/daemon/worker.c
index 6edd39baf95757f882024a9d39849df463374c8b..8826c4b1ce4e358da7824571b012516a64d3451e 100644
--- a/daemon/worker.c
+++ b/daemon/worker.c
@@ -826,14 +826,7 @@ static int qr_task_on_send(struct qr_task *task, uv_handle_t *handle, int status
 		if (session->waiting.len > 0) {
 			struct qr_task *t = session->waiting.at[0];
 			int ret = qr_task_send(t, handle, &session->peer.ip, t->pktbuf);
-			if (ret == kr_ok()) {
-				uv_timer_t *timer = &session->timeout;
-				uv_timer_stop(timer);
-				session->timeout.data = session;
-				timer_start(session, on_tcp_watchdog_timeout, MAX_TCP_INACTIVITY, 0);
-			} else {
-				uv_timer_t *timer = &session->timeout;
-				uv_timer_stop(timer);
+			if (ret != kr_ok()) {
 				while (session->waiting.len > 0) {
 					struct qr_task *t = session->waiting.at[0];
 					if (session->outgoing) {
@@ -1081,8 +1074,6 @@ static int session_next_waiting_send(struct session *session)
 		struct qr_task *task = session->waiting.at[0];
 		ret = qr_task_send(task, session->handle, &peer->ip, task->pktbuf);
 	}
-	session->timeout.data = session;
-	timer_start(session, on_tcp_watchdog_timeout, MAX_TCP_INACTIVITY, 0);
 	return ret;
 }
 
@@ -1121,6 +1112,10 @@ static int session_tls_hs_cb(struct session *session, int status)
 		worker_del_tcp_connected(worker, &peer->ip);
 		assert(session->tasks.len == 0);
 		session_close(session);
+	} else {
+		uv_timer_stop(&session->timeout);
+		session->timeout.data = session;
+		timer_start(session, on_tcp_watchdog_timeout, MAX_TCP_INACTIVITY, 0);
 	}
 	return kr_ok();
 }
@@ -1144,9 +1139,7 @@ static void on_connect(uv_connect_t *req, int status)
 	struct worker_ctx *worker = get_worker();
 	uv_stream_t *handle = req->handle;
 	struct session *session = handle->data;
-
 	union inaddr *peer = &session->peer;
-	uv_timer_stop(&session->timeout);
 
 	if (status == UV_ECANCELED) {
 		worker_del_tcp_waiting(worker, &peer->ip);
@@ -1162,6 +1155,8 @@ static void on_connect(uv_connect_t *req, int status)
 		return;
 	}
 
+	uv_timer_stop(&session->timeout);
+
 	if (status != 0) {
 		worker_del_tcp_waiting(worker, &peer->ip);
 		while (session->waiting.len > 0) {
@@ -1188,6 +1183,8 @@ static void on_connect(uv_connect_t *req, int status)
 		struct qr_task *task = session->waiting.at[0];
 		session_del_tasks(session, task);
 		array_del(session->waiting, 0);
+		ioreq_kill_pending(task);
+		assert(task->pending_count == 0);
 		qr_task_finalize(task, KR_STATE_FAIL);
 		qr_task_unref(task);
 	}
@@ -1224,6 +1221,7 @@ static void on_connect(uv_connect_t *req, int status)
 	if (ret == kr_ok()) {
 		ret = session_next_waiting_send(session);
 		if (ret == kr_ok()) {
+			timer_start(session, on_tcp_watchdog_timeout, MAX_TCP_INACTIVITY, 0);
 			worker_add_tcp_connected(worker, &session->peer.ip, session);
 			iorequest_release(worker, req);
 			return;
@@ -1234,6 +1232,8 @@ static void on_connect(uv_connect_t *req, int status)
 		struct qr_task *task = session->waiting.at[0];
 		session_del_tasks(session, task);
 		array_del(session->waiting, 0);
+		ioreq_kill_pending(task);
+		assert(task->pending_count == 0);
 		qr_task_finalize(task, KR_STATE_FAIL);
 		qr_task_unref(task);
 	}
@@ -1291,7 +1291,6 @@ static void on_tcp_watchdog_timeout(uv_timer_t *timer)
 	assert(session->outgoing);
 	uv_timer_stop(timer);
 	struct worker_ctx *worker = get_worker();
-
 	if (session->outgoing) {
 		if (session->has_tls) {
 			worker_del_tcp_waiting(worker, &session->peer.ip);
@@ -1304,6 +1303,8 @@ static void on_tcp_watchdog_timeout(uv_timer_t *timer)
 			worker->stats.timeout += 1;
 			array_del(session->waiting, 0);
 			session_del_tasks(session, task);
+			ioreq_kill_pending(task);
+			assert(task->pending_count == 0);
 			qr_task_finalize(task, KR_STATE_FAIL);
 			qr_task_unref(task);
 		}
@@ -1315,6 +1316,8 @@ static void on_tcp_watchdog_timeout(uv_timer_t *timer)
 		worker->stats.timeout += 1;
 		assert(task->refs > 1);
 		array_del(session->tasks, 0);
+		ioreq_kill_pending(task);
+		assert(task->pending_count == 0);
 		qr_task_finalize(task, KR_STATE_FAIL);
 		qr_task_unref(task);
 	}
@@ -1649,6 +1652,9 @@ static int qr_task_step(struct qr_task *task,
 			subreq_finalize(task, packet_source, packet);
 			return qr_task_finalize(task, KR_STATE_FAIL);
 		}
+		assert(task->pending_count == 0);
+		task->pending[task->pending_count] = session->handle;
+		task->pending_count += 1;
 	} else if ((session = worker_find_tcp_connected(ctx->worker, addr)) != NULL) {
 		/* Connection has been already established */
 		assert(session->outgoing);
@@ -1693,8 +1699,11 @@ static int qr_task_step(struct qr_task *task,
 			session_close(session);
 			return qr_task_finalize(task, KR_STATE_FAIL);
 		}
-		ret = timer_start(session, on_tcp_watchdog_timeout,
-				  KR_CONN_RTT_MAX, 0);
+		if (session->tasks.len == 1) {
+			uv_timer_stop(&session->timeout);
+			ret = timer_start(session, on_tcp_watchdog_timeout,
+					  MAX_TCP_INACTIVITY, 0);
+		}
 		if (ret < 0) {
 			session_del_waiting(session, task);
 			session_del_tasks(session, task);
@@ -1708,6 +1717,7 @@ static int qr_task_step(struct qr_task *task,
 				return qr_task_finalize(task, KR_STATE_FAIL);
 			}
 		}
+		assert(task->pending_count == 0);
 		task->pending[task->pending_count] = session->handle;
 		task->pending_count += 1;
 	} else {
@@ -2097,11 +2107,6 @@ int worker_process_tcp(struct worker_ctx *worker, uv_stream_t *handle,
 		return kr_ok();
 	}
 
-	if (session->outgoing) {
-		uv_timer_stop(&session->timeout);
-		timer_start(session, on_tcp_watchdog_timeout, MAX_TCP_INACTIVITY, 0);
-	}
-
 	if (session->bytes_to_skip) {
 		assert(session->buffering == NULL);
 		ssize_t min_len = MIN(session->bytes_to_skip, len);
@@ -2286,23 +2291,34 @@ int worker_process_tcp(struct worker_ctx *worker, uv_stream_t *handle,
 		session->buffering = NULL;
 		session->msg_hdr_idx = 0;
 		if (session->outgoing) {
+			assert ((task->pending_count == 1) && (task->pending[0] == session->handle));
+			task->pending_count = 0;
 			session_del_tasks(session, task);
 		}
 		/* Parse the packet and start resolving complete query */
 		int ret = parse_packet(pkt_buf);
-		if (ret == 0 && !session->outgoing) {
-			/* Start only new queries,
-			 * not subrequests that are already pending */
-			ret = request_start(task->ctx, pkt_buf);
-			assert(ret == 0);
-			if (ret == 0) {
-				ret = qr_task_register(task, session);
-			}
-			if (ret == 0) {
-				submitted += 1;
-			}
-			if (task->leading) {
-				assert(false);
+		if (ret == 0) {
+			if (session->outgoing) {
+				/* To prevent a Slowloris attack, restart the watchdog only after
+				 * the whole message has been successfully assembled and parsed */
+				if (session->tasks.len > 0 || session->waiting.len > 0) {
+					uv_timer_stop(&session->timeout);
+					timer_start(session, on_tcp_watchdog_timeout, MAX_TCP_INACTIVITY, 0);
+				}
+			} else {
+				/* Start only new queries,
+				 * not subrequests that are already pending */
+				ret = request_start(task->ctx, pkt_buf);
+				assert(ret == 0);
+				if (ret == 0) {
+					ret = qr_task_register(task, session);
+				}
+				if (ret == 0) {
+					submitted += 1;
+				}
+				if (task->leading) {
+					assert(false);
+				}
 			}
 		}
 		if (ret == 0) {
diff --git a/doc/modules.rst b/doc/modules.rst
index 1d4491f0e25c7c6b2d3d38a5e5b9d635cae1f6e2..3f278ae61cdbd80703816c096fd8d6118449e8ee 100644
--- a/doc/modules.rst
+++ b/doc/modules.rst
@@ -28,7 +28,8 @@ Knot DNS Resolver modules
 .. include:: ../modules/ta_signal_query/README.rst
 .. include:: ../modules/ta_sentinel/README.rst
 .. include:: ../modules/priming/README.rst
-.. include:: ../modules/serve_stale/README.rst
 .. include:: ../modules/detect_time_skew/README.rst
 .. include:: ../modules/detect_time_jump/README.rst
+.. include:: ../modules/rfc7706.rst
 .. include:: ../modules/prefill/README.rst
+.. include:: ../modules/serve_stale/README.rst
diff --git a/lib/cache/api.c b/lib/cache/api.c
index 8f584523cb134ade53a5c61f0e6b36248c1c3e2f..8d67f88a4d199956d642d272bb337755675b3617 100644
--- a/lib/cache/api.c
+++ b/lib/cache/api.c
@@ -237,21 +237,38 @@ int32_t kr_cache_ttl(const struct kr_cache_p *peek, const struct kr_query *qry,
 
 
-/** Check that no label contains a zero character.
+/** Check that no label contains a zero character, incl. a log trace.
  *
  * We refuse to work with those, as LF and our cache keys might become ambiguous.
  * Assuming uncompressed name, as usual.
  * CACHE_KEY_DEF
  */
-static bool check_dname_for_lf(const knot_dname_t *n)
+static bool check_dname_for_lf(const knot_dname_t *n, const struct kr_query *qry/*logging*/)
 {
-	return knot_dname_size(n) == strlen((const char *)n) + 1;
+	const bool ret = knot_dname_size(n) == strlen((const char *)n) + 1;
+	if (!ret) { WITH_VERBOSE(qry) {
+		auto_free char *n_str = kr_dname_text(n);
+		VERBOSE_MSG(qry, "=> skipping zero-containing name %s\n", n_str);
+	} }
+	return ret;
+}
+
+/** Return false on types to be ignored.  Meant both for sname and direct cache requests. */
+static bool check_rrtype(uint16_t type, const struct kr_query *qry/*logging*/)
+{
+	const bool ret = !knot_rrtype_is_metatype(type)
+			&& type != KNOT_RRTYPE_RRSIG;
+	if (!ret) { WITH_VERBOSE(qry) {
+		auto_free char *type_str = kr_rrtype_text(type);
+		VERBOSE_MSG(qry, "=> skipping RR type %s\n", type_str);
+	} }
+	return ret;
 }
 
 
 /** Like key_exact_type() but omits a couple checks not holding for pkt cache. */
 knot_db_val_t key_exact_type_maypkt(struct key *k, uint16_t type)
 {
-	assert(!knot_rrtype_is_metatype(type));
+	assert(check_rrtype(type, NULL));
 	switch (type) {
 	case KNOT_RRTYPE_RRSIG: /* no RRSIG query caching, at least for now */
 		assert(false);
@@ -308,7 +325,7 @@ int cache_peek(kr_layer_t *ctx, knot_pkt_t *pkt)
 	if (ctx->state & (KR_STATE_FAIL|KR_STATE_DONE) || qry->flags.NO_CACHE
 	    || (qry->flags.CACHE_TRIED && !qry->stale_cb)
-	    || qry->stype == KNOT_RRTYPE_RRSIG /* LATER: some other behavior for this STYPE? */
+	    || !check_rrtype(qry->stype, qry) /* LATER: some other behavior for some of these? */
 	    || qry->sclass != KNOT_CLASS_IN) {
 		return ctx->state; /* Already resolved/failed or already tried, etc. */
 	}
@@ -337,11 +354,7 @@ static int cache_peek_real(kr_layer_t *ctx, knot_pkt_t *pkt)
 		VERBOSE_MSG(qry, "=> skipping stype NSEC\n");
 		return ctx->state;
 	}
-	if (!check_dname_for_lf(qry->sname)) {
-		WITH_VERBOSE(qry) {
-			auto_free char *sname_str = kr_dname_text(qry->sname);
-			VERBOSE_MSG(qry, "=> skipping zero-containing sname %s\n", sname_str);
-		}
+	if (!check_dname_for_lf(qry->sname, qry)) {
 		return ctx->state;
 	}
 	int ret = kr_dname_lf(k->buf, qry->sname, false);
@@ -610,7 +623,6 @@ static int cache_peek_real(kr_layer_t *ctx, knot_pkt_t *pkt)
 	return KR_STATE_DONE;
 }
 
-
 /** It's simply inside of cycle taken out to decrease indentation.  \return error code. */
 static int stash_rrset(const ranked_rr_array_t *arr, int arr_i,
 			const struct kr_query *qry, struct kr_cache *cache,
@@ -622,12 +634,10 @@ int cache_stash(kr_layer_t *ctx, knot_pkt_t *pkt)
 	struct kr_query *qry = req->current_query;
 	struct kr_cache *cache = &req->ctx->cache;
 
-	const uint16_t pkt_type = knot_pkt_qtype(pkt);
-	const bool type_bad = knot_rrtype_is_metatype(pkt_type)
-			|| pkt_type == KNOT_RRTYPE_RRSIG;
 	/* Note: we cache even in KR_STATE_FAIL.  For example,
 	 * BOGUS answer can go to +cd cache even without +cd request. */
-	if (!qry || qry->flags.CACHED || type_bad || qry->sclass != KNOT_CLASS_IN) {
+	if (!qry || qry->flags.CACHED || !check_rrtype(knot_pkt_qtype(pkt), qry)
+	    || qry->sclass != KNOT_CLASS_IN) {
 		return ctx->state;
 	}
 	/* Do not cache truncated answers, at least for now.  LATER */
@@ -676,18 +686,10 @@ static int stash_rrset(const ranked_rr_array_t *arr, int arr_i,
 		return kr_ok();
 	}
 	const knot_rrset_t *rr = entry->rr;
-	if (!rr) {
+	if (!rr || rr->rclass != KNOT_CLASS_IN) {
 		assert(!EINVAL);
 		return kr_error(EINVAL);
 	}
-	if (!check_dname_for_lf(rr->owner)) {
-		WITH_VERBOSE(qry) {
-			auto_free char *owner_str = kr_dname_text(rr->owner);
-			VERBOSE_MSG(qry, "=> skipping zero-containing name %s\n",
-					owner_str);
-		}
-		return kr_ok();
-	}
 
 #if 0
 	WITH_VERBOSE {
@@ -697,13 +699,9 @@ static int stash_rrset(const ranked_rr_array_t *arr, int arr_i,
 	}
 #endif
 
-	switch (rr->type) {
-	case KNOT_RRTYPE_RRSIG:
-	case KNOT_RRTYPE_NSEC3:
-		// for now; LATER NSEC3
+	if (!check_dname_for_lf(rr->owner, qry) || !check_rrtype(rr->type, qry)
+	    || rr->type == KNOT_RRTYPE_NSEC3 /*for now; LATER NSEC3*/) {
 		return kr_ok();
-	default:
-		break;
 	}
 
 	/* Try to find corresponding signatures, always.  LATER(optim.): speed. */
@@ -951,6 +949,9 @@ static int try_wild(struct key *k, struct answer *ans, const knot_dname_t *clenc
 static int peek_exact_real(struct kr_cache *cache, const knot_dname_t *name,
 			uint16_t type, struct kr_cache_p *peek)
 {
+	if (!check_rrtype(type, NULL) || !check_dname_for_lf(name, NULL)) {
+		return kr_error(ENOTSUP);
+	}
 	struct key k_storage, *k = &k_storage;
 
 	int ret = kr_dname_lf(k->buf, name, false);
diff --git a/lib/generic/pack.h b/lib/generic/pack.h
index 90c1b7218872e4f49f86012a6a2cdc4a2cdac32b..52722c9f349476f0f5988a02df227d81950540f2 100644
--- a/lib/generic/pack.h
+++ b/lib/generic/pack.h
@@ -96,13 +96,17 @@ typedef array_t(uint8_t) pack_t;
 #define pack_reserve_mm(pack, objs_count, objs_len, reserve, baton) \
 	array_reserve_mm((pack), (pack).len + (sizeof(pack_objlen_t)*(objs_count) + (objs_len)), (reserve), (baton))
 
-/** Return pointer to first packed object. */
+/** Return pointer to first packed object.
+ *
+ * Recommended way to iterate:
+ *   for (uint8_t *it = pack_head(pack); it != pack_tail(pack); it = pack_obj_next(it))
+ */
 #define pack_head(pack) \
-	((pack).len > 0 ? &((pack).at[0]) : NULL)
+	(&(pack).at[0])
 
 /** Return pack end pointer. */
 #define pack_tail(pack) \
-	&((pack).at[(pack).len])
+	(&(pack).at[(pack).len])
 
 /** Return packed object length. */
 static inline pack_objlen_t pack_obj_len(uint8_t *it)
@@ -147,9 +151,13 @@ static inline uint8_t *pack_last(pack_t pack)
  */
 static inline int pack_obj_push(pack_t *pack, const uint8_t *obj, pack_objlen_t len)
 {
+	if (pack == NULL || obj == NULL) {
+		assert(false);
+		return kr_error(EINVAL);
+	}
 	size_t packed_len = len + sizeof(len);
-	if (pack == NULL || (pack->len + packed_len) > pack->cap) {
-		return -1;
+	if (pack->len + packed_len > pack->cap) {
+		return kr_error(ENOSPC);
 	}
 
 	uint8_t *endp = pack_tail(*pack);
@@ -164,6 +172,10 @@ static inline int pack_obj_push(pack_t *pack, const uint8_t *obj, pack_objlen_t
  */
 static inline uint8_t *pack_obj_find(pack_t *pack, const uint8_t *obj, pack_objlen_t len)
 {
+	if (pack == NULL || obj == NULL) {
+		assert(false);
+		return NULL;
+	}
 	uint8_t *endp = pack_tail(*pack);
 	uint8_t *it = pack_head(*pack);
 	while (it != endp) {
@@ -181,6 +193,10 @@ static inline int pack_obj_del(pack_t *pack, const uint8_t *obj, pack_objl
  */
 static inline int pack_obj_del(pack_t *pack, const uint8_t *obj, pack_objlen_t len)
 {
+	if (pack == NULL || obj == NULL) {
+		assert(false);
+		return kr_error(EINVAL);
+	}
 	uint8_t *endp = pack_tail(*pack);
 	uint8_t *it = pack_obj_find(pack, obj, len);
 	if (it) {
diff --git a/lib/layer/iterate.c b/lib/layer/iterate.c
index 7a4ad83a91c2cb3687de8b1cc309ac9b46d9eca6..f0823557cc4b04317b2267cdfdf08ab4bdf58586 100644
--- a/lib/layer/iterate.c
+++ b/lib/layer/iterate.c
@@ -356,7 +356,7 @@ static int pick_authority(knot_pkt_t *pkt, struct kr_request *req, bool to_wire)
 
 	for (unsigned i = 0; i < ns->count; ++i) {
 		const knot_rrset_t *rr = knot_pkt_rr(ns, i);
-		if (!knot_dname_in(zonecut_name, rr->owner)) {
+		if (rr->rclass != KNOT_CLASS_IN || !knot_dname_in(zonecut_name, rr->owner)) {
 			continue;
 		}
 		uint8_t rank = get_initial_rank(rr, qry, false,
@@ -492,7 +492,8 @@ static int unroll_cname(knot_pkt_t *pkt, struct kr_request *req, bool referral,
 			const bool type_OK = rr->type == query->stype || type == query->stype
 				|| type == KNOT_RRTYPE_CNAME || type == KNOT_RRTYPE_DNAME;
 				/* TODO: actually handle DNAMEs */
-			if (!type_OK || !knot_dname_is_equal(rr->owner, cname)) {
+			if (rr->rclass != KNOT_CLASS_IN || !type_OK
+			    || !knot_dname_is_equal(rr->owner, cname)) {
 				continue;
 			}
 
diff --git a/lib/resolve.c b/lib/resolve.c
index d5240d86ee6037d09bfb7c6569d7f81e7c25c860..f7b313869449010c1d7cfe4e35882b88ddd6113e 100644
--- a/lib/resolve.c
+++ b/lib/resolve.c
@@ -742,12 +742,15 @@ static int resolve_query(struct kr_request *request, const knot_pkt_t *packet)
 	uint16_t qtype = knot_pkt_qtype(packet);
 	bool cd_is_set = knot_wire_get_cd(packet->wire);
 	struct kr_query *qry = NULL;
+	struct kr_context *ctx = request->ctx;
+	struct kr_cookie_ctx *cookie_ctx = ctx ? &ctx->cookie_ctx : NULL;
 
 	if (qname != NULL) {
 		qry = kr_rplan_push(rplan, NULL, qname, qclass, qtype);
-	} else if (knot_wire_get_qdcount(packet->wire) == 0 &&
-		   knot_pkt_has_edns(packet) &&
-		   knot_edns_has_option(packet->opt_rr, KNOT_EDNS_OPTION_COOKIE)) {
+	} else if (cookie_ctx && cookie_ctx->srvr.enabled &&
+		   knot_wire_get_qdcount(packet->wire) == 0 &&
+		   knot_pkt_has_edns(packet) &&
+		   knot_edns_has_option(packet->opt_rr, KNOT_EDNS_OPTION_COOKIE)) {
 		/* Plan empty query only for cookies. */
 		qry = kr_rplan_push_empty(rplan, NULL);
 	}
@@ -755,12 +758,14 @@ static int resolve_query(struct kr_request *request, const knot_pkt_t *packet)
 		return KR_STATE_FAIL;
 	}
 
-	/* Deferred zone cut lookup for this query. */
-	qry->flags.AWAIT_CUT = true;
-	/* Want DNSSEC if it's posible to secure this name (e.g. is covered by any TA) */
-	if ((knot_wire_get_ad(packet->wire) || knot_pkt_has_dnssec(packet)) &&
-	    kr_ta_covers_qry(request->ctx, qname, qtype)) {
-		qry->flags.DNSSEC_WANT = true;
+	if (qname != NULL) {
+		/* Deferred zone cut lookup for this query. */
+		qry->flags.AWAIT_CUT = true;
+		/* Want DNSSEC if it's possible to secure this name (e.g. is covered by any TA) */
+		if ((knot_wire_get_ad(packet->wire) || knot_pkt_has_dnssec(packet)) &&
+		    kr_ta_covers_qry(request->ctx, qname, qtype)) {
+			qry->flags.DNSSEC_WANT = true;
+		}
 	}
 
 	/* Initialize answer packet */
@@ -780,8 +785,13 @@ static int resolve_query(struct kr_request *request, const knot_pkt_t *packet)
 	request->qsource.packet = packet;
 	ITERATE_LAYERS(request, qry, begin);
 	request->qsource.packet = NULL;
-	if (request->state == KR_STATE_DONE) {
+	if ((request->state & KR_STATE_DONE) != 0) {
 		kr_rplan_pop(rplan, qry);
+	} else if (qname == NULL) {
+		/* It is an empty query which must be resolved by
+		   the `begin` layer of the cookie module.
+		   If the query isn't resolved there, fail. */
+		request->state = KR_STATE_FAIL;
 	}
 	return request->state;
 }
diff --git a/modules/cookies/cookiemonster.c b/modules/cookies/cookiemonster.c
index dabd2854f0a46a577516ce7b187c23035b524c11..e4663f6e1b0feff1a598741ba4c543b39d253ede 100644
--- a/modules/cookies/cookiemonster.c
+++ b/modules/cookies/cookiemonster.c
@@ -355,6 +355,10 @@ int check_request(kr_layer_t *ctx)
 	struct kr_request *req = ctx->req;
 	struct kr_cookie_settings *srvr_sett = &req->ctx->cookie_ctx.srvr;
 
+	if (!srvr_sett->enabled) {
+		return ctx->state;
+	}
+
 	knot_pkt_t *answer = req->answer;
 
 	if (ctx->state & (KR_STATE_DONE | KR_STATE_FAIL)) {
diff --git a/modules/prefill/README.rst b/modules/prefill/README.rst
index 678e9f2d51113084b638d73d77914fe9daea9095..e30e0c3ad1ab366d3eee495db35e89349f16827f 100644
--- a/modules/prefill/README.rst
+++ b/modules/prefill/README.rst
@@ -1,3 +1,5 @@
+.. _mod-prefill:
+
 Cache prefilling
 ----------------
 
diff --git a/modules/rfc7706.rst b/modules/rfc7706.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1615f3302c1abb74fc78f49dc6b1416d320a9b3e
--- /dev/null
+++ b/modules/rfc7706.rst
@@ -0,0 +1,3 @@
+Root on loopback (RFC 7706)
+---------------------------
+Knot Resolver developers decided that a pure implementation of :rfc:`7706` is a bad idea, so it is not implemented in the form envisioned by the RFC. You can get a very similar effect without its downsides by combining the :ref:`prefill <mod-prefill>` and :ref:`serve_stale <mod-serve_stale>` modules with Aggressive Use of DNSSEC-Validated Cache (:rfc:`8198`), which is enabled automatically together with DNSSEC validation.
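
Illustration (editor's sketch, not part of the patch): the rfc7706.rst text added above describes the prefill + serve_stale combination only in prose. The kresd configuration below shows one way to wire it up. It assumes the module interfaces documented in the prefill and serve_stale READMEs of this release; the root-zone URL, CA-bundle path, trust-anchor file path and refresh interval are placeholder values to adapt, not values mandated by this patch.

.. code-block:: lua

    -- Enable DNSSEC validation so that RFC 8198 aggressive caching applies
    -- (the trust-anchor file path is an example).
    trust_anchors.add_file('/var/lib/knot-resolver/root.keys')

    -- Allow answering from stale records when upstreams are unreachable;
    -- the serve_stale README recommends loading it before the cache layer.
    modules = { 'serve_stale < cache' }

    -- Periodically re-import the root zone into the cache.
    modules.load('prefill')
    prefill.config({
        ['.'] = {
            url = 'https://www.internic.net/domain/root.zone',
            ca_file = '/etc/pki/tls/certs/ca-bundle.crt', -- system CA bundle; adjust
            interval = 86400, -- seconds between refreshes
        }
    })

The intent, per the note above, is to get root-zone locality without running a separate root server instance on loopback: the sketch stays entirely within the resolver's normal cache and resolution path.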