Commit a2b9d06d authored by Marek Vavruša

lib/cache: more granular control for rank check on insertion

for pktcache, the same or a better rank is required (because it is a direct answer);
for rrcache, a strictly better rank is required (unless doing a write-through)

in both cases, no cached-rank check is needed when inserting secure data

security note: this mitigates possible non-authoritative NS hijacking
parent 51547bec
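The resulting insertion policy can be summarized in a minimal sketch (not part of the commit; the constant names follow the diff below, and the real checks live in the pktcache and rrcache hunks at the end):

#include <stdbool.h>
#include "lib/cache.h"	/* assumed header path for enum kr_cache_rank */

/* Sketch: pktcache accepts data of the same or better rank; secure data skips the check. */
static bool pktcache_may_replace(int cached_rank, int new_rank)
{
	if (new_rank >= KR_RANK_SECURE) {
		return true;
	}
	return cached_rank <= new_rank;	/* a negative cached_rank means nothing usable is cached */
}

/* Sketch: rrcache requires a strictly better rank, unless the data is secure
 * or the query is a write-through (QUERY_NO_CACHE in the rrcache hunk). */
static bool rrcache_may_replace(int cached_rank, int new_rank, bool write_through)
{
	if ((new_rank & KR_RANK_SECURE) || write_through) {
		return true;
	}
	return cached_rank < new_rank;
}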
@@ -32,7 +32,7 @@
#include "lib/utils.h"
/* Cache version */
#define KEY_VERSION "V\x01"
#define KEY_VERSION "V\x02"
/* Key size */
#define KEY_HSIZE (sizeof(uint8_t) + sizeof(uint16_t))
#define KEY_SIZE (KEY_HSIZE + KNOT_DNAME_MAXLEN)
@@ -243,17 +243,6 @@ int kr_cache_insert(struct kr_cache_txn *txn, uint8_t tag, const knot_dname_t *n
	namedb_val_t entry = { NULL, sizeof(*header) + data.len };
	const namedb_api_t *db_api = txn_api(txn);

	/* Do not overwrite entries that are higher ranked and not expired. */
	namedb_val_t old_entry = { NULL, 0 };
	int ret = txn_api(txn)->find(&txn->t, &key, &old_entry, 0);
	if (ret == 0) {
		struct kr_cache_entry *old = old_entry.data;
		uint32_t timestamp = header->timestamp;
		if (kr_cache_rank_cmp(old->rank, header->rank) > 0 && check_lifetime(old, &timestamp) == 0) {
			return kr_error(EPERM);
		}
	}

	/* LMDB can do late write and avoid copy */
	txn->owner->stats.insert += 1;
	if (db_api == namedb_lmdb_api()) {
@@ -324,6 +313,18 @@ int kr_cache_peek_rr(struct kr_cache_txn *txn, knot_rrset_t *rr, uint16_t *rank,
	return kr_ok();
}

int kr_cache_peek_rank(struct kr_cache_txn *txn, uint8_t tag, const knot_dname_t *name, uint16_t type, uint32_t timestamp)
{
	struct kr_cache_entry *found = lookup(txn, tag, name, type);
	if (!found) {
		return kr_error(ENOENT);
	}
	if (check_lifetime(found, &timestamp) != 0) {
		return kr_error(ESTALE);
	}
	return found->rank;
}
int kr_cache_materialize(knot_rrset_t *dst, const knot_rrset_t *src, uint32_t drift, mm_ctx_t *mm)
{
if (!dst || !src) {
......
@@ -27,17 +27,20 @@ enum kr_cache_tag {
	KR_CACHE_USER = 0x80
};

/** Cache entry rank */
/**
 * Cache entry rank.
 * @note Be careful about chosen cache rank nominal values.
 * - AUTH must be > than NONAUTH
 * - AUTH INSECURE must be > than AUTH (because it attempted validation)
 * - NONAUTH SECURE must be > than AUTH (because it's valid)
 */
enum kr_cache_rank {
	KR_RANK_BAD = 0, /* BAD cache, do not use. */
	KR_RANK_INSECURE = 1, /* Entry is DNSSEC insecure (e.g. RRSIG not exists). */
	KR_RANK_SECURE = 2, /* Entry is DNSSEC valid (e.g. RRSIG exists). */
	/* <= Lower 3 bits reserved for various flags. */
	KR_RANK_NONAUTH = 8, /* Entry from authority section (i.e. parent-side) */
	KR_RANK_AUTH = 16, /* Entry from answer (authoritative data) */
	KR_RANK_SECURE = 256, /* Entry is DNSSEC valid (e.g. RRSIG exists). */
};

/* Compare ranks (ignore flags) */
#define kr_cache_rank_cmp(x, y) (((x) >> 2) - ((y) >> 2))
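The three constraints in the comment above can be checked against the new nominal values; a quick sanity test (not from the commit, header path assumed):

#include <assert.h>
#include "lib/cache.h"	/* assumed header path for enum kr_cache_rank */

int main(void)
{
	assert(KR_RANK_AUTH > KR_RANK_NONAUTH);				/* 16 > 8 */
	assert((KR_RANK_AUTH | KR_RANK_INSECURE) > KR_RANK_AUTH);	/* 17 > 16 */
	assert((KR_RANK_NONAUTH | KR_RANK_SECURE) > KR_RANK_AUTH);	/* 264 > 16 */
	return 0;
}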
/**
* Serialized form of the RRSet with inception timestamp and maximum TTL.
@@ -128,6 +131,8 @@ void kr_cache_txn_abort(struct kr_cache_txn *txn);
int kr_cache_peek(struct kr_cache_txn *txn, uint8_t tag, const knot_dname_t *name, uint16_t type,
		  struct kr_cache_entry **entry, uint32_t *timestamp);

/**
 * Insert asset into cache, replacing any existing data.
 * @param txn transaction instance
@@ -158,6 +163,17 @@ int kr_cache_remove(struct kr_cache_txn *txn, uint8_t tag, const knot_dname_t *n
 */
int kr_cache_clear(struct kr_cache_txn *txn);

/**
 * Peek the cache for given key and retrieve its rank.
 * @param txn transaction instance
 * @param tag asset tag
 * @param name asset name
 * @param type record type
 * @param timestamp current time
 * @return rank (0 or positive), or an error (negative number)
 */
int kr_cache_peek_rank(struct kr_cache_txn *txn, uint8_t tag, const knot_dname_t *name, uint16_t type, uint32_t timestamp);
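A sketch of the intended call pattern (not from the commit): a negative return such as kr_error(ENOENT) or kr_error(ESTALE) means nothing usable is cached, so it naturally loses any rank comparison.

#include <stdbool.h>
#include <stdint.h>
#include "lib/cache.h"	/* assumed header path */

/* Sketch: should an entry with 'new_rank' replace whatever is cached under (name, type)? */
static bool should_replace(struct kr_cache_txn *txn, const knot_dname_t *name,
                           uint16_t type, uint32_t now, int new_rank)
{
	int r = kr_cache_peek_rank(txn, KR_CACHE_RR, name, type, now);
	if (r < 0) {
		return true;	/* not cached or past its lifetime: nothing to protect */
	}
	return r < new_rank;	/* replace only with a strictly better rank (rrcache rule) */
}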
/**
 * Peek the cache for given RRSet (name, type)
 * @note The 'drift' is the time passed between the cache time of the RRSet and now (in seconds).
......
@@ -204,6 +204,15 @@ static int stash(knot_layer_t *ctx, knot_pkt_t *pkt)
		header.rank = KR_RANK_INSECURE;
	}

	/* Check if we can replace (allow current or better rank, SECURE is always accepted). */
	if (header.rank < KR_RANK_SECURE) {
		int cached_rank = kr_cache_peek_rank(&txn, KR_CACHE_PKT, qname, qtype, header.timestamp);
		if (cached_rank > header.rank) {
			kr_cache_txn_abort(&txn);
			return ctx->state;
		}
	}

	/* Stash answer in the cache */
	int ret = kr_cache_insert(&txn, KR_CACHE_PKT, qname, qtype, &header, data);
	if (ret != 0) {
......
@@ -184,8 +184,14 @@ static int commit_rr(const char *key, void *val, void *data)
	if (KEY_COVERING_RRSIG(key)) {
		return commit_rrsig(baton, rank, rr);
	}

	/* Accept only better rank (if not overriding) */
	if (!(rank & KR_RANK_SECURE) && !(baton->qry->flags & QUERY_NO_CACHE)) {
		int cached_rank = kr_cache_peek_rank(baton->txn, KR_CACHE_RR, rr->owner, rr->type, baton->timestamp);
		if (cached_rank >= rank) {
			return kr_ok();
		}
	}

	/* Check if already cached */
	knot_rrset_t query_rr;
	knot_rrset_init(&query_rr, rr->owner, rr->type, rr->rclass);
	return kr_cache_insert_rr(baton->txn, rr, rank, baton->timestamp);
......
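With the nominal rank values from the header change above, the effect described in the commit message's security note can be spelled out; a small illustration (not from the commit, header path assumed) of which branch each check takes:

#include <assert.h>
#include "lib/cache.h"	/* assumed header path for enum kr_cache_rank */

int main(void)
{
	/* rrcache check "cached_rank >= rank": another non-authoritative NS set at the
	 * same rank is rejected (the non-auth NS hijacking case). */
	assert(KR_RANK_NONAUTH >= KR_RANK_NONAUTH);
	/* ...while an authoritative answer is not rejected and may overwrite it. */
	assert(!(KR_RANK_NONAUTH >= KR_RANK_AUTH));
	/* pktcache check "cached_rank > header.rank": an equally ranked (e.g. INSECURE)
	 * direct answer may still refresh the packet cache. */
	assert(!(KR_RANK_INSECURE > KR_RANK_INSECURE));
	return 0;
}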