Commit bbb262ef authored by Marek Vavruša

layer/rrcache: cleanup cruft

parent a70cdfbc
@@ -27,48 +27,31 @@
#define DEBUG_MSG(fmt...) QRDEBUG(kr_rplan_current(rplan), " rc ", fmt)
/* Stash key flags */
#define KEY_FLAG_NO 0x01
#define KEY_FLAG_RRSIG 0x02
#define KEY_FLAG_SET(key, flag) key[0] = (flag);
#define KEY_COVERING_RRSIG(key) (key[0] & KEY_FLAG_RRSIG)
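/* A worked example of the resulting stash keys (illustration only, assuming the
 * key layout constructed in stash_add() below): an A RRset at www.example.com
 * and the RRSIG covering it share the owner and type portion of the key and
 * differ only in the flag byte, so the stash never merges them:
 *
 *   0x01 | 3 w w w 7 e x a m p l e 3 c o m | "1"   (KEY_FLAG_NO, type A = 1)
 *   0x02 | 3 w w w 7 e x a m p l e 3 c o m | "1"   (KEY_FLAG_RRSIG, covered type A = 1)
 *
 * i.e. an RRSIG entry is keyed by the type it covers, not by KNOT_RRTYPE_RRSIG. */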
static int begin(knot_layer_t *ctx, void *module_param)
{
ctx->data = module_param;
return ctx->state;
}
static int loot_rrsig(struct kr_cache_txn *txn, knot_pkt_t *pkt, const knot_dname_t *name,
uint16_t rrclass, uint16_t typec, struct kr_query *qry)
{
if (KNOT_RRTYPE_RRSIG == typec) {
return kr_ok();
}
/* Check if RRSIG record exists in cache. */
uint32_t timestamp = qry->timestamp.tv_sec;
knot_rrset_t cache_rr;
knot_rrset_init(&cache_rr, (knot_dname_t *)name, typec, rrclass);
int ret = kr_cache_peek_rrsig(txn, &cache_rr, &timestamp);
if (0 != ret) {
return ret;
}
/* Update packet answer */
knot_rrset_t rr_copy;
ret = kr_cache_materialize(&rr_copy, &cache_rr, timestamp, &pkt->mm);
if (0 == ret) {
ret = knot_pkt_put(pkt, KNOT_COMPR_HINT_NONE, &rr_copy, KNOT_PF_FREE);
if (ret != 0) {
knot_rrset_clear(&rr_copy, &pkt->mm);
}
}
return ret;
}
static int loot_rr(struct kr_cache_txn *txn, knot_pkt_t *pkt, const knot_dname_t *name,
uint16_t rrclass, uint16_t rrtype, struct kr_query *qry, bool dobit)
uint16_t rrclass, uint16_t rrtype, struct kr_query *qry, bool fetch_rrsig)
{
/* Check if record exists in cache */
int ret = 0;
uint32_t timestamp = qry->timestamp.tv_sec;
knot_rrset_t cache_rr;
knot_rrset_init(&cache_rr, (knot_dname_t *)name, rrtype, rrclass);
int ret = kr_cache_peek_rr(txn, &cache_rr, &timestamp);
if (fetch_rrsig) {
ret = kr_cache_peek_rrsig(txn, &cache_rr, &timestamp);
} else {
ret = kr_cache_peek_rr(txn, &cache_rr, &timestamp);
}
if (ret != 0) {
return ret;
}
@@ -88,9 +71,6 @@ static int loot_rr(struct kr_cache_txn *txn, knot_pkt_t *pkt, const knot_dname_t
knot_rrset_clear(&rr_copy, &pkt->mm);
}
}
if (dobit) {
ret = loot_rrsig(txn, pkt, name, rrclass, rrtype, qry);
}
return ret;
}
@@ -103,9 +83,15 @@ static int loot_cache(struct kr_cache *cache, knot_pkt_t *pkt, struct kr_query *
return ret;
}
/* Lookup direct match first */
ret = loot_rr(&txn, pkt, qry->sname, qry->sclass, qry->stype, qry, dobit);
if (ret != 0 && qry->stype != KNOT_RRTYPE_CNAME) { /* Chase CNAME if no direct hit */
ret = loot_rr(&txn, pkt, qry->sname, qry->sclass, KNOT_RRTYPE_CNAME, qry, dobit);
uint16_t rrtype = qry->stype;
ret = loot_rr(&txn, pkt, qry->sname, qry->sclass, rrtype, qry, 0);
if (ret != 0 && rrtype != KNOT_RRTYPE_CNAME) { /* Chase CNAME if no direct hit */
rrtype = KNOT_RRTYPE_CNAME;
ret = loot_rr(&txn, pkt, qry->sname, qry->sclass, rrtype, qry, 0);
}
/* Loot RRSIG if matched. */
if (ret == 0 && dobit) {
ret = loot_rr(&txn, pkt, qry->sname, qry->sclass, rrtype, qry, true);
}
kr_cache_txn_abort(&txn);
return ret;
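/* A minimal worked trace of the lookup above (illustration only), for a query
 * www.example.com A with the DO bit set (dobit == true):
 *   1. loot_rr(..., A, false)        - try the direct match first,
 *   2. loot_rr(..., CNAME, false)    - on a miss, chase a possible CNAME,
 *   3. loot_rr(..., rrtype, true)    - on a hit, also loot the RRSIG covering
 *      whichever type actually matched (rrtype tracks it). */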
@@ -142,26 +128,6 @@ static int peek(knot_layer_t *ctx, knot_pkt_t *pkt)
return ctx->state;
}
/** @internal Stashed data container. */
struct stash_data
{
map_t rrs;
map_t rrsigs;
};
static void stash_data_init(struct stash_data *stashd, mm_ctx_t *pool)
{
stashd->rrs = map_make();
stashd->rrs.malloc = (map_alloc_f) mm_alloc;
stashd->rrs.free = (map_free_f) mm_free;
stashd->rrs.baton = pool;
stashd->rrsigs = map_make();
stashd->rrsigs.malloc = (map_alloc_f) mm_alloc;
stashd->rrsigs.free = (map_free_f) mm_free;
stashd->rrsigs.baton = pool;
}
/** @internal Baton for stash_commit */
struct stash_baton
{
@@ -169,26 +135,16 @@ struct stash_baton
unsigned timestamp;
};
static int commit_rrsig(const char *key, void *val, void *data)
static int commit_rrsig(struct stash_baton *baton, knot_rrset_t *rr)
{
/* Insert RRSIGs in special cache. */
knot_rrset_t *rrsig = val;
struct stash_baton *baton = data;
if (knot_rrset_ttl(rrsig) < 1) {
return kr_ok(); /* Ignore cache busters */
}
/* Check if already cached */
/** @todo This should check if less trusted data is in the cache,
for that the cache would need to trace data trust level.
*/
/* Check if already cached */
uint16_t covered = knot_rrsig_type_covered(&rr->rrs, 0);
unsigned drift = baton->timestamp;
knot_rrset_t query_rrsig;
knot_rrset_init(&query_rrsig, rrsig->owner, rrsig->type, rrsig->rclass);
knot_rrset_init(&query_rrsig, rr->owner, covered, rr->rclass);
if (kr_cache_peek_rrsig(baton->txn, &query_rrsig, &drift) == 0) {
return kr_ok();
}
return kr_cache_insert_rrsig(baton->txn, rrsig, rrsig->type, baton->timestamp);
return kr_cache_insert_rrsig(baton->txn, rr, covered, baton->timestamp);
}
static int commit_rr(const char *key, void *val, void *data)
@@ -198,11 +154,15 @@ static int commit_rr(const char *key, void *val, void *data)
if (knot_rrset_ttl(rr) < 1) {
return kr_ok(); /* Ignore cache busters */
}
/* Save RRSIG in a special cache. */
unsigned drift = baton->timestamp;
if (KEY_COVERING_RRSIG(key)) {
return commit_rrsig(baton, rr);
}
/* Check if already cached */
/** @todo This should check if less trusted data is in the cache,
for that the cache would need to trace data trust level.
*/
unsigned drift = baton->timestamp;
knot_rrset_t query_rr;
knot_rrset_init(&query_rr, rr->owner, rr->type, rr->rclass);
if (kr_cache_peek_rr(baton->txn, &query_rr, &drift) == 0) {
@@ -211,125 +171,41 @@ static int commit_rr(const char *key, void *val, void *data)
return kr_cache_insert_rr(baton->txn, rr, baton->timestamp);
}
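/* Commit path in short (illustration only): an entry that stash_add() flagged
 * with KEY_FLAG_RRSIG reaches commit_rr() first and is handed to commit_rrsig(),
 * which re-reads the covered type from the RRSIG rdata and inserts the RRset
 * into the RRSIG cache under that type - the same slot loot_rr() consults at
 * peek time when fetch_rrsig is true. Unflagged entries go to the plain RR cache. */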
static int stash_commit(struct stash_data *stash, unsigned timestamp, struct kr_cache_txn *txn)
static int stash_commit(map_t *stash, unsigned timestamp, struct kr_cache_txn *txn)
{
struct stash_baton baton = {
.txn = txn,
.timestamp = timestamp
};
int ret = map_walk(&stash->rrs, &commit_rr, &baton);
if (ret == 0) {
ret = map_walk(&stash->rrsigs, &commit_rrsig, &baton);
}
return ret;
}
static int merge_in_rrsigs(knot_rrset_t *cache_rr, const knot_rrset_t *rrsigset, const knot_rrset_t *rr,
mm_ctx_t *pool)
{
int ret = KNOT_EOK;
/* Find rrset corresponding to RRSIG. */
for (unsigned i = 0; i < rrsigset->rrs.rr_count; ++i) {
if ((knot_rrsig_type_covered(&rrsigset->rrs, i) == rr->type) &&
knot_dname_is_equal(cache_rr->owner, rrsigset->owner)) {
const knot_rdata_t *rdata = knot_rdataset_at(&rrsigset->rrs, i);
ret = knot_rdataset_add(&cache_rr->rrs, rdata, pool);
if (KNOT_EOK != ret) {
return ret;
}
}
}
return ret;
}
static int scan_for_rrsigs(knot_rrset_t *cache_rrsig, const knot_pktsection_t *section,
const knot_rrset_t *rr, mm_ctx_t *pool)
{
knot_rrset_init(cache_rrsig, rr->owner, rr->type, rr->rclass);
for (uint16_t i = 0; i < section->count; ++i) {
const knot_rrset_t *rrset = knot_pkt_rr(section, i);
if (KNOT_RRTYPE_RRSIG != rrset->type) {
continue;
}
int ret = merge_in_rrsigs(cache_rrsig, rrset, rr, pool);
if (KNOT_EOK != ret) {
knot_rrset_clear(cache_rrsig, pool);
return ret;
}
}
return kr_ok();
}
static int stash_add_rrsig(const knot_pktsection_t *section, map_t *stash,
const knot_rrset_t *rr, mm_ctx_t *pool)
{
/* Can't store RRSIG of RRSIG. */
if (rr->type == KNOT_RRTYPE_RRSIG) {
return kr_ok();
}
/* Stash key = {[1-255] owner, [1-5] type covered, [1] \x00 } */
char key[8 + KNOT_DNAME_MAXLEN];
int ret = knot_dname_to_wire((uint8_t *)key, rr->owner, KNOT_DNAME_MAXLEN);
if (ret <= 0) {
return ret;
}
knot_dname_to_lower((uint8_t *)key);
ret = snprintf(key + ret - 1, sizeof(key) - KNOT_DNAME_MAXLEN, "%hu", rr->type);
if (ret <= 0 || ret >= KNOT_DNAME_MAXLEN) {
return kr_error(EILSEQ);
}
/* Check if already exists */
knot_rrset_t *stashed = map_get(stash, key);
if (stashed) {
return kr_ok();
}
/* Construct RRSIG RRSet containing related data. */
knot_rrset_t cache_rrsig;
ret = scan_for_rrsigs(&cache_rrsig, section, rr, pool);
if (ret != 0) {
return ret;
}
if (cache_rrsig.rrs.rr_count) {
stashed = knot_rrset_copy(&cache_rrsig, pool);
if (!stashed) {
return kr_error(ENOMEM);
}
}
knot_rrset_clear(&cache_rrsig, pool);
if (stashed) {
return map_set(stash, key, stashed);
}
return kr_ok();
return map_walk(stash, &commit_rr, &baton);
}
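/* A minimal usage sketch (hypothetical helper, for illustration only), assuming
 * the kr_cache_txn_begin()/kr_cache_txn_commit() transaction API from the cache
 * layer; the real driver is the stash() layer callback further below. */
static int stash_commit_example(struct kr_cache *cache, map_t *stash, struct kr_query *qry)
{
	struct kr_cache_txn txn;
	int ret = kr_cache_txn_begin(cache, &txn, 0);
	if (ret != 0) {
		return ret;
	}
	/* Walk the stash and insert each RRset (or RRSIG, if flagged) into the cache. */
	ret = stash_commit(stash, qry->timestamp.tv_sec, &txn);
	if (ret == 0) {
		ret = kr_cache_txn_commit(&txn);
	} else {
		kr_cache_txn_abort(&txn);
	}
	return ret;
}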
static int stash_add(const knot_pkt_t *pkt, map_t *stash, const knot_rrset_t *rr, mm_ctx_t *pool)
{
/* Do not stash DNSSEC data if not secured. */
/* Stash key = {[1] flags, [1-255] owner, [1-5] type, [1] \x00 } */
char key[9 + KNOT_DNAME_MAXLEN];
bool dobit = knot_pkt_has_dnssec(pkt);
if (!dobit && knot_rrtype_is_dnssec(rr->type)) {
return kr_ok();
}
/* Do not stash alone RRSIGs, these must be stashed together with signed RRs. */
if (dobit && (rr->type == KNOT_RRTYPE_RRSIG)) {
return kr_ok();
uint16_t rrtype = rr->type;
KEY_FLAG_SET(key, KEY_FLAG_NO);
/* Stash RRSIGs in a special cache, flag them and set the key type to the type they cover.
 * This way the stash won't merge RRSIGs together. */
if (knot_rrtype_is_dnssec(rr->type)) {
if (!dobit || rr->type != KNOT_RRTYPE_RRSIG) {
return kr_ok(); /* Ignore other (and unsolicited) DNSSEC records. */
}
rrtype = knot_rrsig_type_covered(&rr->rrs, 0);
KEY_FLAG_SET(key, KEY_FLAG_RRSIG);
}
/* Stash key = {[1-255] owner, [1-5] type, [1] \x00 } */
char key[8 + KNOT_DNAME_MAXLEN];
int ret = knot_dname_to_wire((uint8_t *)key, rr->owner, KNOT_DNAME_MAXLEN);
uint8_t *key_buf = (uint8_t *)key + 1;
int ret = knot_dname_to_wire(key_buf, rr->owner, KNOT_DNAME_MAXLEN);
if (ret <= 0) {
return ret;
}
knot_dname_to_lower((uint8_t *)key);
ret = snprintf(key + ret - 1, sizeof(key) - KNOT_DNAME_MAXLEN, "%hu", rr->type);
knot_dname_to_lower(key_buf);
/* Must convert to string, as the key must not contain 0x00 */
ret = snprintf((char *)key_buf + ret - 1, sizeof(key) - KNOT_DNAME_MAXLEN, "%hu", rrtype);
if (ret <= 0 || ret >= KNOT_DNAME_MAXLEN) {
return kr_error(EILSEQ);
}
@@ -347,7 +223,7 @@ static int stash_add(const knot_pkt_t *pkt, map_t *stash, const knot_rrset_t *rr
return knot_rdataset_merge(&stashed->rrs, &rr->rrs, pool);
}
static void stash_glue(struct stash_data *stash, knot_pkt_t *pkt, const knot_dname_t *ns_name, mm_ctx_t *pool)
static void stash_glue(map_t *stash, knot_pkt_t *pkt, const knot_dname_t *ns_name, mm_ctx_t *pool)
{
const knot_pktsection_t *additional = knot_pkt_section(pkt, KNOT_ADDITIONAL);
for (unsigned i = 0; i < additional->count; ++i) {
@@ -356,11 +232,11 @@ static void stash_glue(struct stash_data *stash, knot_pkt_t *pkt, const knot_dna
!knot_dname_is_equal(rr->owner, ns_name)) {
continue;
}
stash_add(pkt, &stash->rrs, rr, pool);
stash_add(pkt, stash, rr, pool);
}
}
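/* For illustration: when stash_authority() below meets an NS RRset, it calls
 * stash_glue() with the nameserver name taken from the NS rdata, and any glue
 * owned by that name in the ADDITIONAL section is stashed alongside it. */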
static int stash_authority(struct kr_query *qry, knot_pkt_t *pkt, struct stash_data *stash, mm_ctx_t *pool)
static int stash_authority(struct kr_query *qry, knot_pkt_t *pkt, map_t *stash, mm_ctx_t *pool)
{
const knot_pktsection_t *authority = knot_pkt_section(pkt, KNOT_AUTHORITY);
for (unsigned i = 0; i < authority->count; ++i) {
@@ -374,24 +250,24 @@ static int stash_authority(struct kr_query *qry, knot_pkt_t *pkt, struct stash_d
stash_glue(stash, pkt, knot_ns_name(&rr->rrs, 0), pool);
}
/* Stash record */
stash_add(pkt, &stash->rrs, rr, pool);
stash_add_rrsig(authority, &stash->rrsigs, rr, pool);
stash_add(pkt, stash, rr, pool);
}
return kr_ok();
}
static int stash_answer(struct kr_query *qry, knot_pkt_t *pkt, struct stash_data *stash, mm_ctx_t *pool)
static int stash_answer(struct kr_query *qry, knot_pkt_t *pkt, map_t *stash, mm_ctx_t *pool)
{
const knot_dname_t *cname = qry->sname;
const knot_pktsection_t *answer = knot_pkt_section(pkt, KNOT_ANSWER);
for (unsigned i = 0; i < answer->count; ++i) {
/* Stash direct answers (equal to current QNAME/CNAME) */
/* Stash direct answers (equal to current QNAME/CNAME),
 * accept out-of-order RRSIGs. */
const knot_rrset_t *rr = knot_pkt_rr(answer, i);
if (!knot_dname_is_equal(rr->owner, cname)) {
if (!knot_dname_is_equal(rr->owner, cname)
&& rr->type != KNOT_RRTYPE_RRSIG) {
continue;
}
stash_add(pkt, &stash->rrs, rr, pool);
stash_add_rrsig(answer, &stash->rrsigs, rr, pool);
stash_add(pkt, stash, rr, pool);
/* Follow CNAME chain */
if (rr->type == KNOT_RRTYPE_CNAME) {
cname = knot_cname_name(&rr->rrs);
@@ -416,8 +292,10 @@ static int stash(knot_layer_t *ctx, knot_pkt_t *pkt)
return ctx->state;
}
/* Stash in-bailiwick data from the AUTHORITY and ANSWER. */
struct stash_data stash;
stash_data_init(&stash, rplan->pool);
map_t stash = map_make();
stash.malloc = (map_alloc_f) mm_alloc;
stash.free = (map_free_f) mm_free;
stash.baton = rplan->pool;
int ret = stash_authority(qry, pkt, &stash, rplan->pool);
if (ret == 0 && knot_wire_get_aa(pkt->wire)) {
ret = stash_answer(qry, pkt, &stash, rplan->pool);
......