Commit 7421c70f authored by Marek Vavruša

lib: cache semantics - require that at least one record is valid, selective RR removal

parent 77dda7b9
Merge request !14: Faking time in tests, OS X integration tests, selective cache remove
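In short, the commit splits the old kr_cache_query into two steps: kr_cache_peek, which checks presence and validity in place without copying, and kr_cache_materialize, which clones only the still-valid RRs and deducts the elapsed time from their TTLs. Below is a minimal sketch of the intended calling pattern, assuming an already-open read transaction; the include paths and the lookup_rr wrapper are assumptions for illustration, not part of the commit.

#include <libknot/rrset.h>
#include "lib/cache.h"

/* Hypothetical wrapper: fetch a usable copy of a cached RRSet. */
static int lookup_rr(namedb_txn_t *txn, knot_dname_t *name, uint16_t type,
                     uint32_t now, mm_ctx_t *mm, knot_rrset_t *out)
{
	knot_rrset_t cached;
	knot_rrset_init(&cached, name, type, KNOT_CLASS_IN);

	/* Peek succeeds if at least one RR is still valid; on success the
	 * in/out timestamp is replaced by the drift (age of the entry). */
	uint32_t drift = now;
	int ret = kr_cache_peek(txn, &cached, &drift);
	if (ret != KNOT_EOK) {
		return ret;
	}

	/* Materialize copies only RRs with TTL > drift and lowers their
	 * TTLs by the drift, so the copy outlives the transaction. */
	*out = kr_cache_materialize(&cached, drift, mm);
	if (out->rrs.rr_count == 0) {
		knot_rrset_clear(out, mm);
		return KNOT_ENOENT;
	}
	return KNOT_EOK;
}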
@@ -99,7 +99,7 @@ static struct kr_cache_rrset *cache_rr(namedb_txn_t *txn, const knot_dname_t *na
 	return (struct kr_cache_rrset *)val.data;
 }
 
-int kr_cache_query(namedb_txn_t *txn, knot_rrset_t *rr, uint32_t *timestamp)
+int kr_cache_peek(namedb_txn_t *txn, knot_rrset_t *rr, uint32_t *timestamp)
 {
 	/* Check if the RRSet is in the cache. */
 	struct kr_cache_rrset *found_rr = cache_rr(txn, rr->owner, rr->type);
@@ -120,23 +120,52 @@ int kr_cache_query(namedb_txn_t *txn, knot_rrset_t *rr, uint32_t *timestamp)
 			return KNOT_EOK;
 		}
 
-		/* Check if all RRs are still valid. */
+		/* Check if at least one RR is still valid. */
 		uint32_t drift = *timestamp - found_rr->timestamp;
 		for (unsigned i = 0; i < rr->rrs.rr_count; ++i) {
 			const knot_rdata_t *rd = knot_rdataset_at(&rr->rrs, i);
-			if (drift >= knot_rdata_ttl(rd)) {
-				return KNOT_ENOENT;
+			if (knot_rdata_ttl(rd) > drift) {
+				*timestamp = drift;
+				return KNOT_EOK;
 			}
 		}
 
-		*timestamp = drift;
-		return KNOT_EOK;
+		return KNOT_ENOENT;
 	}
 
 	/* Not found. */
 	return KNOT_ENOENT;
 }
 
+knot_rrset_t kr_cache_materialize(const knot_rrset_t *src, uint32_t drift, mm_ctx_t *mm)
+{
+	/* Make RRSet copy. */
+	knot_rrset_t copy;
+	knot_rrset_init(&copy, NULL, src->type, src->rclass);
+	copy.owner = knot_dname_copy(src->owner, mm);
+	if (copy.owner == NULL) {
+		return copy;
+	}
+
+	for (uint16_t i = 0; i < src->rrs.rr_count; ++i) {
+		knot_rdata_t *rd = knot_rdataset_at(&src->rrs, i);
+		if (knot_rdata_ttl(rd) > drift) {
+			if (knot_rdataset_add(&copy.rrs, rd, mm) != KNOT_EOK) {
+				knot_rrset_clear(&copy, mm);
+				return copy;
+			}
+		}
+	}
+
+	/* Update TTLs. */
+	for (uint16_t i = 0; i < copy.rrs.rr_count; ++i) {
+		knot_rdata_t *rd = knot_rdataset_at(&copy.rrs, i);
+		knot_rdata_set_ttl(rd, knot_rdata_ttl(rd) - drift);
+	}
+
+	return copy;
+}
+
 int kr_cache_insert(namedb_txn_t *txn, const knot_rrset_t *rr, uint32_t timestamp)
 {
 	/* Ignore empty records. */
......
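The loop above inverts the validity rule: kr_cache_query failed as soon as any RR had expired (drift >= TTL), while kr_cache_peek succeeds as long as one RR survives. Concretely, for an RRSet cached at t=100 with TTLs {30, 300} and a lookup at t=140 (drift 40), the old code returned KNOT_ENOENT; the new code returns KNOT_EOK, and materializing then keeps only the 300-TTL record with 260 seconds left. A self-contained restatement of both predicates with plain integers (illustration only, not part of the commit):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* New rule from kr_cache_peek: usable if at least one TTL outlives drift. */
static bool any_valid(const uint32_t *ttls, unsigned count, uint32_t drift)
{
	for (unsigned i = 0; i < count; ++i) {
		if (ttls[i] > drift) {
			return true;
		}
	}
	return false;
}

/* Old rule from kr_cache_query: unusable if any TTL has expired. */
static bool all_valid(const uint32_t *ttls, unsigned count, uint32_t drift)
{
	for (unsigned i = 0; i < count; ++i) {
		if (drift >= ttls[i]) {
			return false;
		}
	}
	return true;
}

int main(void)
{
	const uint32_t ttls[] = { 30, 300 };
	const uint32_t drift = 140 - 100; /* cached at 100, queried at 140 */
	printf("old: %d, new: %d\n", all_valid(ttls, 2, drift), any_valid(ttls, 2, drift));
	return 0; /* prints "old: 0, new: 1" */
}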
@@ -70,14 +70,23 @@ int kr_cache_txn_commit(namedb_txn_t *txn);
 void kr_cache_txn_abort(namedb_txn_t *txn);
 
 /*!
- * \brief Query the cache for given RRSet (name, type, class)
+ * \brief Peek the cache for given RRSet (name, type, class)
+ * \note The 'drift' is the time passed between the cache time of the RRSet and now (in seconds).
  * \param txn transaction instance
  * \param rr query RRSet (its rdataset may be changed depending on the result)
  * \param timestamp current time (will be replaced with drift if successful)
  * \return KNOT_E*
  */
-int kr_cache_query(namedb_txn_t *txn, knot_rrset_t *rr, uint32_t *timestamp);
+int kr_cache_peek(namedb_txn_t *txn, knot_rrset_t *rr, uint32_t *timestamp);
+
+/*!
+ * \brief Clone read-only RRSet and adjust TTLs.
+ * \param src read-only RRSet (its rdataset may be changed depending on the result)
+ * \param drift time passed between cache time and now
+ * \param mm memory context
+ * \return materialized (or empty) RRSet
+ */
+knot_rrset_t kr_cache_materialize(const knot_rrset_t *src, uint32_t drift, mm_ctx_t *mm);
 
 /*!
  * \brief Insert RRSet into cache, replacing any existing data.
......
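Note the in/out convention documented above: the caller passes the current time in *timestamp, and on success kr_cache_peek overwrites it with the drift. A small sketch making that explicit (peek_age is a hypothetical helper; the include path is an assumption):

#include "lib/cache.h"

/* Hypothetical helper: report how long an RRSet has been cached.
 * Returns KNOT_EOK and sets *age only when the entry is still usable. */
static int peek_age(namedb_txn_t *txn, knot_rrset_t *rr, uint32_t now, uint32_t *age)
{
	uint32_t stamp = now;              /* in: current UNIX time */
	int ret = kr_cache_peek(txn, rr, &stamp);
	if (ret == KNOT_EOK) {
		*age = stamp;              /* out: drift in seconds, not a time */
	}
	return ret;
}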
@@ -49,37 +49,20 @@ static int update_answer(const knot_rrset_t *rr, unsigned drift, struct kr_layer
 {
 	knot_pkt_t *answer = param->answer;
 
-	/* Make RRSet copy. */
-	knot_rrset_t rr_copy;
-	knot_rrset_init(&rr_copy, NULL, rr->type, rr->rclass);
-	rr_copy.owner = knot_dname_copy(rr->owner, &answer->mm);
-	int ret = knot_rdataset_copy(&rr_copy.rrs, &rr->rrs, &answer->mm);
-	if (ret != KNOT_EOK) {
-		knot_rrset_clear(&rr_copy, &answer->mm);
+	/* Materialize RR set */
+	knot_rrset_t rr_copy = kr_cache_materialize(rr, drift, &answer->mm);
+	if (rr_copy.rrs.rr_count == 0) {
 		return KNOT_NS_PROC_FAIL;
 	}
 
-	/* Adjust the TTL of the records. */
-	for (unsigned i = 0; i < rr_copy.rrs.rr_count; ++i) {
-		knot_rdata_t *rd = knot_rdataset_at(&rr_copy.rrs, i);
-		knot_rdata_set_ttl(rd, knot_rdata_ttl(rd) - drift);
-	}
-
-	/* Write copied RR to the result packet. */
-	ret = knot_pkt_put(answer, KNOT_COMPR_HINT_NONE, &rr_copy, KNOT_PF_FREE);
-	if (ret != KNOT_EOK) {
-		knot_rrset_clear(&rr_copy, &answer->mm);
-		knot_wire_set_tc(answer->wire);
-	}
-
-	return KNOT_NS_PROC_DONE;
+	return rr_update_answer(&rr_copy, 0, param);
 }
 
 static int read_cache_rr(namedb_txn_t *txn, knot_rrset_t *cache_rr, uint32_t timestamp,
                          rr_callback_t cb, struct kr_layer_param *param)
 {
 	/* Query cache for requested record */
-	if (kr_cache_query(txn, cache_rr, &timestamp) != KNOT_EOK) {
+	if (kr_cache_peek(txn, cache_rr, &timestamp) != KNOT_EOK) {
 		return KNOT_NS_PROC_NOOP;
 	}
@@ -171,7 +154,7 @@ static int write_cache_rr(const knot_pktsection_t *section, knot_rrset_t *rr, na
 	/* Check if already cached. */
 	knot_rrset_t query_rr;
 	knot_rrset_init(&query_rr, rr->owner, rr->type, rr->rclass);
-	if (kr_cache_query(txn, &query_rr, &timestamp) == KNOT_EOK) {
+	if (kr_cache_peek(txn, &query_rr, &timestamp) == KNOT_EOK) {
 		return KNOT_EOK;
 	}
......
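The packet-writing tail removed from update_answer above now lives in rr_update_answer, whose body is not shown in this diff. Based on the deleted lines, the final step presumably still looks like the sketch below: KNOT_PF_FREE hands ownership of the copy to the packet, and an oversized answer falls back to setting the TC bit so the client retries over TCP (put_or_truncate is a hypothetical name, an approximation rather than the actual helper):

#include <libknot/packet/pkt.h>

/* Write a materialized RRSet into the answer packet; free it and signal
 * truncation if it does not fit. Mirrors the code deleted above. */
static int put_or_truncate(knot_pkt_t *answer, knot_rrset_t *rr_copy)
{
	int ret = knot_pkt_put(answer, KNOT_COMPR_HINT_NONE, rr_copy, KNOT_PF_FREE);
	if (ret != KNOT_EOK) {
		knot_rrset_clear(rr_copy, &answer->mm);
		knot_wire_set_tc(answer->wire);
	}
	return KNOT_NS_PROC_DONE;
}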
@@ -44,17 +44,39 @@ static int invalidate_ns(struct kr_rplan *rplan, struct kr_query *qry)
 	if (txn == NULL) {
 		return KNOT_EOK;
 	}
 
+	/* Fetch current nameserver cache. */
+	uint32_t drift = qry->timestamp.tv_sec;
+	knot_rrset_t cached;
+	knot_rrset_init(&cached, qry->zone_cut.name, KNOT_RRTYPE_NS, KNOT_CLASS_IN);
+	if (kr_cache_peek(txn, &cached, &drift) != KNOT_EOK) {
+		kr_init_zone_cut(&qry->zone_cut);
+		return KNOT_EOK;
+	}
+	cached = kr_cache_materialize(&cached, drift, rplan->pool);
+
+	/* Find a matching RD. */
+	knot_rdataset_t to_remove;
+	knot_rdataset_init(&to_remove);
+	for (unsigned i = 0; i < cached.rrs.rr_count; ++i) {
+		knot_rdata_t *rd = knot_rdataset_at(&cached.rrs, i);
+		if (knot_dname_is_equal(knot_rdata_data(rd), qry->zone_cut.ns)) {
+			knot_rdataset_add(&to_remove, rd, rplan->pool);
+		}
+	}
+	knot_rdataset_subtract(&cached.rrs, &to_remove, rplan->pool);
+	knot_rdataset_clear(&to_remove, rplan->pool);
+
+	/* Remove record(s) */
+	if (cached.rrs.rr_count == 0) {
+		(void) kr_cache_remove(txn, &cached);
+	} else {
+		(void) kr_cache_insert(txn, &cached, qry->timestamp.tv_sec);
+	}
+	knot_rrset_clear(&cached, rplan->pool);
+
-	/* TODO: selective removal */
-	knot_rrset_t removed_rr;
-	knot_rrset_init(&removed_rr, qry->zone_cut.name, KNOT_RRTYPE_NS, KNOT_CLASS_IN);
-	(void) kr_cache_remove(txn, &removed_rr);
-
-	/* Find new zone cut / nameserver */
-	kr_find_zone_cut(&qry->zone_cut, qry->sname, txn, qry->timestamp.tv_sec);
-
-	/* Continue with querying */
-	return KNOT_EOK;
+	/* Update zone cut and continue. */
+	return kr_find_zone_cut(&qry->zone_cut, qry->sname, txn, qry->timestamp.tv_sec);
 }
 
 static int iterate(struct knot_requestor *requestor, struct kr_layer_param *param)
......
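The new invalidate_ns body implements the "selective RR removal" from the commit title: instead of dropping the whole NS RRSet for the zone cut, it subtracts only the failing server's RDATA and writes the survivors back, falling back to removal of the entry only when nothing is left. The same logic, condensed into a standalone helper for clarity (remove_one_ns is a hypothetical name; error handling of the rdataset calls is elided as in the original):

#include <libknot/rrset.h>
#include "lib/cache.h"

/* Drop a single nameserver from a cached NS set: subtract the matching
 * RDATA, then either remove the entry or re-insert the remaining RRs. */
static int remove_one_ns(namedb_txn_t *txn, knot_rrset_t *cached,
                         const knot_dname_t *bad_ns, uint32_t now, mm_ctx_t *mm)
{
	knot_rdataset_t to_remove;
	knot_rdataset_init(&to_remove);
	for (unsigned i = 0; i < cached->rrs.rr_count; ++i) {
		knot_rdata_t *rd = knot_rdataset_at(&cached->rrs, i);
		if (knot_dname_is_equal(knot_rdata_data(rd), bad_ns)) {
			knot_rdataset_add(&to_remove, rd, mm);
		}
	}
	knot_rdataset_subtract(&cached->rrs, &to_remove, mm);
	knot_rdataset_clear(&to_remove, mm);

	if (cached->rrs.rr_count == 0) {
		return kr_cache_remove(txn, cached); /* nothing usable left */
	}
	return kr_cache_insert(txn, cached, now);    /* keep the survivors */
}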
@@ -56,40 +56,12 @@ static const struct hint_info SBELT[HINT_COUNT] = {
 	{ U8("\x01""m""\x0c""root-servers""\x03""net"), "202.12.27.33" }
 };
 
-/*! \brief Fetch address record for nameserver. */
-static int prefetch_ns_addr(struct kr_zonecut *cut, knot_rrset_t *cached_rr, namedb_txn_t *txn, uint32_t timestamp)
+int kr_init_zone_cut(struct kr_zonecut *cut)
 {
-	/* Fetch nameserver address from cache. */
-	cached_rr->type = KNOT_RRTYPE_A;
-	if (kr_cache_query(txn, cached_rr, &timestamp) != KNOT_EOK) {
-		cached_rr->type = KNOT_RRTYPE_AAAA;
-		if (kr_cache_query(txn, cached_rr, &timestamp) != KNOT_EOK) {
-			return KNOT_ENOENT;
-		}
-	}
-	return kr_rrset_to_addr(&cut->addr, cached_rr);
-}
-
-/*! \brief Fetch best NS for zone cut. */
-static int fetch_ns(struct kr_zonecut *cut, const knot_dname_t *name, namedb_txn_t *txn, uint32_t timestamp)
-{
-	knot_rrset_t cached_rr;
-	knot_rrset_init(&cached_rr, (knot_dname_t *)name, KNOT_RRTYPE_NS, KNOT_CLASS_IN);
-	int ret = kr_cache_query(txn, &cached_rr, &timestamp);
-	if (ret == KNOT_EOK) {
-		/* Accept only if has address records cached. */
-		kr_set_zone_cut(cut, name, knot_ns_name(&cached_rr.rrs, 0));
-		knot_rrset_init(&cached_rr, cut->ns, 0, KNOT_CLASS_IN);
-		ret = prefetch_ns_addr(cut, &cached_rr, txn, timestamp);
+	if (cut == NULL) {
+		return KNOT_EINVAL;
 	}
-	return ret;
-}
-
-/*! \brief Set zone cut to '.' and choose a random root nameserver from the SBELT. */
-static int set_sbelt_zone_cut(struct kr_zonecut *cut)
-{
 	const unsigned hint_id = knot_random_uint16_t() % HINT_COUNT;
 	const struct hint_info *hint = &SBELT[hint_id];
@@ -139,6 +111,55 @@ int kr_set_zone_cut_addr(struct kr_zonecut *cut, const knot_rrset_t *rr, uint16_
 	return ret;
 }
 
+/*! \brief Fetch address for zone cut. */
+static int fetch_addr(struct kr_zonecut *cut, namedb_txn_t *txn, uint32_t timestamp)
+{
+	/* Fetch nameserver address from cache. */
+	knot_rrset_t cached_rr;
+	knot_rrset_init(&cached_rr, cut->ns, 0, KNOT_CLASS_IN);
+	cached_rr.type = KNOT_RRTYPE_A;
+	if (kr_cache_peek(txn, &cached_rr, &timestamp) != KNOT_EOK) {
+		cached_rr.type = KNOT_RRTYPE_AAAA;
+		if (kr_cache_peek(txn, &cached_rr, &timestamp) != KNOT_EOK) {
+			return KNOT_ENOENT;
+		}
+	}
+
+	/* Find first valid record. */
+	uint16_t i = 0;
+	for (; i < cached_rr.rrs.rr_count; ++i) {
+		knot_rdata_t *rd = knot_rdataset_at(&cached_rr.rrs, i);
+		if (knot_rdata_ttl(rd) > timestamp) {
+			break;
+		}
+	}
+
+	return kr_set_zone_cut_addr(cut, &cached_rr, i);
+}
+
+/*! \brief Fetch best NS for zone cut. */
+static int fetch_ns(struct kr_zonecut *cut, const knot_dname_t *name, namedb_txn_t *txn, uint32_t timestamp)
+{
+	uint32_t drift = timestamp;
+	knot_rrset_t cached_rr;
+	knot_rrset_init(&cached_rr, (knot_dname_t *)name, KNOT_RRTYPE_NS, KNOT_CLASS_IN);
+	int ret = kr_cache_peek(txn, &cached_rr, &drift);
+	if (ret != KNOT_EOK) {
+		return ret;
+	}
+
+	/* Accept only if has address records cached. */
+	for (unsigned i = 0; i < cached_rr.rrs.rr_count; ++i) {
+		kr_set_zone_cut(cut, name, knot_ns_name(&cached_rr.rrs, i));
+		ret = fetch_addr(cut, txn, timestamp);
+		if (ret == KNOT_EOK) {
+			break;
+		}
+	}
+
+	return ret;
+}
+
 int kr_find_zone_cut(struct kr_zonecut *cut, const knot_dname_t *name, namedb_txn_t *txn, uint32_t timestamp)
 {
 	if (cut == NULL || name == NULL) {
@@ -147,7 +168,7 @@ int kr_find_zone_cut(struct kr_zonecut *cut, const knot_dname_t *name, namedb_tx
 	/* No cache, start with SBELT. */
 	if (txn == NULL) {
-		return set_sbelt_zone_cut(cut);
+		return kr_init_zone_cut(cut);
 	}
 
 	/* Start at QNAME. */
@@ -163,5 +184,5 @@ int kr_find_zone_cut(struct kr_zonecut *cut, const knot_dname_t *name, namedb_tx
 	}
 
 	/* Name server not found, start with SBELT. */
-	return set_sbelt_zone_cut(cut);
+	return kr_init_zone_cut(cut);
 }
@@ -85,7 +85,7 @@ static void test_query(void **state)
 	knot_rrset_init(&cache_rr, global_rr.owner, global_rr.type, global_rr.rclass);
 
 	namedb_txn_t *txn = test_txn_rdonly(state);
-	int query_ret = kr_cache_query(txn, &cache_rr, &timestamp);
+	int query_ret = kr_cache_peek(txn, &cache_rr, &timestamp);
 	bool rr_equal = knot_rrset_equal(&global_rr, &cache_rr, KNOT_RRSET_COMPARE_WHOLE);
 	kr_cache_txn_abort(txn);
@@ -101,7 +101,7 @@ static void test_query_aged(void **state)
 	knot_rrset_init(&cache_rr, global_rr.owner, global_rr.type, global_rr.rclass);
 
 	namedb_txn_t *txn = test_txn_rdonly(state);
-	int ret = kr_cache_query(txn, &cache_rr, &timestamp);
+	int ret = kr_cache_peek(txn, &cache_rr, &timestamp);
 	assert_int_equal(ret, KNOT_ENOENT);
 }
......