cache.c 7.75 KB
Newer Older
Marek Vavruša's avatar
Marek Vavruša committed
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
/*  Copyright (C) 2014 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

17 18
#include <assert.h>
#include <time.h>
19 20 21
#include <sys/stat.h>
#include <unistd.h>
#include <errno.h>
22

23
#include <libknot/internal/mempattern.h>
24
#include <libknot/internal/namedb/namedb_lmdb.h>
Marek Vavruša's avatar
Marek Vavruša committed
25
#include <libknot/errcode.h>
26 27 28 29 30
#include <libknot/descriptor.h>

#include "lib/cache.h"
#include "lib/defines.h"

31 32 33
/* Key size */
#define KEY_SIZE (sizeof(uint8_t) + KNOT_DNAME_MAXLEN + sizeof(uint16_t))

34
/* Open the cache: select a backend (LMDB when none is given) and initialise it.
 * Returns kr_ok() on success, kr_error(EINVAL) on bad arguments or the
 * backend's error code when initialisation fails. */
int kr_cache_open(struct kr_cache *cache, const namedb_api_t *api, void *opts, mm_ctx_t *mm)
{
	if (cache == NULL) {
		return kr_error(EINVAL);
	}

	/* Fall back to the LMDB backend by default. */
	cache->api = api ? api : namedb_lmdb_api();
	int ret = cache->api->init(&cache->db, mm, opts);
	if (ret != 0) {
		return ret;
	}

	/* Fresh cache starts with zeroed counters. */
	memset(&cache->stats, 0, sizeof(cache->stats));
	return kr_ok();
}

48
void kr_cache_close(struct kr_cache *cache)
49
{
50
	if (cache && cache->db) {
51 52
		if (cache->api) {
			cache->api->deinit(cache->db);
53 54
		}
		cache->db = NULL;
55
	}
56 57
}

58
int kr_cache_txn_begin(struct kr_cache *cache, struct kr_cache_txn *txn, unsigned flags)
59
{
60
	if (!cache || !cache->db || !cache->api || !txn ) {
61
		return kr_error(EINVAL);
62 63
	}

64 65 66 67 68 69
	if (flags & NAMEDB_RDONLY) {
		cache->stats.txn_read += 1;
	} else {
		cache->stats.txn_write += 1;
	}
	txn->owner = cache;
70
	return cache->api->txn_begin(cache->db, (namedb_txn_t *)txn, flags);
71 72
}

73
int kr_cache_txn_commit(struct kr_cache_txn *txn)
74
{
75
	if (!txn || !txn->owner || !txn->owner->api) {
76
		return kr_error(EINVAL);
77 78
	}

79
	int ret = txn->owner->api->txn_commit((namedb_txn_t *)txn);
80
	if (ret != 0) {
81 82 83
		kr_cache_txn_abort(txn);
	}
	return ret;
84 85
}

86
void kr_cache_txn_abort(struct kr_cache_txn *txn)
87
{
88 89
	if (txn && txn->owner && txn->owner->api) {
		txn->owner->api->txn_abort((namedb_txn_t *)txn);
90
	}
91 92
}

93 94
/** @internal Composed key as { u8 tag, u8[1-255] name, u16 type } */
static size_t cache_key(uint8_t *buf, uint8_t tag, const knot_dname_t *name, uint16_t type)
{
	/* Write the name in lookup format; its first byte is the name length. */
	knot_dname_lf(buf, name, NULL);
	size_t name_len = (size_t)buf[0] + 1;
	/* Append the RR type, then overwrite the length byte with the tag
	 * (the length must be read before this overwrite). */
	memcpy(&buf[name_len], &type, sizeof(type));
	buf[0] = tag;
	return name_len + sizeof(type);
}

103
static struct kr_cache_entry *cache_entry(struct kr_cache_txn *txn, uint8_t tag, const knot_dname_t *name, uint16_t type)
104
{
105 106
	uint8_t keybuf[KEY_SIZE];
	size_t key_len = cache_key(keybuf, tag, name, type);
107
	if (!txn || !txn->owner || !txn->owner->api) {
108 109
		return NULL;
	}
110

111 112 113
	/* Look up and return value */
	namedb_val_t key = { keybuf, key_len };
	namedb_val_t val = { NULL, 0 };
114
	int ret = txn->owner->api->find((namedb_txn_t *)txn, &key, &val, 0);
115 116
	if (ret != KNOT_EOK) {
		return NULL;
117 118
	}

119
	return (struct kr_cache_entry *)val.data;
120 121
}

122
struct kr_cache_entry *kr_cache_peek(struct kr_cache_txn *txn, uint8_t tag, const knot_dname_t *name,
123
                                     uint16_t type, uint32_t *timestamp)
124
{
125
	if (!txn || !txn->owner || !tag || !name) {
126
		return NULL;
127 128
	}

129
	struct kr_cache_entry *entry = cache_entry(txn, tag, name, type);
130
	if (!entry) {
131
		txn->owner->stats.miss += 1;
132 133 134 135 136
		return NULL;
	}	

	/* No time constraint */
	if (!timestamp) {
137
		txn->owner->stats.hit += 1;
138 139 140 141
		return entry;
	} else if (*timestamp <= entry->timestamp) {
		/* John Connor record cached in the future. */
		*timestamp = 0;
142
		txn->owner->stats.hit += 1;
143 144 145 146 147 148
		return entry;
	} else {
		/* Check if the record is still valid. */
		uint32_t drift = *timestamp - entry->timestamp;
		if (drift < entry->ttl) {
			*timestamp = drift;
149
			txn->owner->stats.hit += 1;
150 151 152
			return entry;
		}
	}
153

154
	txn->owner->stats.miss += 1;
155 156
	return NULL;	
}
Marek Vavruša's avatar
Marek Vavruša committed
157

158 159 160 161 162 163
/** @internal Serialize an entry into dst: fixed-size header first,
 *  payload bytes immediately after (dst must be large enough). */
static void entry_write(struct kr_cache_entry *dst, struct kr_cache_entry *header, namedb_val_t data)
{
	assert(dst != NULL);
	memcpy(dst, header, sizeof(*header));
	memcpy(dst->data, data.data, data.len);
}
164

165
/* Store an entry (header + payload) under the composed (tag, name, type) key.
 * Returns kr_ok(), kr_error(EINVAL/ENOMEM), or the backend's error code. */
int kr_cache_insert(struct kr_cache_txn *txn, uint8_t tag, const knot_dname_t *name, uint16_t type,
                    struct kr_cache_entry *header, namedb_val_t data)
{
	if (!txn || !txn->owner || !txn->owner->api || !name || !tag || !header) {
		return kr_error(EINVAL);
	}

	/* Insert key */
	uint8_t keybuf[KEY_SIZE];
	size_t key_len = cache_key(keybuf, tag, name, type);
	namedb_val_t key = { keybuf, key_len };
	namedb_val_t entry = { NULL, sizeof(*header) + data.len };
	const namedb_api_t *db_api = txn->owner->api;

	txn->owner->stats.insert += 1;

	if (db_api == namedb_lmdb_api()) {
		/* LMDB can do late write and avoid copy: reserve the value
		 * space first, then serialize straight into it. */
		int ret = db_api->insert((namedb_txn_t *)txn, &key, &entry, 0);
		if (ret != 0) {
			return ret;
		}
		entry_write(entry.data, header, data);
		return kr_ok();
	}

	/* Other backends must prepare contiguous data first. */
	entry.data = malloc(entry.len);
	if (entry.data == NULL) {
		return kr_error(ENOMEM);
	}
	entry_write(entry.data, header, data);
	int ret = db_api->insert((namedb_txn_t *)txn, &key, &entry, 0);
	free(entry.data);
	if (ret != 0) {
		return ret;
	}
	return kr_ok();
}
203

204
int kr_cache_remove(struct kr_cache_txn *txn, uint8_t tag, const knot_dname_t *name, uint16_t type)
205
{
206
	if (!txn || !txn->owner || !txn->owner->api || !tag || !name ) {
207 208 209 210 211 212
		return kr_error(EINVAL);
	}

	uint8_t keybuf[KEY_SIZE];
	size_t key_len = cache_key(keybuf, tag, name, type);
	namedb_val_t key = { keybuf, key_len };
213
	txn->owner->stats.delete += 1;
214
	return txn->owner->api->del((namedb_txn_t *)txn, &key);
215 216
}

217
int kr_cache_clear(struct kr_cache_txn *txn)
218
{
219
	if (!txn || !txn->owner || !txn->owner->api) {
220 221 222
		return kr_error(EINVAL);
	}

223
	return txn->owner->api->clear((namedb_txn_t *)txn);
224 225
}

226
/* Peek a cached RRSet for rr->owner / rr->type. On a hit the rdata fields
 * of rr point directly into the cache entry (zero-copy) — the data is only
 * valid while the transaction is open. *timestamp is adjusted to the entry
 * age as in kr_cache_peek(). Returns kr_ok() or kr_error(EINVAL/ENOENT). */
int kr_cache_peek_rr(struct kr_cache_txn *txn, knot_rrset_t *rr, uint32_t *timestamp)
{
	if (!txn || !rr || !timestamp) {
		return kr_error(EINVAL);
	}

	/* Check if the RRSet is in the cache. */
	struct kr_cache_entry *entry = kr_cache_peek(txn, KR_CACHE_RR, rr->owner, rr->type, timestamp);
	if (entry == NULL) {
		/* Not found. */
		return kr_error(ENOENT);
	}

	rr->rrs.rr_count = entry->count;
	rr->rrs.data = entry->data;
	return kr_ok();
}

244 245
/* Materialize an owned copy of a cached RRSet: only records whose TTL
 * outlives 'drift' seconds are kept, and their TTLs are decreased by the
 * drift. On allocation failure the returned set has a NULL owner and an
 * empty rdataset — the caller must check for that. */
knot_rrset_t kr_cache_materialize(const knot_rrset_t *src, uint32_t drift, mm_ctx_t *mm)
{
	assert(src);

	/* Start from an empty set of the same type/class with a copied owner. */
	knot_rrset_t copy;
	knot_rrset_init(&copy, NULL, src->type, src->rclass);
	copy.owner = knot_dname_copy(src->owner, mm);
	if (copy.owner == NULL) {
		return copy;
	}

	/* First pass: keep only records still alive after the drift. */
	for (uint16_t idx = 0; idx < src->rrs.rr_count; ++idx) {
		knot_rdata_t *record = knot_rdataset_at(&src->rrs, idx);
		if (knot_rdata_ttl(record) <= drift) {
			continue;
		}
		if (knot_rdataset_add(&copy.rrs, record, mm) != 0) {
			knot_rrset_clear(&copy, mm);
			return copy;
		}
	}

	/* Second pass: age the surviving records' TTLs by the drift. */
	for (uint16_t idx = 0; idx < copy.rrs.rr_count; ++idx) {
		knot_rdata_t *record = knot_rdataset_at(&copy.rrs, idx);
		knot_rdata_set_ttl(record, knot_rdata_ttl(record) - drift);
	}

	return copy;
}

275
int kr_cache_insert_rr(struct kr_cache_txn *txn, const knot_rrset_t *rr, uint32_t timestamp)
276
{
277 278
	if (!txn || !rr) {
		return kr_error(EINVAL);
279 280
	}

281
	/* Ignore empty records */
282
	if (knot_rrset_empty(rr)) {
283
		return kr_ok();
284 285
	}

286 287 288 289 290 291 292 293 294 295 296
	/* Prepare header to write */
	struct kr_cache_entry header = {
		.timestamp = timestamp,
		.ttl = 0,
		.count = rr->rrs.rr_count
	};
	for (uint16_t i = 0; i < rr->rrs.rr_count; ++i) {
		knot_rdata_t *rd = knot_rdataset_at(&rr->rrs, i);
		if (knot_rdata_ttl(rd) > header.ttl) {
			header.ttl = knot_rdata_ttl(rd);
		}
297 298
	}

299 300
	namedb_val_t data = { rr->rrs.data, knot_rdataset_size(&rr->rrs) };
	return kr_cache_insert(txn, KR_CACHE_RR, rr->owner, rr->type, &header, data);
301
}