Commit 63ec3f98 authored by Marek Vavrusa

Implemented proper locking and lookup using <cls,imputed(qname),netblock>.

Updated documentation and corrected default settings.
parent 674c7fab
@@ -201,10 +201,12 @@ Default value: @kbd{0 (disabled)}
@vindex rate-limit-size
The option controls the size of the hashtable of buckets. The larger the hashtable, the lower the probability of a hash collision, but
-at the expense of additional memory costs. Each bucket is estimated roughly to 16B.
+at the expense of additional memory costs. Each bucket is estimated roughly to 32B.
The size should be selected as a reasonably large prime, due to the better distribution properties of the hash function.
+The hash table is internally chained and works well up to a fill rate of 90%; the general rule of thumb is to select a prime near
+@kbd{1.2 * maximum_qps}.
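For illustration (figures invented for this example): a server expecting peaks around @kbd{160000} qps needs roughly @kbd{1.2 * 160000 = 192000} buckets, so the nearby prime @kbd{196613} is a reasonable choice; at 32B per bucket this costs about @kbd{196613 * 32 B}, i.e. roughly 6 MB of memory.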
-Default value: @kbd{1572869}
+Default value: @kbd{393241}
@node rate-limit-slip
@subsubsection rate-limit-slip
@@ -70,10 +70,10 @@ system {
# Number of hashtable buckets, set to a reasonable default value.
# We chose a reasonably large prime number for the hashtable size;
# doing the same is recommended, as primes give a better distribution.
-# Tweak if you experience a lot of hash collisions, estimated memory overhead
-# is approx. 16B per bucket
-# Default: 1572869
-rate-limit-size 1572869;
+# Rule of thumb is to set it to about 1.2 * (maximum_qps)
+# Memory cost is approx. 32B per bucket
+# Default: 393241
+rate-limit-size 393241;
# Rate limit SLIP
# Each Nth blocked response will be sent as truncated, this is a way to allow
@@ -51,7 +51,7 @@
#define CONFIG_HANDSHAKE_WD 10 /*!< [secs] for connection to make a request.*/
#define CONFIG_IDLE_WD 60 /*!< [secs] of allowed inactivity between requests */
#define CONFIG_RRL_SLIP 2 /*!< Default slip value. */
-#define CONFIG_RRL_SIZE 1572869 /*!< Htable default size. */
+#define CONFIG_RRL_SIZE 393241 /*!< Htable default size. */
/*!
* \brief Configuration for the interface
@@ -43,21 +43,6 @@
/* Enable RRL logging. */
#define RRL_ENABLE_LOG
-/* RRL granular locking. */
-static int rrl_lock_mx(rrl_table_t *t, int lk_id)
-{
-assert(lk_id > -1);
-dbg_rrl_verb("%s: locking id '%d'\n", __func__, lk_id);
-return pthread_mutex_lock(&t->lk[lk_id].mx);
-}
-static int rrl_unlock_mx(rrl_table_t *t, int lk_id)
-{
-assert(lk_id > -1);
-dbg_rrl_verb("%s: unlocking id '%d'\n", __func__, lk_id);
-return pthread_mutex_unlock(&t->lk[lk_id].mx);
-}
/* Classification */
enum {
CLS_NULL = 0 << 0, /* Empty bucket. */
@@ -201,8 +186,11 @@ static int rrl_classify(char *dst, size_t maxlen, const sockaddr_t *a,
blklen += sizeof(nb);
/* Name */
+uint16_t *nlen = (uint16_t*)(dst + blklen);
+blklen += sizeof(uint16_t);
int len = rrl_clsname(dst + blklen, maxlen - blklen, cls, p, z);
if (len < 0) return len;
+*nlen = len;
blklen += len;
/* Seed. */
@@ -218,12 +206,14 @@ static int bucket_free(rrl_item_t *b, uint32_t now) {
return b->cls == CLS_NULL || (b->time + 1 < now);
}
-static int bucket_match(rrl_item_t *b, uint8_t cls, uint64_t addr)
+static int bucket_match(rrl_item_t *b, rrl_item_t *m)
{
-return b->cls == cls && b->pref == addr;
+return b->cls == m->cls &&
+       b->netblk == m->netblk &&
+       b->qname == m->qname;
}
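Note that a bucket now matches only when the class, the netblock, and the imputed(QNAME) hash all agree, i.e. the <cls,imputed(qname),netblock> triple from the commit message; a bare netblock collision no longer aliases unrelated queries into the same bucket.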
-static unsigned find_free(rrl_table_t *t, unsigned i, uint8_t cls, uint64_t addr, uint32_t now)
+static int find_free(rrl_table_t *t, unsigned i, uint32_t now)
{
rrl_item_t *np = t->arr + t->size;
rrl_item_t *b = NULL;
@@ -244,6 +234,51 @@ static unsigned find_free(rrl_table_t *t, unsigned i, uint8_t cls, uint64_t addr
return i;
}
+static inline unsigned find_match(rrl_table_t *t, uint32_t id, rrl_item_t *m)
+{
+unsigned f = 0;
+unsigned d = 0;
+unsigned match = t->arr[id].hop;
+while (match != 0) {
+d = __builtin_ctz(match);
+f = (id + d) % t->size;
+if (bucket_match(t->arr + f, m)) {
+return d;
+} else {
+match &= ~(1<<d); /* clear potential match */
+}
+}
+return HOP_LEN + 1;
+}
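The lookup above walks the hop bitmap by repeatedly taking its lowest set bit. A standalone sketch of that count-trailing-zeros idiom (illustration only, not part of the patch; __builtin_ctz is a GCC/Clang builtin):

#include <stdio.h>

int main(void)
{
	unsigned hop = 0x29; /* bits 0, 3 and 5 set: offsets of candidate buckets */
	while (hop != 0) {
		unsigned d = __builtin_ctz(hop); /* offset of the next candidate */
		printf("probe bucket at offset %u\n", d);
		hop &= ~(1u << d); /* clear the bit and continue with the rest */
	}
	return 0;
}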
+static inline unsigned reduce_dist(rrl_table_t *t, unsigned id, unsigned d, unsigned *f)
+{
+unsigned rd = HOP_LEN - 1;
+while (rd > 0) {
+unsigned s = (t->size + *f - rd) % t->size; /* bucket to be vacated */
+unsigned o = __builtin_ctz(t->arr[s].hop); /* offset of first valid bucket */
+if (t->arr[s].hop != 0 && o < rd) { /* only offsets in <s, f> are interesting */
+unsigned e = (s + o) % t->size; /* this item will be displaced to [f] */
+unsigned keep_hop = t->arr[*f].hop; /* unpredictable padding */
+memcpy(t->arr + *f, t->arr + e, sizeof(rrl_item_t));
+t->arr[*f].hop = keep_hop;
+t->arr[e].cls = CLS_NULL;
+t->arr[s].hop &= ~(1<<o);
+t->arr[s].hop |= 1<<rd;
+*f = e;
+return d - (rd - o);
+}
+--rd;
+}
+assert(rd == 0); /* this happens with p=1/fact(HOP_LEN) */
+*f = id;
+d = 0; /* force vacate initial element */
+dbg_rrl("%s: no potential relocation, freeing bucket %u\n", __func__, id);
+return d;
+}
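To make the displacement concrete, an invented trace (assuming HOP_LEN = 8 for brevity): if the nearest free slot f lies 9 positions past id, reduce_dist() scans s = f-7 .. f-1 looking for a bucket whose first occupant sits at offset o < rd. That occupant (at e = s + o) is copied into f while f's own hop bitmap is preserved, e is marked CLS_NULL, and s's hop bitmap trades bit o for bit rd. The vacated slot e becomes the new f, d shrinks by rd - o, and the while loop in rrl_hash() repeats until d < HOP_LEN. If no candidate exists at all (probability about 1/HOP_LEN!, per the assert), the home bucket id itself is evicted.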
static void rrl_log_state(const sockaddr_t *a, uint16_t flags, uint8_t cls)
{
#ifdef RRL_ENABLE_LOG
@@ -260,18 +295,14 @@ static void rrl_log_state(const sockaddr_t *a, uint16_t flags, uint8_t cls)
#endif
}
rrl_table_t *rrl_create(size_t size)
{
const size_t tbl_len = sizeof(rrl_table_t) + size * sizeof(rrl_item_t);
rrl_table_t *t = malloc(tbl_len);
if (!t) return NULL;
-memset(t, 0, tbl_len);
-t->rate = 0;
-t->seed = (uint32_t)(tls_rand() * (double)UINT32_MAX);
+memset(t, 0, sizeof(rrl_table_t));
t->size = size;
+rrl_reseed(t);
dbg_rrl("%s: created table size '%zu'\n", __func__, t->size);
return t;
}
@@ -290,25 +321,31 @@ uint32_t rrl_rate(rrl_table_t *rrl)
return rrl->rate;
}
-int rrl_setlocks(rrl_table_t *rrl, size_t granularity)
+int rrl_setlocks(rrl_table_t *rrl, unsigned granularity)
{
if (!rrl) return KNOT_EINVAL;
assert(!rrl->lk); /* Cannot change while locks are used. */
assert(granularity <= rrl->size / 10); /* Due to int. division err. */
+if (pthread_mutex_init(&rrl->ll, NULL) < 0) {
+return KNOT_ENOMEM;
+}
/* Alloc new locks. */
-rrl->lk = malloc(granularity * sizeof(rrl_lock_t));
+rrl->lk = malloc(granularity * sizeof(pthread_mutex_t));
if (!rrl->lk) return KNOT_ENOMEM;
-memset(rrl->lk, 0, granularity * sizeof(rrl_lock_t));
+memset(rrl->lk, 0, granularity * sizeof(pthread_mutex_t));
/* Initialize. */
for (size_t i = 0; i < granularity; ++i) {
-if (pthread_mutex_init(&rrl->lk[i].mx, NULL) < 0) break;
+if (pthread_mutex_init(rrl->lk + i, NULL) < 0) break;
++rrl->lk_count;
}
/* Incomplete initialization */
if (rrl->lk_count != granularity) {
for (size_t i = 0; i < rrl->lk_count; ++i) {
-pthread_mutex_destroy(&rrl->lk[i].mx);
+pthread_mutex_destroy(rrl->lk + i);
}
free(rrl->lk);
rrl->lk_count = 0;
@@ -321,7 +358,7 @@ int rrl_setlocks(rrl_table_t *rrl, size_t granularity)
}
rrl_item_t* rrl_hash(rrl_table_t *t, const sockaddr_t *a, rrl_req_t *p,
-const knot_zone_t *zone, uint32_t stamp, int *lk)
+const knot_zone_t *zone, uint32_t stamp, int *lock)
{
char buf[RRL_CLSBLK_MAXLEN];
int len = rrl_classify(buf, sizeof(buf), a, p, zone, t->seed);
@@ -331,83 +368,54 @@ rrl_item_t* rrl_hash(rrl_table_t *t, const sockaddr_t *a, rrl_req_t *p,
uint32_t id = hash(buf, len) % t->size;
-/*! \todo locking is wrong now, need to map to regions and check */
-*lk = -1;
-if (t->lk_count > 0) {
-*lk = id % t->lk_count;
-rrl_lock_mx(t, *lk);
+/* Lock for lookup. */
+pthread_mutex_lock(&t->ll);
+/* Find an exact match in <id, id + HOP_LEN). */
+uint16_t *qname = (uint16_t*)(buf + sizeof(uint8_t) + sizeof(uint64_t));
+rrl_item_t match = {
+0, *((uint64_t*)(buf + 1)), t->rate, /* hop, netblk, ntok */
+buf[0], RRL_BF_NULL, /* cls, flags */
+hash((char*)(qname + 1), *qname), stamp /* qname, time */
+};
+unsigned d = find_match(t, id, &match);
+if (d > HOP_LEN) { /* not an exact match, find free element [f] */
+d = find_free(t, id, stamp);
+}
-/* Find an exact match in <id, id+K). */
-unsigned f = 0;
-unsigned d = 0;
-uint64_t nprefix = *((uint64_t*)(buf + sizeof(uint8_t)));
-unsigned match = t->arr[id].hop;
-while (match != 0) {
-d = __builtin_ctz(match);
-f = (id + d) % t->size;
-if (bucket_match(t->arr + f, buf[0], nprefix)) {
-break;
-} else {
-match &= ~(1<<d); /* clear potential match */
-}
-}
-if (match == 0) { /* not an exact match, find free element [f] */
-d = find_free(t, id, buf[0], nprefix, stamp);
-f = (id + d) % t->size;
-}
+/* Reduce distance to fit <id, id + HOP_LEN) */
+unsigned f = (id + d) % t->size;
while (d >= HOP_LEN) {
-unsigned rd = HOP_LEN - 1;
-while (rd > 0) {
-unsigned s = (t->size + f - rd) % t->size; /* bucket to be vacated */
-unsigned o = __builtin_ctz(t->arr[s].hop); /* offset of first valid bucket */
-if (t->arr[s].hop != 0 && o < rd) { /* only offsets in <s, f> are interesting */
-unsigned e = (s + o) % t->size; /* this item will be displaced to [f] */
-unsigned keep_hop = t->arr[f].hop; /* unpredictable padding */
-memcpy(t->arr + f, t->arr + e, sizeof(rrl_item_t));
-t->arr[f].hop = keep_hop;
-t->arr[e].cls = CLS_NULL;
-t->arr[s].hop &= ~(1<<o);
-t->arr[s].hop |= 1<<rd;
-f = e;
-d -= (rd - o);
-break;
-}
---rd;
-}
-if (rd == 0) { /* this happens with p=1/fact(HOP_LEN) */
-f = id;
-d = 0; /* force vacate initial element */
-dbg_rrl("%s: no potential relocation, freeing bucket %u\n", __func__, id);
-}
+d = reduce_dist(t, id, d, &f);
}
+/* Assign granular lock and unlock lookup. */
+*lock = f % t->lk_count;
+rrl_lock(t, *lock);
+pthread_mutex_unlock(&t->ll);
/* found free elm 'f' which is in <id, id + HOP_LEN) */
t->arr[id].hop |= (1 << d);
rrl_item_t* b = t->arr + f;
assert(f == (id+d) % t->size);
dbg_rrl("%s: classified pkt as %u '%u+%u' bucket=%p \n", __func__, f, id, d, b);
dbg_rrl("%s: classified pkt as %4x '%u+%u' bucket=%p \n", __func__, f, id, d, b);
/* Inspect bucket state. */
+unsigned hop = b->hop;
if (b->cls == CLS_NULL) {
-b->cls = *buf; /* Stored as a first byte in clsblock. */
-b->flags = RRL_BF_NULL;
-b->ntok = t->rate;
-b->time = stamp;
-b->pref = nprefix; /* Invalidate */
+memcpy(b, &match, sizeof(rrl_item_t));
+b->hop = hop;
}
/* Check for collisions. */
-/* \todo <prefix, imputed(qname), cls> */
-if (!bucket_match(b, buf[0], nprefix)) {
-dbg_rrl("%s: collision in bucket '0x%4x'\n", __func__, id);
+if (!bucket_match(b, &match)) {
+dbg_rrl("%s: collision in bucket '%4x'\n", __func__, id);
if (!(b->flags & RRL_BF_SSTART)) {
-b->pref = nprefix;
-b->cls = *buf;
-b->flags = RRL_BF_NULL; /* Reset flags. */
-b->time = stamp; /* Reset time */
+memcpy(b, &match, sizeof(rrl_item_t));
+b->hop = hop;
b->ntok = t->rate + t->rate / RRL_SSTART;
b->flags |= RRL_BF_SSTART;
dbg_rrl("%s: bucket '0x%4x' slow-start\n", __func__, id);
dbg_rrl("%s: bucket '%4x' slow-start\n", __func__, id);
}
}
@@ -425,8 +433,8 @@ int rrl_query(rrl_table_t *rrl, const sockaddr_t *a, rrl_req_t *req,
uint32_t now = time(NULL);
rrl_item_t *b = rrl_hash(rrl, a, req, zone, now, &lock);
if (!b) {
-assert(lock < 0);
dbg_rrl("%s: failed to compute bucket from packet\n", __func__);
+if (lock > -1) rrl_unlock(rrl, lock);
return KNOT_ERROR;
}
@@ -450,7 +458,6 @@ int rrl_query(rrl_table_t *rrl, const sockaddr_t *a, rrl_req_t *req,
/* Add new tokens. */
uint32_t dn = rrl->rate * dt;
if (b->flags & RRL_BF_SSTART) { /* Bucket in slow-start. */
-dn /= RRL_SSTART;
b->flags &= ~RRL_BF_SSTART;
dbg_rrl("%s: bucket '0x%x' slow-start finished\n",
__func__, (unsigned)(b - rrl->arr));
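For intuition (numbers invented): with a configured rate of 100 responses/s and dt = 2 seconds since the bucket was last updated, dn = 100 * 2 = 200 tokens are added. A bucket leaving slow-start now merely clears RRL_BF_SSTART; the removed dn /= RRL_SSTART throttling is unnecessary because the extra t->rate / RRL_SSTART tokens are already granted up front in rrl_hash().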
@@ -473,12 +480,8 @@ int rrl_query(rrl_table_t *rrl, const sockaddr_t *a, rrl_req_t *req,
} else if (b->ntok == 0) {
ret = KNOT_ELIMIT;
}
-/* Unlock bucket. */
-if (lock > -1) {
-rrl_unlock_mx(rrl, lock);
-}
+if (lock > -1) rrl_unlock(rrl, lock);
return ret;
}
@@ -486,8 +489,9 @@ int rrl_destroy(rrl_table_t *rrl)
{
if (rrl) {
dbg_rrl("%s: freeing table %p\n", __func__, rrl);
+if (rrl->lk_count > 0) pthread_mutex_destroy(&rrl->ll);
for (size_t i = 0; i < rrl->lk_count; ++i) {
-pthread_mutex_destroy(&rrl->lk[i].mx);
+pthread_mutex_destroy(rrl->lk + i);
}
free(rrl->lk);
}
@@ -495,3 +499,47 @@ int rrl_destroy(rrl_table_t *rrl)
free(rrl);
return KNOT_EOK;
}
+int rrl_reseed(rrl_table_t *rrl)
+{
+/* Lock entire table. */
+if (rrl->lk_count > 0) {
+pthread_mutex_lock(&rrl->ll);
+for (unsigned i = 0; i < rrl->lk_count; ++i) {
+rrl_lock(rrl, i);
+}
+}
+memset(rrl->arr, 0, rrl->size * sizeof(rrl_item_t));
+rrl->seed = (uint32_t)(tls_rand() * (double)UINT32_MAX);
+dbg_rrl("%s: reseed to '%u'\n", __func__, rrl->seed);
+if (rrl->lk_count > 0) {
+for (unsigned i = 0; i < rrl->lk_count; ++i) {
+rrl_unlock(rrl, i);
+}
+pthread_mutex_unlock(&rrl->ll);
+}
+return KNOT_EOK;
+}
+int rrl_lock(rrl_table_t *t, int lk_id)
+{
+assert(lk_id > -1);
+dbg_rrl_verb("%s: locking id '%d'\n", __func__, lk_id);
+if (pthread_mutex_lock(t->lk + lk_id) != 0) {
+return KNOT_ERROR;
+}
+return KNOT_EOK;
+}
+int rrl_unlock(rrl_table_t *t, int lk_id)
+{
+assert(lk_id > -1);
+dbg_rrl_verb("%s: unlocking id '%d'\n", __func__, lk_id);
+if (pthread_mutex_unlock(t->lk + lk_id) != 0) {
+return KNOT_ERROR;
+}
+return KNOT_EOK;
+}
@@ -34,24 +34,21 @@
#include "libknot/zone/zone.h"
/* Defaults */
-#define RRL_LOCK_GRANULARITY 10 /* Last digit granularity */
+#define RRL_LOCK_GRANULARITY 32 /* Default lock granularity */
/*!
* \brief RRL hash bucket.
*/
typedef struct rrl_item {
unsigned hop; /* Hop bitmap. */
-uint64_t pref; /* Prefix associated. */
+uint64_t netblk; /* Associated netblock. */
uint16_t ntok; /* Tokens available */
uint8_t cls; /* Bucket class */
uint8_t flags; /* Flags */
+uint32_t qname; /* imputed(QNAME) hash */
uint32_t time; /* Timestamp */
} rrl_item_t;
-typedef struct rrl_lock { /* Wrapper around lock struct. */
-pthread_mutex_t mx;
-} rrl_lock_t;
/*!
* \brief RRL hash bucket table.
*
@@ -67,8 +64,9 @@ typedef struct rrl_lock { /* Wrapper around lock struct. */
typedef struct rrl_table {
uint32_t rate; /* Configured RRL limit */
uint32_t seed; /* Pseudorandom seed for hashing. */
-rrl_lock_t *lk; /* Table locks. */
-size_t lk_count; /* Table lock count (granularity). */
+pthread_mutex_t ll; /* Lookup lock. */
+pthread_mutex_t *lk; /* Table locks. */
+unsigned lk_count; /* Table lock count (granularity). */
size_t size; /* Number of buckets */
rrl_item_t arr[]; /* Buckets */
} rrl_table_t;
@@ -116,7 +114,7 @@ uint32_t rrl_setrate(rrl_table_t *rrl, uint32_t rate);
* \retval KNOT_EOK
* \retval KNOT_EINVAL
*/
-int rrl_setlocks(rrl_table_t *rrl, size_t granularity);
+int rrl_setlocks(rrl_table_t *rrl, unsigned granularity);
/*!
* \brief Get bucket for current combination of parameters.
@@ -125,11 +123,11 @@ int rrl_setlocks(rrl_table_t *rrl, unsigned granularity);
* \param p RRL request.
* \param zone Related zone.
* \param stamp Timestamp (current time).
-* \param lk Destination for assigned lock (*lk will be set to a value).
+* \param lock Destination for the id of the lock held on return.
* \return assigned bucket
*/
rrl_item_t* rrl_hash(rrl_table_t *t, const sockaddr_t *a, rrl_req_t *p,
-const knot_zone_t *zone, uint32_t stamp, int *lk);
+const knot_zone_t *zone, uint32_t stamp, int *lock);
/*!
* \brief Query the RRL table for accept or deny, when the rate limit is reached.
@@ -151,6 +149,30 @@ int rrl_query(rrl_table_t *rrl, const sockaddr_t *a, rrl_req_t *req,
*/
int rrl_destroy(rrl_table_t *rrl);
+/*!
+ * \brief Reseed RRL table secret.
+ * \param rrl RRL table.
+ * \return KNOT_EOK
+ */
+int rrl_reseed(rrl_table_t *rrl);
+/*!
+ * \brief Lock the specified element lock.
+ * \param rrl RRL table.
+ * \param lk_id Specified lock.
+ * \retval KNOT_EOK
+ * \retval KNOT_ERROR
+ */
+int rrl_lock(rrl_table_t *rrl, int lk_id);
+/*!
+ * \brief Unlock the specified element lock.
+ * \param rrl RRL table.
+ * \param lk_id Specified lock.
+ * \retval KNOT_EOK
+ * \retval KNOT_ERROR
+ */
+int rrl_unlock(rrl_table_t *rrl, int lk_id);
#endif /* _KNOTD_RRL_H_ */
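A minimal lifecycle sketch of the API declared above (illustration only, not part of the patch; the table size and rate are invented for the example, and error codes follow the KNOT_* conventions used in this file):

#include "knot/server/rrl.h"

int rrl_example(void)
{
	/* Prime near 1.2 * expected peak qps. */
	rrl_table_t *rrl = rrl_create(196613);
	if (!rrl) {
		return KNOT_ENOMEM;
	}
	rrl_setrate(rrl, 100); /* Allowed responses/s per bucket. */
	if (rrl_setlocks(rrl, RRL_LOCK_GRANULARITY) != KNOT_EOK) {
		rrl_destroy(rrl);
		return KNOT_ERROR;
	}
	/* ... call rrl_query(rrl, addr, req, zone) for each response,
	 * and rrl_reseed(rrl) periodically to rotate the hashing secret ... */
	return rrl_destroy(rrl);
}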
@@ -18,6 +18,7 @@
#include <sys/socket.h>
#include "tests/knot/rrl_tests.h"
#include "knot/server/rrl.h"
#include "knot/server/dthreads.h"
#include "knot/common.h"
#include "libknot/packet/response.h"
#include "libknot/packet/query.h"
@@ -26,13 +27,65 @@
/* Enable time-dependent tests. */
//#define ENABLE_TIMED_TESTS
-#define RRL_SIZE (786433) /* prime */
-#define RRL_INSERTS (629146) /* fill=0.8 */
+#define RRL_SIZE 196613
+#define RRL_THREADS 16
+#define RRL_INSERTS (0.5*(RRL_SIZE/RRL_THREADS)) /* lf = 0.5 */
+#define RRL_LOCKS 64
struct bucketmap_t {
unsigned i;
uint64_t x;
};
+/*! \brief Unit runnable. */
+struct runnable_data {
+int passed;
+rrl_table_t *rrl;
+sockaddr_t *addr;
+rrl_req_t *rq;
+knot_zone_t *zone;
+};
+static void* rrl_runnable(void *arg)
+{
+struct runnable_data* d = (struct runnable_data*)arg;
+sockaddr_t addr;
+memcpy(&addr, d->addr, sizeof(sockaddr_t));
+sockaddr_update(&addr);
+int lock = -1;
+uint32_t now = time(NULL);
+struct bucketmap_t *m = malloc(RRL_INSERTS * sizeof(struct bucketmap_t));
+for (unsigned i = 0; i < RRL_INSERTS; ++i) {
+m[i].i = tls_rand() * UINT32_MAX;
+addr.addr4.sin_addr.s_addr = m[i].i;
+rrl_item_t *b = rrl_hash(d->rrl, &addr, d->rq, d->zone, now, &lock);
+rrl_unlock(d->rrl, lock);
+m[i].x = b->netblk;
+}
+for (unsigned i = 0; i < RRL_INSERTS; ++i) {
+addr.addr4.sin_addr.s_addr = m[i].i;
+rrl_item_t *b = rrl_hash(d->rrl, &addr, d->rq, d->zone, now, &lock);
+rrl_unlock(d->rrl, lock);
+if (b->netblk != m[i].x) {
+d->passed = 0;
+}
+}
+free(m);
+return NULL;
+}
+static void rrl_hopscotch(struct runnable_data* rd)
+{
+rd->passed = 1;
+pthread_t thr[RRL_THREADS];
+for (unsigned i = 0; i < RRL_THREADS; ++i) {
+pthread_create(thr + i, NULL, &rrl_runnable, rd);
+}
+for (unsigned i = 0; i < RRL_THREADS; ++i) {
+pthread_join(thr[i], NULL);
+}
+}
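Each of the RRL_THREADS workers inserts its own random sample and then re-reads it through rrl_hash(), so the helper exercises the granular locks and the hopscotch displacement concurrently; it is invoked twice below (tests 8 and 10), before and after a reseed.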
static int rrl_tests_count(int argc, char *argv[]);
static int rrl_tests_run(int argc, char *argv[]);
@@ -51,7 +104,7 @@ unit_api rrl_tests_api = {
static int rrl_tests_count(int argc, char *argv[])
{
-int c = 7;
+int c = 10;
#ifndef ENABLE_TIMED_TESTS
c -= 2;
#endif
@@ -91,15 +144,19 @@ static int rrl_tests_run(int argc, char *argv[])
uint32_t rate = 10;
rrl_setrate(rrl, rate);
ok(rate == rrl_rate(rrl), "rrl: setrate");
+/* 3. setlocks */
+int ret = rrl_setlocks(rrl, RRL_LOCKS);
+ok(ret == KNOT_EOK, "rrl: setlocks");
-/* 3. N unlimited requests. */
+/* 4. N unlimited requests. */
knot_dname_t *apex = knot_dname_new_from_str("rrl.", 4, NULL);
knot_zone_t *zone = knot_zone_new(knot_node_new(apex, NULL, 0), 0, 0);
sockaddr_t addr;
sockaddr_t addr6;
sockaddr_set(&addr, AF_INET, "1.2.3.4", 0);
sockaddr_set(&addr6, AF_INET6, "1122:3344:5566:7788::aabb", 0);
-int ret = 0;
+ret = 0;
for (unsigned i = 0; i < rate; ++i) {
if (rrl_query(rrl, &addr, &rq, zone) != KNOT_EOK ||
rrl_query(rrl, &addr6, &rq, zone) != KNOT_EOK) {
@@ -110,16 +167,16 @@ static int rrl_tests_run(int argc, char *argv[])
ok(ret == 0, "rrl: unlimited IPv4/v6 requests");
#ifdef ENABLE_TIMED_TESTS
-/* 4. limited request */
+/* 5. limited request */
ret = rrl_query(rrl, &addr, &rq, zone);
ok(ret != 0, "rrl: throttled IPv4 request");
-/* 5. limited IPv6 request */
+/* 6. limited IPv6 request */
ret = rrl_query(rrl, &addr6, &rq, zone);
ok(ret != 0, "rrl: throttled IPv6 request");
#endif
-/* 6. invalid values. */
+/* 7. invalid values. */
ret = 0;
lives_ok( {
rrl_create(0); // NULL
@@ -132,27 +189,19 @@ static int rrl_tests_run(int argc, char *argv[])
ret += rrl_destroy(0); // -1
}, "rrl: not crashed while executing functions on NULL context");
-/* 7. hopscotch test */
-int lk = 0;
-int passed = 1;
-uint32_t now = time(NULL);
-struct bucketmap_t *m = malloc(RRL_INSERTS * sizeof(struct bucketmap_t));
-for (unsigned i = 0; i < RRL_INSERTS; ++i) {
-m[i].i = tls_rand() * UINT32_MAX;
-addr.addr4.sin_addr.s_addr = m[i].i;
-rrl_item_t *b = rrl_hash(rrl, &addr, &rq, zone, now, &lk);
-m[i].x = b->pref;
-}
-for (unsigned i = 0; i < RRL_INSERTS; ++i) {
-addr.addr4.sin_addr.s_addr = m[i].i;
-rrl_item_t *b = rrl_hash(rrl, &addr, &rq, zone, now, &lk);
-if (b->pref != m[i].x) {
-passed = 0;
-//break;
-}
-}
-free(m);
-ok(passed, "rrl: hashtable is ~ consistent\n");
+/* 8. hopscotch test */
+struct runnable_data rd = {
+1, rrl, &addr, &rq, zone
+};
+rrl_hopscotch(&rd);
+ok(rd.passed, "rrl: hashtable is ~ consistent");
+/* 9. reseed */
+ok(rrl_reseed(rrl) == 0, "rrl: reseed");
+/* 10. hopscotch after reseed. */
+rrl_hopscotch(&rd);
+ok(rd.passed, "rrl: hashtable is ~ consistent");
knot_dname_release(qst.qname);
knot_dname_release(apex);