Commit 867349e8 authored by Daniel Salzman

common: remove slab

parent f8afcf1e
Doxyfile
KNOWN_ISSUES
Makefile.am
README
configure.ac
@@ -179,10 +181,18 @@ src/knot/worker/queue.c
src/knot/worker/queue.h
src/knot/zone/contents.c
src/knot/zone/contents.h
src/knot/zone/events/events.c
src/knot/zone/events/events.h
src/knot/zone/events/handlers.c
src/knot/zone/events/handlers.h
src/knot/zone/events/replan.c
src/knot/zone/events/replan.h
src/knot/zone/node.c
src/knot/zone/node.h
src/knot/zone/semantic-check.c
src/knot/zone/semantic-check.h
src/knot/zone/timers.c
src/knot/zone/timers.h
src/knot/zone/zone-diff.c
src/knot/zone/zone-diff.h
src/knot/zone/zone-dump.c
@@ -199,14 +209,6 @@ src/knot/zone/zonedb.c
src/knot/zone/zonedb.h
src/knot/zone/zonefile.c
src/knot/zone/zonefile.h
src/knot/zone/events/events.c
src/knot/zone/events/events.h
src/knot/zone/events/handlers.h
src/knot/zone/events/handlers.c
src/knot/zone/events/replan.c
src/knot/zone/events/replan.h
src/knot/zone/timers.c
src/knot/zone/timers.h
src/libknot/binary.c
src/libknot/binary.h
src/libknot/common.h
@@ -350,7 +352,7 @@ tests/wire.c
tests/worker_pool.c
tests/worker_queue.c
tests/zone_events.c
tests/zone_timers.c
tests/zone_update.c
tests/zonedb.c
tests/ztree.c
tests/zone_timers.c
@@ -85,9 +85,6 @@ libknots_la_SOURCES = \
common-knot/print.h \
common-knot/ref.c \
common-knot/ref.h \
common-knot/slab/alloc-common.h \
common-knot/slab/slab.c \
common-knot/slab/slab.h \
common-knot/sockaddr.c \
common-knot/sockaddr.h \
common-knot/strlcat.c \
...
/* Copyright (C) 2011 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*!
* \file alloc-common.h
*
* \author Lubos Slovak <lubos.slovak@nic.cz>
*
* \brief Common macros for alloc.
*
* \addtogroup alloc
* @{
*/
#pragma once
#include <stdio.h>
//#define MEM_DEBUG
//#define MEM_NOSLAB
//#define MEM_POISON
#define MEM_SLAB_CAP 5 // Cap slab_cache empty slab count (undefined = inf)
#define MEM_COLORING // Slab cache coloring
//#define MEM_SLAB_DEPOT // Use slab depot for slab caching (not thread-safe)
/* Eliminate compiler warning with unused parameters. */
#ifndef UNUSED
#define UNUSED(param) (void)(param)
#endif
/* Optimisation macros. */
#ifndef knot_likely
#define knot_likely(x) __builtin_expect((x),1)
#endif
#ifndef knot_unlikely
#define knot_unlikely(x) __builtin_expect((x),0)
#endif
#ifdef MEM_DEBUG
#define dbg_mem(msg...) fprintf(stderr, msg)
#else
#define dbg_mem(msg...)
#endif
/*! @} */
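The header above only provides build-time switches and small helper macros. As a rough sketch of how callers were expected to use the helpers (the checked_alloc() function below is hypothetical and not part of the tree; it assumes MEM_DEBUG is defined so that dbg_mem() expands to fprintf):

#include <stdlib.h>
#include "common-knot/slab/alloc-common.h"

/* Hypothetical example: allocate a buffer, logging failures via dbg_mem(). */
static void *checked_alloc(size_t size, int unused_flags)
{
    UNUSED(unused_flags);              /* silences the unused-parameter warning */
    void *mem = malloc(size);
    if (knot_unlikely(mem == NULL)) {  /* hints the compiler that failure is rare */
        dbg_mem("%s: allocation of %zu bytes failed\n", __func__, size);
    }
    return mem;
}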
/* Copyright (C) 2011 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <assert.h>
#include <stdlib.h>
#include <sys/mman.h>
#include "common-knot/slab/alloc-common.h"
#include "common-knot/slab/slab.h"
/*
* Magic constants.
*/
#define SLAB_MAGIC 0x51 /*!< "Sl" magic byte (slab type). */
#define POISON_DWORD 0xdeadbeef /*!< Memory boundary guard magic. */
#define SLAB_MINCOLOR 64 /*!< Minimum space reserved for cache coloring. */
/*! \brief Return binary logarithm of a number, which is a power of 2. */
static inline unsigned fastlog2(unsigned v)
{
// Works if we know the size is a power of 2
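// Each mask test contributes one bit of the result: 0xAAAAAAAA sets bit 0
// (the single set bit sits at an odd position), 0xFFFF0000 sets bit 4
// (position >= 16), and the remaining masks fill in bits 3..1.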
register unsigned int r = (v & 0xAAAAAAAA) != 0;
r |= ((v & 0xFFFF0000) != 0) << 4;
r |= ((v & 0xFF00FF00) != 0) << 3;
r |= ((v & 0xF0F0F0F0) != 0) << 2;
r |= ((v & 0xCCCCCCCC) != 0) << 1;
return r;
}
/*
* Slab run-time constants.
*/
size_t SLAB_SZ = 0; /*!< Slab size. */
size_t SLAB_MASK = 0; /*!< \brief Slab address mask (for computing offsets). */
/*!
* The depot is a caching sub-allocator of slabs.
* It mitigates the performance impact of sequentially allocating and freeing
* from a slab with just a few slab items by caching N slabs before returning
* them to the system.
*
*/
#ifdef MEM_SLAB_DEPOT
static slab_depot_t _depot_g; /*! \brief Global slab depot. */
#endif // MEM_SLAB_DEPOT
/*!
* \brief Allocate a slab of given bufsize from depot.
*
* \retval Reserved memory for slab on success.
* \retval NULL on errors.
*/
static void* slab_depot_alloc(size_t bufsize)
{
void *page = 0;
#ifdef MEM_SLAB_DEPOT
if (_depot_g.available) {
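// Prefer a cached slab whose bufsize already matches the request;
// slab_create() can then skip rebuilding its freelist.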
for (int i = _depot_g.available - 1; i > -1 ; --i) {
if(_depot_g.cache[i]->bufsize == bufsize) {
page = _depot_g.cache[i];
_depot_g.cache[i] = _depot_g.cache[--_depot_g.available];
return page;
}
}
page = _depot_g.cache[--_depot_g.available];
} else {
if(posix_memalign(&page, SLAB_SZ, SLAB_SZ) == 0) {
((slab_t*)page)->bufsize = 0;
} else {
page = 0;
}
}
#else // MEM_SLAB_DEPOT
UNUSED(bufsize);
if(posix_memalign(&page, SLAB_SZ, SLAB_SZ) == 0) {
((slab_t*)page)->bufsize = 0;
} else {
page = 0;
}
#endif // MEM_SLAB_DEPOT
return page;
}
/*!
* \brief Return a slab to the depot.
*
* \note If the depot is full, slab gets immediately freed.
*/
static inline void slab_depot_free(void* slab)
{
#ifdef MEM_SLAB_DEPOT
if (_depot_g.available < SLAB_DEPOT_SIZE) {
_depot_g.cache[_depot_g.available++] = slab;
} else {
free(slab);
}
#else // MEM_SLAB_DEPOT
free(slab);
#endif // MEM_SLAB_DEPOT
}
/*! \brief Initialize slab depot. */
static void slab_depot_init()
{
#ifdef MEM_SLAB_DEPOT
_depot_g.available = 0;
#endif // MEM_SLAB_DEPOT
}
/*! \brief Destroy slab depot. */
static void slab_depot_destroy()
{
#ifdef MEM_SLAB_DEPOT
while(_depot_g.available) {
free(_depot_g.cache[--_depot_g.available]);
}
#endif // MEM_SLAB_DEPOT
}
/*
* Initializers.
*/
/*! \brief Initializes slab subsystem (it is called automatically). */
void __attribute__ ((constructor)) slab_init()
{
long slab_size = sysconf(_SC_PAGESIZE);
if (slab_size < 0) {
slab_size = SLAB_MINSIZE;
}
// Fetch page size
SLAB_SZ = (size_t)slab_size;
unsigned slab_logsz = fastlog2(SLAB_SZ);
// Compute slab page mask
SLAB_MASK = 0;
for (unsigned i = 0; i < slab_logsz; ++i) {
SLAB_MASK |= 1 << i;
}
SLAB_MASK = ~SLAB_MASK;
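// E.g. with 4096-byte pages SLAB_MASK == ~0xfff, so masking any buf
// pointer with it yields the base address of its (page-aligned) slab.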
// Initialize depot
slab_depot_init();
}
/*! \brief Deinitializes slab subsystem (it is called automatically). */
void __attribute__ ((destructor)) slab_deinit()
{
// Deinitialize depot
if (SLAB_MASK) {
slab_depot_destroy();
}
}
/*
* Cache helper functions.
*/
/* \note Not used right now.
static void slab_dump(slab_t* slab) {
printf("%s: buffers (bufsize=%zuB, %u/%u free): \n",
__func__, slab->cache->bufsize, slab->bufs_free,
slab->bufs_count);
void** buf = slab->head;
int i = 0, n = 0;
while(buf != 0) {
size_t diff = (size_t)((char*)buf - (char*)slab->base);
printf("-> %lu", diff / slab->cache->bufsize);
buf = (void**)(*buf);
if (++i == 10) {
printf("\n");
i = 0;
}
++n;
}
printf("\n");
}
*/
/*!
* \brief Free all slabs from a slab cache.
* \return Number of freed slabs.
*/
static inline int slab_cache_free_slabs(slab_t* slab)
{
int count = 0;
while (slab) {
slab_t* next = slab->next;
slab_destroy(&slab);
++count;
slab = next;
}
return count;
}
/*
* Slab helper functions.
*/
/*! \brief Remove slab from a linked list. */
static void slab_list_remove(slab_t* slab)
{
// Disconnect from list
if (slab->prev) {
slab->prev->next = slab->next;
}
if(slab->next) {
slab->next->prev = slab->prev;
}
// Disconnect from cache
slab_cache_t* cache = slab->cache;
{
if (cache->slabs_free == slab) {
cache->slabs_free = slab->next;
} else if (cache->slabs_full == slab) {
cache->slabs_full = slab->next;
}
}
}
/*! \brief Insert slab into a linked list. */
static void slab_list_insert(slab_t** list, slab_t* item)
{
// If list exists, push to the top
item->prev = 0;
item->next = *list;
if(*list) {
(*list)->prev = item;
}
*list = item;
}
/*! \brief Move slab from one linked list to another. */
static inline void slab_list_move(slab_t** target, slab_t* slab)
{
slab_list_remove(slab);
slab_list_insert(target, slab);
}
/*
* API functions.
*/
slab_t* slab_create(slab_cache_t* cache)
{
const size_t size = SLAB_SZ;
slab_t* slab = slab_depot_alloc(cache->bufsize);
if (knot_unlikely(slab == 0)) {
dbg_mem("%s: failed to allocate aligned memory block\n",
__func__);
return 0;
}
/* Initialize slab. */
slab->magic = SLAB_MAGIC;
slab->cache = cache;
slab_list_insert(&cache->slabs_free, slab);
#ifdef MEM_SLAB_CAP
++cache->empty;
#endif
/* Already initialized? */
if (slab->bufsize == cache->bufsize) {
return slab;
} else {
slab->bufsize = cache->bufsize;
}
/* Ensure the item size can hold at least a size of ptr. */
size_t item_size = slab->bufsize;
if (knot_unlikely(item_size < SLAB_MIN_BUFLEN)) {
item_size = SLAB_MIN_BUFLEN;
}
/* Ensure at least some space for coloring */
size_t data_size = size - sizeof(slab_t);
#ifdef MEM_COLORING
size_t free_space = data_size % item_size;
if (knot_unlikely(free_space < SLAB_MINCOLOR)) {
free_space = SLAB_MINCOLOR;
}
/// unsigned short color = __sync_fetch_and_add(&cache->color, 1);
unsigned short color = (cache->color += sizeof(void*));
color = color % free_space;
#else
const unsigned short color = 0;
#endif
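// The color shifts the first buf by a small per-slab offset, so bufs of
// consecutive slabs do not all start on the same cache lines.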
/* Calculate useable data size */
data_size -= color;
slab->bufs_count = data_size / item_size;
slab->bufs_free = slab->bufs_count;
// Save first item as next free
slab->base = (char*)slab + sizeof(slab_t) + color;
slab->head = (void**)slab->base;
// Create freelist, skip last member, which is set to NULL
char* item = (char*)slab->head;
for(unsigned i = 0; i < slab->bufs_count - 1; ++i) {
*((void**)item) = item + item_size;
item += item_size;
}
// Set the last buf's next pointer to NULL (freelist tail)
*((void**)item) = (void*)0;
dbg_mem("%s: created slab (%p, %p) (%zu B)\n",
__func__, slab, slab + size, size);
return slab;
}
void slab_destroy(slab_t** slab)
{
/* Disconnect from the list */
slab_list_remove(*slab);
/* Free slab */
slab_depot_free(*slab);
/* Invalidate pointer. */
dbg_mem("%s: deleted slab %p\n", __func__, *slab);
*slab = 0;
}
void* slab_alloc(slab_t* slab)
{
// Fetch first free item
void **item = 0;
{
if((item = slab->head)) {
slab->head = (void**)*item;
--slab->bufs_free;
} else {
// No more free items
return 0;
}
}
#ifdef MEM_DEBUG
// Increment statistics
__sync_add_and_fetch(&slab->cache->stat_allocs, 1);
#endif
// Move to full?
if (knot_unlikely(slab->bufs_free == 0)) {
slab_list_move(&slab->cache->slabs_full, slab);
} else {
#ifdef MEM_SLAB_CAP
// Mark not empty?
if (knot_unlikely(slab->bufs_free == slab->bufs_count - 1)) {
--slab->cache->empty;
}
#endif
}
return item;
}
void slab_free(void* ptr)
{
// Null pointer check
if (knot_unlikely(!ptr)) {
return;
}
// Get slab start address
slab_t* slab = slab_from_ptr(ptr);
assert(slab);
// Check the magic byte to tell a slab buf from a large block
if (slab->magic == SLAB_MAGIC) {
// Return buf to slab
*((void**)ptr) = (void*)slab->head;
slab->head = (void**)ptr;
++slab->bufs_free;
#ifdef MEM_DEBUG
// Increment statistics
__sync_add_and_fetch(&slab->cache->stat_frees, 1);
#endif
// Return to partial
if(knot_unlikely(slab->bufs_free == 1)) {
slab_list_move(&slab->cache->slabs_free, slab);
} else {
#ifdef MEM_SLAB_CAP
// Recycle if empty
if(knot_unlikely(slab_isempty(slab))) {
if(slab->cache->empty == MEM_SLAB_CAP) {
slab_destroy(&slab);
} else {
++slab->cache->empty;
}
}
#endif
}
} else {
// Pointer is not a slab
// Presuming it's a large block
slab_obj_t* bs = (slab_obj_t*)ptr - 1;
#ifdef MEM_POISON
// Unprotect the poison guard placed after the object
mprotect(ptr + bs->size, sizeof(int), PROT_READ|PROT_WRITE);
#endif
// Unmap
dbg_mem("%s: unmapping large block of %zu bytes at %p\n",
__func__, bs->size, ptr);
free(bs);
}
}
int slab_cache_init(slab_cache_t* cache, size_t bufsize)
{
if (knot_unlikely(!bufsize)) {
return -1;
}
memset(cache, 0, sizeof(slab_cache_t));
cache->bufsize = bufsize;
dbg_mem("%s: created cache of size %zu\n",
__func__, bufsize);
return 0;
}
void slab_cache_destroy(slab_cache_t* cache) {
// Free slabs
unsigned free_s = slab_cache_free_slabs(cache->slabs_free);
unsigned full_s = slab_cache_free_slabs(cache->slabs_full);
#ifndef MEM_DEBUG
UNUSED(free_s);
UNUSED(full_s);
#else
dbg_mem("%s: %u empty/partial, %u full caches\n",
__func__, free_s, full_s);
#endif
// Invalidate cache
cache->bufsize = 0;
cache->slabs_free = cache->slabs_full = 0;
}
void* slab_cache_alloc(slab_cache_t* cache)
{
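// Use the first (partially) free slab, or create a new one on demand.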
slab_t* slab = cache->slabs_free;
if(!cache->slabs_free) {
slab = slab_create(cache);
if (slab == NULL) {
return NULL;
}
}
return slab_alloc(slab);
}
int slab_cache_reap(slab_cache_t* cache)
{
// For now, just free empty slabs
slab_t* slab = cache->slabs_free;
int count = 0;
while (slab) {
slab_t* next = slab->next;
if (slab_isempty(slab)) {
slab_destroy(&slab);
++count;
}
slab = next;
}
cache->empty = 0;
return count;
}
/* Copyright (C) 2011 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*!
* \file slab.h
*
* \author Marek Vavrusa <marek.vavusa@nic.cz>
*
* \brief SLAB allocator.
*
* A SLAB cache works with either custom SLAB sizes or
* Next-Highest-Power-Of-2 sizes.
*
* The slab size is a multiple of PAGE_SIZE; larger blocks
* fall back to the system allocator.
*
* Allocated SLABs are PAGE_SIZE aligned for a fast O(1)
* address-from-item lookup. This results in nearly no memory
* overhead for very small blocks (<64B), but it requires the
* underlying allocator to be efficient at allocating page-size-aligned memory
* as well. The major disadvantage is that each slab must be aligned to its
* size, as opposed to using boundary tags.
*
* The slab implements a simple coloring mechanism to improve
* cache line utilisation.
*
* \ref SLAB_SIZE is the fixed size of a slab. As a rule of thumb, the slab is
* effective when the maximum allocated block size is below 1/4 of SLAB_SIZE,
* e.g. a 16kB slab is most effective up to a 4kB block size.
*
* The \ref MEM_POISON flag enables a protected guard region after allocated
* memory, so out-of-bounds reads/writes trigger a segmentation fault. This
* poses a significant time and space overhead; enable it only when debugging.
*
* \ref MEM_SLAB_CAP defines the maximum number of empty slabs that a cache
* may keep at a time. This results in a slight performance regression,
* but actively recycles unused memory.
*
* \ref MEM_DEPOT_COUNT defines how many recycled slabs are cached for later
* use instead of being returned immediately to the OS. This significantly
* reduces the number of syscalls in some cases,
* e.g. 16 means a 16 * SLAB_SIZE cache; for 16kB slabs that is a 256kB cache.
*
* \ref MEM_COLORING enables simple cache coloring. This is generally a useful
* feature since all slabs are page-size aligned and
* (depending on the architecture) it slightly improves performance
* and cache-line usage at the cost of at least 64 bytes of
* overhead per slab. Undefine MEM_COLORING in common.h to disable coloring.
*
* Optimal usage for a specific behavior (similar allocation sizes):
* \code
* slab_cache_t cache;
* slab_cache_init(&cache, N); // Initialize, N means cache chunk size
* ...
* void* mem = slab_cache_alloc(&cache); // Allocate N bytes
* ...
* slab_free(mem); // Recycle memory
* ...
* slab_cache_destroy(&cache); // Deinitialize cache
* \endcode
*
*
* \todo Allocate slab headers elsewhere and use just the first sizeof(void*)
* bytes in each slab as a pointer to the slab header. This could improve
* performance (issue #1583).
*
* \note Slab allocation is not thread safe for performance reasons.
*
* \addtogroup alloc
* @{
*/
#pragma once
#include <pthread.h>
#include <stdint.h>
/* Constants. */
#define SLAB_MINSIZE 4096 //!< Slab minimal size (4K blocks)
#define SLAB_MIN_BUFLEN 8 //!< Minimal allocation block size is 8B.
#define SLAB_DEPOT_SIZE 16 //!< At most N slabs cached = a cap of N * slab size bytes
struct slab_cache_t;
extern size_t SLAB_MASK;
/* Macros. */
/*! \brief Return slab base address from pointer. */
#define slab_from_ptr(p) ((void*)((size_t)(p) & SLAB_MASK))
/*! \brief Return true if slab is empty. */
#define slab_isempty(s) ((s)->bufs_free == (s)->bufs_count)
/*!
* \brief Slab descriptor.
*
* A slab is a block of memory used for allocating
* smaller objects (bufs).
* Each slab is currently aligned to the page size so that the
* slab address can easily be determined from a buf pointer.
*
* \warning Do not use slab_t directly as it cannot grow, see slab_cache_t.
*/
typedef struct slab_t {
char magic; /*!< Identifies memory block type. */
unsigned short bufsize; /*!< Slab bufsize. */
struct slab_cache_t *cache; /*!< Owner cache. */
struct slab_t *prev, *next; /*!< Neighbours in slab lists. */
unsigned bufs_count; /*!< Number of bufs in slab. */
unsigned bufs_free; /*!< Number of available bufs. */
void **head; /*!< Pointer to first available buf. */
char* base; /*!< Base address for bufs. */
} slab_t;
/*!
* \brief Slab depot.
*
* To mitigate slab initialization costs, depot keeps a finite number of
* stacked slabs before returning them to the system.
*/
typedef struct slab_depot_t {
size_t available; /*!< Number of available pages. */
slab_t* cache[SLAB_DEPOT_SIZE]; /*!< Stack of free slabs. */
} slab_depot_t;
/*!
* \brief Large object descriptor.
*
* Large object differs from slab with magic byte and
* contains object size.
*
* Magic needs to be first to overlap with slab_t magic byte.
*/
typedef struct slab_obj_t {
char magic; /*!< Identifies memory block type. */
size_t size; /*!< Object size. */
} slab_obj_t;
/*!
* \brief Slab cache descriptor.
*
* A slab cache is a list of 0..n slabs with the same buf size.
* It is responsible for tracking slab state.
*
* Once a slab is created, it is placed on the free list.
* When it becomes full, it is moved to the full list.
* Once a buf from a full slab is freed, the slab is moved back to
* the free list (with some hysteresis to mitigate
* sequential alloc/free patterns).
*
* Allocation of new slabs is on-demand, empty slabs are reused if possible.
*
* \note The slab implementation differs from Bonwick (Usenix 2001)
* http://www.usenix.org/event/usenix01/bonwick.html
* in that it doesn't feature separate empty and partial lists.
* This is due to the fact that a user-space allocator rarely
* needs to count free slabs; there is no way for the OS to
* notify the application that memory is scarce.
* A slight performance increase was measured in benchmarks.
*
* \note Statistics are only available if MEM_DEBUG is enabled.
*/
typedef struct slab_cache_t {
unsigned short color; /*!< Current cache color. */
unsigned short empty; /*!< Number of empty slabs. */
size_t bufsize; /*!< Cache object (buf) size. */
slab_t *slabs_free; /*!< List of free slabs. */
slab_t *slabs_full; /*!< List of full slabs. */
/* Statistics. */
unsigned long stat_allocs; /*!< Allocation count. */
unsigned long stat_frees; /*!< Free count. */
} slab_cache_t;
/*!
* \brief Create a slab of predefined size.
*
* At the moment, slabs are equal to the page size and page-size aligned.
* This enables a quick and efficient buf-to-slab lookup by pointer arithmetic.
*
* The slab uses a simple coloring scheme, and the memory block is always
* sizeof(void*) aligned.
*
* \param cache Parent cache.
* \retval Slab instance on success.
* \retval NULL on error.
*/
slab_t* slab_create(slab_cache_t* cache);
/*!
* \brief Destroy slab instance.
*
* Slab is disconnected from any list and freed.
* Dereferenced slab parameter is set to NULL.
*
* \param slab Pointer to given slab.
*/
void slab_destroy(slab_t** slab);
/*!
* \brief Allocate a buf from slab.
*
* Returns a pointer to allocated memory or NULL on error.
*
* \param slab Given slab instance.
* \retval Pointer to allocated memory.
* \retval NULL on error.
*/
void* slab_alloc(slab_t* slab);
/*!
* \brief Recycle memory.
*
* Given memory is returned to owner slab.
* Memory content may be rewritten.
*
* \param ptr Returned memory.
*/
void slab_free(void* ptr);
/*!
* \brief Create a slab cache.
*
* Create a slab cache with no allocated slabs.
* Slabs are allocated on-demand.
*
* \param cache Pointer to uninitialized cache.
* \param bufsize Single item size for later allocs.
* \retval 0 on success.
* \retval -1 on error.
*/
int slab_cache_init(slab_cache_t* cache, size_t bufsize);
/*!
* \brief Destroy a slab cache.
*
* Destroy a slab cache and all associated slabs.
*
* \param cache Pointer to slab cache.
*/
void slab_cache_destroy(slab_cache_t* cache);
/*!
* \brief Allocate from the cache.
*
* It tries to use partially free slabs first,
* empty slabs second, and allocates a new slab
* as a last resort.
*
* \param cache Given slab cache.
* \retval Pointer to allocated memory.
* \retval NULL on error.
*/
void* slab_cache_alloc(slab_cache_t* cache);
/*!
* \brief Free unused slabs from cache.
*
* \param cache Given slab cache.
* \return Number of freed slabs.
*/
int slab_cache_reap(slab_cache_t* cache);
/*! @} */
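Putting the declarations above together, a minimal end-to-end usage sketch might look as follows (an illustration only, not code from the repository; the 42-byte buf size is arbitrary):

#include <assert.h>
#include <string.h>
#include "common-knot/slab/slab.h"

int main(void)
{
    slab_cache_t cache;
    if (slab_cache_init(&cache, 42) != 0) {    /* cache of 42-byte bufs */
        return 1;
    }

    void *buf = slab_cache_alloc(&cache);      /* allocates a slab on demand */
    assert(buf != NULL);
    memset(buf, 0, 42);

    /* Page alignment lets any buf be mapped back to its owning slab. */
    slab_t *owner = slab_from_ptr(buf);
    assert(owner->cache == &cache);

    slab_free(buf);                            /* return buf to its slab */
    slab_cache_reap(&cache);                   /* drop now-empty slabs */
    slab_cache_destroy(&cache);
    return 0;
}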
@@ -34,7 +34,6 @@ rrl
rrset
rrset_wire
server
slab
wire
worker_pool
worker_queue
...
@@ -10,7 +10,6 @@ LDADD = \
check_PROGRAMS = \
journal \
slab \
hattrie \
hhash \
dthreads \
...