#include "slap.h"
+/*
+ * This allocator returns temporary memory from a slab in a given memory
+ * context, aligned on a 2-int boundary. It cannot be used for data
+ * which will outlive the task allocating it.
+ *
+ * A new memory context attaches to the creator's thread context, if any.
+ * Threads cannot use other threads' memory contexts; there are no locks.
+ *
+ * The caller of slap_sl_malloc, usually a thread pool task, must
+ * slap_sl_free the memory before finishing: New tasks reuse the context
+ * and normally reset it, reclaiming memory left over from last task.
+ *
+ * The allocator helps reduce memory fragmentation and memory leaks,
+ * and improves speed. It is not (yet) reliable as a garbage collector:
+ *
+ * It falls back to context NULL - plain ber_memalloc() - when the
+ * context's slab is full. A reset does not reclaim such memory.
+ * Conversely, free/realloc of data not from the given context assumes
+ * context NULL. The data must not belong to another memory context.
+ *
+ * Code which has lost track of the current memory context can try
+ * slap_sl_context() or ch_malloc.c:ch_free/ch_realloc().
+ *
+ * Allocations cannot yet return failure. Like ch_malloc, they succeed
+ * or abort slapd. This will change; fix any code which assumes success.
+ */
+
+/*
+ * The stack-based allocator stores (ber_len_t)sizeof(head+block) at
+ * allocated blocks' head - and in freed blocks also at the tail, marked
+ * by ORing *next* block's head with 1. Freed blocks are only reclaimed
+ * from the last block forward. This is fast, but when a block is never
+ * freed, older blocks will not be reclaimed until the slab is reset...
+ */
+
+#ifdef SLAP_NO_SL_MALLOC /* Useful with memory debuggers like Valgrind */
+enum { No_sl_malloc = 1 };	/* bypass the slab: every request falls through to ber_mem*() */
+#else
+enum { No_sl_malloc = 0 };
+#endif
+
+#define SLAP_SLAB_SOBLOCK 64	/* presumably slab_object descriptors per batch -- TODO confirm at use site */
+
+/* Descriptor tracking one memory block on a heap's free list or descriptor pool. */
+struct slab_object {
+	void *so_ptr;	/* memory block tracked by this descriptor */
+	int so_blockhead;	/* NOTE(review): flag, apparently marks so_ptr as the head of an allocated batch -- confirm */
+	LDAP_LIST_ENTRY(slab_object) so_link;	/* link in sh_free[] or sh_sopool */
+};
+
+/* Per-context slab; operates in stack mode (sh_stack) or buddy mode. */
+struct slab_heap {
+	void *sh_base;	/* start of the slab */
+	void *sh_last;	/* stack mode: allocation frontier (first free byte) */
+	void *sh_end;	/* one past the end of the slab */
+	int sh_stack;	/* nonzero: stack allocator; zero: buddy allocator */
+	int sh_maxorder;	/* buddy mode: highest block order tracked -- TODO confirm */
+	unsigned char **sh_map;	/* buddy mode: per-order allocation bitmaps */
+	LDAP_LIST_HEAD(sh_freelist, slab_object) *sh_free;	/* buddy mode: per-order free lists */
+	LDAP_LIST_HEAD(sh_so, slab_object) sh_sopool;	/* pool of spare slab_object descriptors */
+};
+
enum {
- Align = 2 * sizeof(int),
+ Align = sizeof(ber_len_t) > 2*sizeof(int)
+ ? sizeof(ber_len_t) : 2*sizeof(int),
Align_log2 = 1 + (Align>2) + (Align>4) + (Align>8) + (Align>16),
order_start = Align_log2 - 1,
pad = Align - 1
slap_sl_mem_init()
{
assert( Align == 1 << Align_log2 );
- /* Adding head+tail preserves alignment */
- assert( 2*sizeof(ber_len_t) % Align == 0 );
ber_set_option( NULL, LBER_OPT_MEMORY_FNS, &slap_sl_mfuncs );
}
-/* This allocator always returns memory aligned on a 2-int boundary.
- *
- * The stack-based allocator stores the size as a ber_len_t at both
- * the head and tail of the allocated block. When freeing a block, the
- * tail length is ORed with 1 to mark it as free. Freed space can only
- * be reclaimed from the tail forward. If the tail block is never freed,
- * nothing else will be reclaimed until the slab is reset...
- */
+/* Create, reset or just return the memory context of the current thread. */
void *
slap_sl_mem_create(
ber_len_t size,
struct slab_heap *sh;
ber_len_t size_shift;
struct slab_object *so;
+ enum { Base_offset = (unsigned) -sizeof(ber_len_t) % Align };
sh = GET_MEMCTX(thrctx, &memctx);
if ( sh && !new )
sh->sh_stack = stack;
if (stack) {
- /* insert dummy len */
- {
- ber_len_t *i = sh->sh_base;
- *i++ = 0;
- sh->sh_last = i;
- }
+ /* Align first returned block (sh_last + head) */
+ sh->sh_last = (char *) sh->sh_base + Base_offset;
+
} else {
int i, order = -1, order_end = -1;
return sh;
}
+/*
+ * Separate memory context from thread context. Future users must
+ * know the context, since ch_free/slap_sl_context() cannot find it.
+ */
void
slap_sl_mem_detach(
	void *thrctx,
	void *memctx
)
{
-	/* separate from context */
+	/* NOTE(review): memctx is not referenced here; only the thread's
+	 * context pointer is cleared -- confirm that is intended. */
	SET_MEMCTX(thrctx, NULL, 0);
}
struct slab_heap *sh = ctx;
ber_len_t *ptr, *newptr;
-#ifdef SLAP_NO_SL_MALLOC
- newptr = ber_memalloc_x( size, NULL );
- if ( newptr ) return newptr;
- assert( 0 );
- exit( EXIT_FAILURE );
-#endif
-
/* ber_set_option calls us like this */
- if (!ctx) {
+ if (No_sl_malloc || !ctx) {
newptr = ber_memalloc_x( size, NULL );
if ( newptr ) return newptr;
+ Debug(LDAP_DEBUG_ANY, "slap_sl_malloc of %lu bytes failed\n",
+ (unsigned long) size, 0, 0);
assert( 0 );
exit( EXIT_FAILURE );
}
- /* round up to doubleword boundary, plus space for len at head and tail */
- size = (size + 2*sizeof(ber_len_t) + Align-1) & -Align;
+ /* Add room for head, ensure room for tail when freed, and
+ * round up to doubleword boundary. */
+ size = (size + sizeof(ber_len_t) + Align-1 + !size) & -Align;
if (sh->sh_stack) {
- if ((char *)sh->sh_last + size >= (char *)sh->sh_end) {
- Debug(LDAP_DEBUG_TRACE,
- "slap_sl_malloc of %lu bytes failed, using ch_malloc\n",
- (long)size, 0, 0);
- return ch_malloc(size);
+ if (size < (ber_len_t) ((char *) sh->sh_end - (char *) sh->sh_last)) {
+ newptr = sh->sh_last;
+ sh->sh_last = (char *) sh->sh_last + size;
+ *newptr++ = size;
+ return( (void *)newptr );
}
- newptr = sh->sh_last;
- sh->sh_last = (char *) sh->sh_last + size;
+
size -= sizeof(ber_len_t);
- *newptr++ = size;
- *(ber_len_t *)((char *)sh->sh_last - sizeof(ber_len_t)) = size;
- return( (void *)newptr );
+
} else {
struct slab_object *so_new, *so_left, *so_right;
ber_len_t size_shift;
order++;
} while (size_shift >>= 1);
+ size -= sizeof(ber_len_t);
+
for (i = order; i <= sh->sh_maxorder &&
LDAP_LIST_EMPTY(&sh->sh_free[i-order_start]); i++);
diff = (unsigned long)((char*)ptr -
(char*)sh->sh_base) >> (order + 1);
sh->sh_map[order-order_start][diff>>3] |= (1 << (diff & 0x7));
- *ptr++ = size - sizeof(ber_len_t);
+ *ptr++ = size;
LDAP_LIST_INSERT_HEAD(&sh->sh_sopool, so_new, so_link);
return((void*)ptr);
} else if (i <= sh->sh_maxorder) {
(char*)sh->sh_base) >> (order+1);
sh->sh_map[order-order_start][diff>>3] |=
(1 << (diff & 0x7));
- *ptr++ = size - sizeof(ber_len_t);
+ *ptr++ = size;
LDAP_LIST_INSERT_HEAD(
&sh->sh_free[j-1-order_start], so_right, so_link);
LDAP_LIST_INSERT_HEAD(&sh->sh_sopool, so_left, so_link);
&sh->sh_free[j-1-order_start], so_left, so_link);
}
}
- } else {
- Debug( LDAP_DEBUG_TRACE,
- "sl_malloc %lu: ch_malloc\n",
- (long)size, 0, 0);
- return (void*)ch_malloc(size);
}
+ /* FIXME: missing return; guessing we failed... */
}
- /* FIXME: missing return; guessing... */
- return NULL;
+ Debug(LDAP_DEBUG_TRACE,
+ "sl_malloc %lu: ch_malloc\n",
+ (unsigned long) size, 0, 0);
+ return ch_malloc(size);
}
+#define LIM_SQRT(t) /* some value < sqrt(max value of unsigned type t) */ \
+	((0UL|(t)-1) >>31>>31 > 1 ? ((t)1 <<32) - 1 : \
+	 (0UL|(t)-1) >>31 ? 65535U : (0UL|(t)-1) >>15 ? 255U : 15U)
+
void *
slap_sl_calloc( ber_len_t n, ber_len_t size, void *ctx )
{
	void *newptr;
+	ber_len_t total = n * size;	/* unsigned multiply: may wrap; overflow is detected below */
+
-	newptr = slap_sl_malloc( n*size, ctx );
-	if ( newptr ) {
+	/* The sqrt test is a slight optimization: often avoids the division */
+	if ((n | size) <= LIM_SQRT(ber_len_t) || n == 0 || total/n == size) {
+		newptr = slap_sl_malloc( total, ctx );
		memset( newptr, 0, n*size );
+	} else {
+		/* n*size overflowed ber_len_t: refuse rather than under-allocate */
+		Debug(LDAP_DEBUG_ANY, "slap_sl_calloc(%lu,%lu) out of range\n",
+			(unsigned long) n, (unsigned long) size, 0);
+		assert(0);
+		exit(EXIT_FAILURE);
+	}
	return newptr;
}
slap_sl_realloc(void *ptr, ber_len_t size, void *ctx)
{
struct slab_heap *sh = ctx;
- ber_len_t oldsize, *p = (ber_len_t *) ptr;
+ ber_len_t oldsize, *p = (ber_len_t *) ptr, *nextp;
void *newptr;
if (ptr == NULL)
return slap_sl_malloc(size, ctx);
-#ifdef SLAP_NO_SL_MALLOC
- newptr = ber_memrealloc_x( ptr, size, NULL );
- if ( newptr ) return newptr;
- assert( 0 );
- exit( EXIT_FAILURE );
-#endif
-
/* Not our memory? */
- if (!sh || ptr < sh->sh_base || ptr >= sh->sh_end) {
- /* duplicate of realloc behavior, oh well */
+ if (No_sl_malloc || !sh || ptr < sh->sh_base || ptr >= sh->sh_end) {
+ /* Like ch_realloc(), except not trying a new context */
newptr = ber_memrealloc_x(ptr, size, NULL);
if (newptr) {
return newptr;
}
- Debug(LDAP_DEBUG_ANY, "ch_realloc of %lu bytes failed\n",
- (long) size, 0, 0);
+ Debug(LDAP_DEBUG_ANY, "slap_sl_realloc of %lu bytes failed\n",
+ (unsigned long) size, 0, 0);
assert(0);
exit( EXIT_FAILURE );
}
oldsize = p[-1];
if (sh->sh_stack) {
- /* Round up to doubleword boundary, add room for head */
- size = ((size + Align-1) & -Align) + sizeof( ber_len_t );
+ /* Add room for head, round up to doubleword boundary */
+ size = (size + sizeof(ber_len_t) + Align-1) & -Align;
p--;
/* Never shrink blocks */
if (size <= oldsize) {
return ptr;
+ }
+ oldsize &= -2;
+ nextp = (ber_len_t *) ((char *) p + oldsize);
+
/* If reallocing the last block, try to grow it */
- } else if ((char *) ptr + oldsize == sh->sh_last) {
- if (size < (char *) sh->sh_end - (char *) ptr) {
- sh->sh_last = (char *) ptr + size;
- p[0] = size;
- p[size/sizeof(ber_len_t)] = size;
+ if (nextp == sh->sh_last) {
+ if (size < (ber_len_t) ((char *) sh->sh_end - (char *) p)) {
+ sh->sh_last = (char *) p + size;
+ p[0] = (p[0] & 1) | size;
return ptr;
}
/* Nowhere to grow, need to alloc and copy */
} else {
/* Slight optimization of the final realloc variant */
- size -= sizeof(ber_len_t);
- oldsize -= sizeof(ber_len_t);
- newptr = slap_sl_malloc(size, ctx);
- AC_MEMCPY(newptr, ptr, oldsize);
+ newptr = slap_sl_malloc(size-sizeof(ber_len_t), ctx);
+ AC_MEMCPY(newptr, ptr, oldsize-sizeof(ber_len_t));
/* Not last block, can just mark old region as free */
- p[p[0]/sizeof(ber_len_t)] |= 1;
+ nextp[-1] = oldsize;
+ nextp[0] |= 1;
return newptr;
}
{
struct slab_heap *sh = ctx;
ber_len_t size;
- ber_len_t *p = (ber_len_t *)ptr, *tmpp;
+ ber_len_t *p = ptr, *nextp, *tmpp;
if (!ptr)
return;
-#ifdef SLAP_NO_SL_MALLOC
- ber_memfree_x( ptr, NULL );
- return;
-#endif
-
- if (!sh || ptr < sh->sh_base || ptr >= sh->sh_end) {
+ if (No_sl_malloc || !sh || ptr < sh->sh_base || ptr >= sh->sh_end) {
ber_memfree_x(ptr, NULL);
+ return;
+ }
+
+ size = *(--p);
- } else if (sh->sh_stack) {
- size = p[-1];
- p = (ber_len_t *) ((char *) ptr + size);
- /* mark it free */
- p[-1] = size |= 1;
- /* reclaim free space off tail */
- if (sh->sh_last == p) {
- do {
- p = (ber_len_t *) ((char *) p - size + 1) - 1;
- size = p[-1];
- } while (size & 1);
+ if (sh->sh_stack) {
+ size &= -2;
+ nextp = (ber_len_t *) ((char *) p + size);
+ if (sh->sh_last != nextp) {
+ /* Mark it free: tail = size, head of next block |= 1 */
+ nextp[-1] = size;
+ nextp[0] |= 1;
+ } else {
+ /* Reclaim freed block(s) off tail */
+ while (*p & 1) {
+ p = (ber_len_t *) ((char *) p - p[-1]);
+ }
sh->sh_last = p;
}
unsigned long diff;
int i, inserted = 0, order = -1;
- size = *(--p);
size_shift = size + sizeof(ber_len_t) - 1;
do {
order++;
}
}
+/*
+ * Return the memory context of the current thread if the given block of
+ * memory belongs to it, otherwise return NULL.
+ */
void *
slap_sl_context( void *ptr )
{
Debug(level, "free list:\n", 0, 0, 0);
so = LDAP_LIST_FIRST(&sh->sh_free[i-order_start]);
while (so) {
- Debug(level, "%lx\n", (unsigned long) so->so_ptr, 0, 0);
+ Debug(level, "%p\n", so->so_ptr, 0, 0);
so = LDAP_LIST_NEXT(so, so_link);
}
}