#include "slap.h"
+#ifdef USE_VALGRIND
+/* Get debugging help from Valgrind */
+#include <valgrind/memcheck.h>
+#define VGMEMP_MARK(m,s) VALGRIND_MAKE_MEM_NOACCESS(m,s)
+#define VGMEMP_CREATE(h,r,z) VALGRIND_CREATE_MEMPOOL(h,r,z)
+#define VGMEMP_TRIM(h,a,s) VALGRIND_MEMPOOL_TRIM(h,a,s)
+#define VGMEMP_ALLOC(h,a,s) VALGRIND_MEMPOOL_ALLOC(h,a,s)
+#define VGMEMP_CHANGE(h,a,b,s) VALGRIND_MEMPOOL_CHANGE(h,a,b,s)
+#else
+#define VGMEMP_MARK(m,s)
+#define VGMEMP_CREATE(h,r,z)
+#define VGMEMP_TRIM(h,a,s)
+#define VGMEMP_ALLOC(h,a,s)
+#define VGMEMP_CHANGE(h,a,b,s)
+#endif
+
/*
* This allocator returns temporary memory from a slab in a given memory
* context, aligned on a 2-int boundary. It cannot be used for data
/*
* The stack-based allocator stores (ber_len_t)sizeof(head+block) at
- * the head and tail of each allocated block. The tail length of a freed
- * block is ORed with 1 to mark it free. Freed blocks are only reclaimed
+ * the head of each allocated block - and in freed blocks also at the
+ * tail.  A block is marked free by ORing the *next* block's head with
+ * 1.  Freed blocks are only reclaimed
* from the last block forward. This is fast, but when a block is never
* freed, older blocks will not be reclaimed until the slab is reset...
*/
+#ifdef SLAP_NO_SL_MALLOC /* Useful with memory debuggers like Valgrind */
+enum { No_sl_malloc = 1 };
+#else
+enum { No_sl_malloc = 0 };
+#endif
+
+#define SLAP_SLAB_SOBLOCK 64
+
+struct slab_object {
+ void *so_ptr;
+ int so_blockhead;
+ LDAP_LIST_ENTRY(slab_object) so_link;
+};
+
+struct slab_heap {
+ void *sh_base;
+ void *sh_last;
+ void *sh_end;
+ int sh_stack;
+ int sh_maxorder;
+ unsigned char **sh_map;
+ LDAP_LIST_HEAD(sh_freelist, slab_object) *sh_free;
+ LDAP_LIST_HEAD(sh_so, slab_object) sh_sopool;
+};
+
enum {
Align = sizeof(ber_len_t) > 2*sizeof(int)
? sizeof(ber_len_t) : 2*sizeof(int),
#ifdef NO_THREADS
static struct slab_heap *slheap;
# define SET_MEMCTX(thrctx, memctx, sfree) ((void) (slheap = (memctx)))
-# define GET_MEMCTX(thrctx, memctxp) (*(memctxp) = slheap))
+# define GET_MEMCTX(thrctx, memctxp) (*(memctxp) = slheap)
#else
# define memctx_key ((void *) slap_sl_mem_init)
# define SET_MEMCTX(thrctx, memctx, kfree) \
slap_sl_mem_init()
{
assert( Align == 1 << Align_log2 );
- /* Adding head+tail preserves alignment */
- assert( 2*sizeof(ber_len_t) % Align == 0 );
ber_set_option( NULL, LBER_OPT_MEMORY_FNS, &slap_sl_mfuncs );
}
struct slab_heap *sh;
ber_len_t size_shift;
struct slab_object *so;
+ char *base, *newptr;
+ enum { Base_offset = (unsigned) -sizeof(ber_len_t) % Align };
sh = GET_MEMCTX(thrctx, &memctx);
if ( sh && !new )
return sh;
- /* round up to doubleword boundary */
- size = (size + Align-1) & -Align;
+ /* Round up to doubleword boundary, then make room for initial
+ * padding, preserving expected available size for pool version */
+ size = ((size + Align-1) & -Align) + Base_offset;
if (!sh) {
sh = ch_malloc(sizeof(struct slab_heap));
- sh->sh_base = ch_malloc(size);
+ base = ch_malloc(size);
SET_MEMCTX(thrctx, sh, slap_sl_mem_destroy);
+ VGMEMP_MARK(base, size);
+ VGMEMP_CREATE(sh, 0, 0);
} else {
slap_sl_mem_destroy(NULL, sh);
- if ( size > (char *)sh->sh_end - (char *)sh->sh_base ) {
- void *newptr;
-
- newptr = ch_realloc( sh->sh_base, size );
+ base = sh->sh_base;
+ if (size > (ber_len_t) ((char *) sh->sh_end - base)) {
+ newptr = ch_realloc(base, size);
if ( newptr == NULL ) return NULL;
- sh->sh_base = newptr;
+ VGMEMP_CHANGE(sh, base, newptr, size);
+ base = newptr;
}
+ VGMEMP_TRIM(sh, sh->sh_base, 0);
}
- sh->sh_end = (char *) sh->sh_base + size;
+ sh->sh_base = base;
+ sh->sh_end = base + size;
+
+ /* Align (base + head of first block) == first returned block */
+ base += Base_offset;
+ size -= Base_offset;
sh->sh_stack = stack;
if (stack) {
- /* insert dummy len */
- {
- ber_len_t *i = sh->sh_base;
- *i++ = 0;
- sh->sh_last = i;
- }
+ sh->sh_last = base;
+
} else {
int i, order = -1, order_end = -1;
}
so = LDAP_LIST_FIRST(&sh->sh_sopool);
LDAP_LIST_REMOVE(so, so_link);
- so->so_ptr = sh->sh_base;
+ so->so_ptr = base;
LDAP_LIST_INSERT_HEAD(&sh->sh_free[order-1], so, so_link);
memset(sh->sh_map[i], 0, nummaps);
}
}
+
return sh;
}
struct slab_heap *sh = ctx;
ber_len_t *ptr, *newptr;
-#ifdef SLAP_NO_SL_MALLOC
- newptr = ber_memalloc_x( size, NULL );
- if ( newptr ) return newptr;
- assert( 0 );
- exit( EXIT_FAILURE );
-#endif
-
/* ber_set_option calls us like this */
- if (!ctx) {
+ if (No_sl_malloc || !ctx) {
newptr = ber_memalloc_x( size, NULL );
if ( newptr ) return newptr;
Debug(LDAP_DEBUG_ANY, "slap_sl_malloc of %lu bytes failed\n",
exit( EXIT_FAILURE );
}
- /* round up to doubleword boundary, plus space for len at head and tail */
- size = (size + 2*sizeof(ber_len_t) + Align-1) & -Align;
+ /* Add room for head, ensure room for tail when freed, and
+ * round up to doubleword boundary. */
+ size = (size + sizeof(ber_len_t) + Align-1 + !size) & -Align;
if (sh->sh_stack) {
if (size < (ber_len_t) ((char *) sh->sh_end - (char *) sh->sh_last)) {
newptr = sh->sh_last;
sh->sh_last = (char *) sh->sh_last + size;
- size -= sizeof(ber_len_t);
+ VGMEMP_ALLOC(sh, newptr, size);
*newptr++ = size;
- ((ber_len_t *) sh->sh_last)[-1] = size;
return( (void *)newptr );
}
- size -= 2*sizeof(ber_len_t);
+ size -= sizeof(ber_len_t);
} else {
struct slab_object *so_new, *so_left, *so_right;
slap_sl_realloc(void *ptr, ber_len_t size, void *ctx)
{
struct slab_heap *sh = ctx;
- ber_len_t oldsize, *p = (ber_len_t *) ptr;
+ ber_len_t oldsize, *p = (ber_len_t *) ptr, *nextp;
void *newptr;
if (ptr == NULL)
return slap_sl_malloc(size, ctx);
-#ifdef SLAP_NO_SL_MALLOC
- newptr = ber_memrealloc_x( ptr, size, NULL );
- if ( newptr ) return newptr;
- assert( 0 );
- exit( EXIT_FAILURE );
-#endif
-
/* Not our memory? */
- if (!sh || ptr < sh->sh_base || ptr >= sh->sh_end) {
+ if (No_sl_malloc || !sh || ptr < sh->sh_base || ptr >= sh->sh_end) {
/* Like ch_realloc(), except not trying a new context */
newptr = ber_memrealloc_x(ptr, size, NULL);
if (newptr) {
oldsize = p[-1];
if (sh->sh_stack) {
- /* Round up to doubleword boundary, add room for head */
- size = ((size + Align-1) & -Align) + sizeof( ber_len_t );
+ /* Add room for head, round up to doubleword boundary */
+ size = (size + sizeof(ber_len_t) + Align-1) & -Align;
p--;
/* Never shrink blocks */
if (size <= oldsize) {
return ptr;
+ }
+ oldsize &= -2;
+ nextp = (ber_len_t *) ((char *) p + oldsize);
+
/* If reallocing the last block, try to grow it */
- } else if ((char *) ptr + oldsize == sh->sh_last) {
- if (size < (char *) sh->sh_end - (char *) ptr) {
- sh->sh_last = (char *) ptr + size;
- p[0] = size;
- p[size/sizeof(ber_len_t)] = size;
+ if (nextp == sh->sh_last) {
+ if (size < (ber_len_t) ((char *) sh->sh_end - (char *) p)) {
+ sh->sh_last = (char *) p + size;
+ p[0] = (p[0] & 1) | size;
return ptr;
}
/* Nowhere to grow, need to alloc and copy */
} else {
/* Slight optimization of the final realloc variant */
- size -= sizeof(ber_len_t);
- oldsize -= sizeof(ber_len_t);
- newptr = slap_sl_malloc(size, ctx);
- AC_MEMCPY(newptr, ptr, oldsize);
+ newptr = slap_sl_malloc(size-sizeof(ber_len_t), ctx);
+ AC_MEMCPY(newptr, ptr, oldsize-sizeof(ber_len_t));
/* Not last block, can just mark old region as free */
- p[p[0]/sizeof(ber_len_t)] |= 1;
+ nextp[-1] = oldsize;
+ nextp[0] |= 1;
return newptr;
}
{
struct slab_heap *sh = ctx;
ber_len_t size;
- ber_len_t *p = (ber_len_t *)ptr, *tmpp;
+ ber_len_t *p = ptr, *nextp, *tmpp;
if (!ptr)
return;
-#ifdef SLAP_NO_SL_MALLOC
- ber_memfree_x( ptr, NULL );
- return;
-#endif
-
- if (!sh || ptr < sh->sh_base || ptr >= sh->sh_end) {
+ if (No_sl_malloc || !sh || ptr < sh->sh_base || ptr >= sh->sh_end) {
ber_memfree_x(ptr, NULL);
+ return;
+ }
- } else if (sh->sh_stack) {
- size = p[-1];
- p = (ber_len_t *) ((char *) ptr + size);
- /* mark it free */
- p[-1] = size |= 1;
- /* reclaim free space off tail */
- if (sh->sh_last == p) {
- do {
- p = (ber_len_t *) ((char *) p - size + 1) - 1;
- size = p[-1];
- } while (size & 1);
+ size = *(--p);
+
+ if (sh->sh_stack) {
+ size &= -2;
+ nextp = (ber_len_t *) ((char *) p + size);
+ if (sh->sh_last != nextp) {
+ /* Mark it free: tail = size, head of next block |= 1 */
+ nextp[-1] = size;
+ nextp[0] |= 1;
+ /* We can't tell Valgrind about it yet, because we
+ * still need read/write access to this block for
+ * when we eventually get to reclaim it.
+ */
+ } else {
+ /* Reclaim freed block(s) off tail */
+ while (*p & 1) {
+ p = (ber_len_t *) ((char *) p - p[-1]);
+ }
sh->sh_last = p;
+ VGMEMP_TRIM(sh, sh->sh_base, sh->sh_last - sh->sh_base);
}
} else {
unsigned long diff;
int i, inserted = 0, order = -1;
- size = *(--p);
size_shift = size + sizeof(ber_len_t) - 1;
do {
order++;