1 /* sl_malloc.c - malloc routines using a per-thread slab */
3 /* This work is part of OpenLDAP Software <http://www.openldap.org/>.
5 * Copyright 2003-2010 The OpenLDAP Foundation.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted only as authorized by the OpenLDAP
12 * A copy of this license is available in the file LICENSE in the
13 * top-level directory of the distribution or, alternatively, at
14 * <http://www.OpenLDAP.org/license.html>.
20 #include <ac/string.h>
25 * This allocator returns temporary memory from a slab in a given memory
26 * context, aligned on a 2-int boundary. It cannot be used for data
27 * which will outlive the task allocating it.
29 * A new memory context attaches to the creator's thread context, if any.
30 * Threads cannot use other threads' memory contexts; there are no locks.
32 * The caller of slap_sl_malloc, usually a thread pool task, must
33 * slap_sl_free the memory before finishing: New tasks reuse the context
34 * and normally reset it, reclaiming memory left over from last task.
 * The allocator reduces memory fragmentation and memory leaks, and improves speed.
37 * It is not (yet) reliable as a garbage collector:
39 * It falls back to context NULL - plain ber_memalloc() - when the
40 * context's slab is full. A reset does not reclaim such memory.
41 * Conversely, free/realloc of data not from the given context assumes
42 * context NULL. The data must not belong to another memory context.
44 * Code which has lost track of the current memory context can try
45 * slap_sl_context() or ch_malloc.c:ch_free/ch_realloc().
47 * Allocations cannot yet return failure. Like ch_malloc, they succeed
48 * or abort slapd. This will change, do fix code which assumes success.
52 * The stack-based allocator stores (ber_len_t)sizeof(head+block) at
53 * allocated blocks' head - and in freed blocks also at the tail, marked
54 * by ORing *next* block's head with 1. Freed blocks are only reclaimed
55 * from the last block forward. This is fast, but when a block is never
56 * freed, older blocks will not be reclaimed until the slab is reset...
59 #ifdef SLAP_NO_SL_MALLOC /* Useful with memory debuggers like Valgrind */
60 enum { No_sl_malloc = 1 };
62 enum { No_sl_malloc = 0 };
65 #define SLAP_SLAB_SOBLOCK 64
70 LDAP_LIST_ENTRY(slab_object) so_link;
79 unsigned char **sh_map;
80 LDAP_LIST_HEAD(sh_freelist, slab_object) *sh_free;
81 LDAP_LIST_HEAD(sh_so, slab_object) sh_sopool;
85 Align = sizeof(ber_len_t) > 2*sizeof(int)
86 ? sizeof(ber_len_t) : 2*sizeof(int),
87 Align_log2 = 1 + (Align>2) + (Align>4) + (Align>8) + (Align>16),
88 order_start = Align_log2 - 1,
92 static struct slab_object * slap_replenish_sopool(struct slab_heap* sh);
94 static void print_slheap(int level, void *ctx);
97 /* Keep memory context in a thread-local var, or in a global when no threads */
99 static struct slab_heap *slheap;
100 # define SET_MEMCTX(thrctx, memctx, sfree) ((void) (slheap = (memctx)))
/* No-threads build: the "per-thread" context is one file-scope global.
 * NOTE(review): the original expansion had an unbalanced trailing ')'
 * -- "(*(memctxp) = slheap))" -- which cannot preprocess into valid C;
 * fixed to a single balanced parenthesized expression. */
# define GET_MEMCTX(thrctx, memctxp)	(*(memctxp) = slheap)
103 # define memctx_key ((void *) slap_sl_mem_init)
104 # define SET_MEMCTX(thrctx, memctx, kfree) \
105 ldap_pvt_thread_pool_setkey(thrctx,memctx_key, memctx,kfree, NULL,NULL)
106 # define GET_MEMCTX(thrctx, memctxp) \
107 ((void) (*(memctxp) = NULL), \
108 (void) ldap_pvt_thread_pool_getkey(thrctx,memctx_key, memctxp,NULL), \
110 #endif /* NO_THREADS */
113 /* Destroy the context, or if key==NULL clean it up for reuse. */
120 struct slab_heap *sh = data;
121 struct slab_object *so;
125 for (i = 0; i <= sh->sh_maxorder - order_start; i++) {
126 so = LDAP_LIST_FIRST(&sh->sh_free[i]);
128 struct slab_object *so_tmp = so;
129 so = LDAP_LIST_NEXT(so, so_link);
130 LDAP_LIST_INSERT_HEAD(&sh->sh_sopool, so_tmp, so_link);
132 ch_free(sh->sh_map[i]);
134 ch_free(sh->sh_free);
137 so = LDAP_LIST_FIRST(&sh->sh_sopool);
139 struct slab_object *so_tmp = so;
140 so = LDAP_LIST_NEXT(so, so_link);
141 if (!so_tmp->so_blockhead) {
142 LDAP_LIST_REMOVE(so_tmp, so_link);
145 so = LDAP_LIST_FIRST(&sh->sh_sopool);
147 struct slab_object *so_tmp = so;
148 so = LDAP_LIST_NEXT(so, so_link);
154 ber_memfree_x(sh->sh_base, NULL);
155 ber_memfree_x(sh, NULL);
/* liblber memory-function vector: routes liblber's allocations through the
 * slab allocator (installed via ber_set_option(LBER_OPT_MEMORY_FNS) below). */
BerMemoryFunctions slap_sl_mfuncs =
	{ slap_sl_malloc, slap_sl_calloc, slap_sl_realloc, slap_sl_free };
165 assert( Align == 1 << Align_log2 );
167 ber_set_option( NULL, LBER_OPT_MEMORY_FNS, &slap_sl_mfuncs );
170 /* Create, reset or just return the memory context of the current thread. */
180 struct slab_heap *sh;
181 ber_len_t size_shift;
182 struct slab_object *so;
184 enum { Base_offset = (unsigned) -sizeof(ber_len_t) % Align };
186 sh = GET_MEMCTX(thrctx, &memctx);
190 /* Round up to doubleword boundary, then make room for initial
191 * padding, preserving expected available size for pool version */
192 size = ((size + Align-1) & -Align) + Base_offset;
195 sh = ch_malloc(sizeof(struct slab_heap));
196 base = ch_malloc(size);
197 SET_MEMCTX(thrctx, sh, slap_sl_mem_destroy);
199 slap_sl_mem_destroy(NULL, sh);
201 if (size > (ber_len_t) ((char *) sh->sh_end - base)) {
202 newptr = ch_realloc(base, size);
203 if ( newptr == NULL ) return NULL;
208 sh->sh_end = base + size;
210 /* Align (base + head of first block) == first returned block */
214 sh->sh_stack = stack;
219 int i, order = -1, order_end = -1;
221 size_shift = size - 1;
224 } while (size_shift >>= 1);
225 order = order_end - order_start + 1;
226 sh->sh_maxorder = order_end;
228 sh->sh_free = (struct sh_freelist *)
229 ch_malloc(order * sizeof(struct sh_freelist));
230 for (i = 0; i < order; i++) {
231 LDAP_LIST_INIT(&sh->sh_free[i]);
234 LDAP_LIST_INIT(&sh->sh_sopool);
236 if (LDAP_LIST_EMPTY(&sh->sh_sopool)) {
237 slap_replenish_sopool(sh);
239 so = LDAP_LIST_FIRST(&sh->sh_sopool);
240 LDAP_LIST_REMOVE(so, so_link);
243 LDAP_LIST_INSERT_HEAD(&sh->sh_free[order-1], so, so_link);
245 sh->sh_map = (unsigned char **)
246 ch_malloc(order * sizeof(unsigned char *));
247 for (i = 0; i < order; i++) {
248 int shiftamt = order_start + 1 + i;
249 int nummaps = size >> shiftamt;
252 if (!nummaps) nummaps = 1;
253 sh->sh_map[i] = (unsigned char *) ch_malloc(nummaps);
254 memset(sh->sh_map[i], 0, nummaps);
262 * Separate memory context from thread context. Future users must
263 * know the context, since ch_free/slap_sl_context() cannot find it.
271 SET_MEMCTX(thrctx, NULL, 0);
280 struct slab_heap *sh = ctx;
281 ber_len_t *ptr, *newptr;
283 /* ber_set_option calls us like this */
284 if (No_sl_malloc || !ctx) {
285 newptr = ber_memalloc_x( size, NULL );
286 if ( newptr ) return newptr;
287 Debug(LDAP_DEBUG_ANY, "slap_sl_malloc of %lu bytes failed\n",
288 (unsigned long) size, 0, 0);
290 exit( EXIT_FAILURE );
293 /* Add room for head, ensure room for tail when freed, and
294 * round up to doubleword boundary. */
295 size = (size + sizeof(ber_len_t) + Align-1 + !size) & -Align;
298 if (size < (ber_len_t) ((char *) sh->sh_end - (char *) sh->sh_last)) {
299 newptr = sh->sh_last;
300 sh->sh_last = (char *) sh->sh_last + size;
302 return( (void *)newptr );
305 size -= sizeof(ber_len_t);
308 struct slab_object *so_new, *so_left, *so_right;
309 ber_len_t size_shift;
311 int i, j, order = -1;
313 size_shift = size - 1;
316 } while (size_shift >>= 1);
318 size -= sizeof(ber_len_t);
320 for (i = order; i <= sh->sh_maxorder &&
321 LDAP_LIST_EMPTY(&sh->sh_free[i-order_start]); i++);
324 so_new = LDAP_LIST_FIRST(&sh->sh_free[i-order_start]);
325 LDAP_LIST_REMOVE(so_new, so_link);
326 ptr = so_new->so_ptr;
327 diff = (unsigned long)((char*)ptr -
328 (char*)sh->sh_base) >> (order + 1);
329 sh->sh_map[order-order_start][diff>>3] |= (1 << (diff & 0x7));
331 LDAP_LIST_INSERT_HEAD(&sh->sh_sopool, so_new, so_link);
333 } else if (i <= sh->sh_maxorder) {
334 for (j = i; j > order; j--) {
335 so_left = LDAP_LIST_FIRST(&sh->sh_free[j-order_start]);
336 LDAP_LIST_REMOVE(so_left, so_link);
337 if (LDAP_LIST_EMPTY(&sh->sh_sopool)) {
338 slap_replenish_sopool(sh);
340 so_right = LDAP_LIST_FIRST(&sh->sh_sopool);
341 LDAP_LIST_REMOVE(so_right, so_link);
342 so_right->so_ptr = (void *)((char *)so_left->so_ptr + (1 << j));
343 if (j == order + 1) {
344 ptr = so_left->so_ptr;
345 diff = (unsigned long)((char*)ptr -
346 (char*)sh->sh_base) >> (order+1);
347 sh->sh_map[order-order_start][diff>>3] |=
350 LDAP_LIST_INSERT_HEAD(
351 &sh->sh_free[j-1-order_start], so_right, so_link);
352 LDAP_LIST_INSERT_HEAD(&sh->sh_sopool, so_left, so_link);
355 LDAP_LIST_INSERT_HEAD(
356 &sh->sh_free[j-1-order_start], so_right, so_link);
357 LDAP_LIST_INSERT_HEAD(
358 &sh->sh_free[j-1-order_start], so_left, so_link);
362 /* FIXME: missing return; guessing we failed... */
365 Debug(LDAP_DEBUG_TRACE,
366 "slap_sl_malloc of %lu bytes falling back to ch_malloc\n",
367 (unsigned long) size, 0, 0);
368 return ch_malloc(size);
/* LIM_SQRT(t): compile-time constant below sqrt(max value of unsigned type t),
 * selected by probing t's width with right shifts.  Used by slap_sl_calloc()
 * so that when both factors are small the n*size overflow check can skip the
 * division ("The sqrt test is a slight optimization" -- see caller). */
#define LIM_SQRT(t) /* some value < sqrt(max value of unsigned type t) */ \
((0UL|(t)-1) >>31>>31 > 1 ? ((t)1 <<32) - 1 : \
(0UL|(t)-1) >>31 ? 65535U : (0UL|(t)-1) >>15 ? 255U : 15U)
376 slap_sl_calloc( ber_len_t n, ber_len_t size, void *ctx )
379 ber_len_t total = n * size;
381 /* The sqrt test is a slight optimization: often avoids the division */
382 if ((n | size) <= LIM_SQRT(ber_len_t) || n == 0 || total/n == size) {
383 newptr = slap_sl_malloc( total, ctx );
384 memset( newptr, 0, n*size );
386 Debug(LDAP_DEBUG_ANY, "slap_sl_calloc(%lu,%lu) out of range\n",
387 (unsigned long) n, (unsigned long) size, 0);
395 slap_sl_realloc(void *ptr, ber_len_t size, void *ctx)
397 struct slab_heap *sh = ctx;
398 ber_len_t oldsize, *p = (ber_len_t *) ptr, *nextp;
402 return slap_sl_malloc(size, ctx);
404 /* Not our memory? */
405 if (No_sl_malloc || !sh || ptr < sh->sh_base || ptr >= sh->sh_end) {
406 /* Like ch_realloc(), except not trying a new context */
407 newptr = ber_memrealloc_x(ptr, size, NULL);
411 Debug(LDAP_DEBUG_ANY, "slap_sl_realloc of %lu bytes failed\n",
412 (unsigned long) size, 0, 0);
414 exit( EXIT_FAILURE );
418 slap_sl_free(ptr, ctx);
425 /* Add room for head, round up to doubleword boundary */
426 size = (size + sizeof(ber_len_t) + Align-1) & -Align;
430 /* Never shrink blocks */
431 if (size <= oldsize) {
436 nextp = (ber_len_t *) ((char *) p + oldsize);
438 /* If reallocing the last block, try to grow it */
439 if (nextp == sh->sh_last) {
440 if (size < (ber_len_t) ((char *) sh->sh_end - (char *) p)) {
441 sh->sh_last = (char *) p + size;
442 p[0] = (p[0] & 1) | size;
446 /* Nowhere to grow, need to alloc and copy */
448 /* Slight optimization of the final realloc variant */
449 newptr = slap_sl_malloc(size-sizeof(ber_len_t), ctx);
450 AC_MEMCPY(newptr, ptr, oldsize-sizeof(ber_len_t));
451 /* Not last block, can just mark old region as free */
457 size -= sizeof(ber_len_t);
458 oldsize -= sizeof(ber_len_t);
460 } else if (oldsize > size) {
464 newptr = slap_sl_malloc(size, ctx);
465 AC_MEMCPY(newptr, ptr, oldsize);
466 slap_sl_free(ptr, ctx);
471 slap_sl_free(void *ptr, void *ctx)
473 struct slab_heap *sh = ctx;
475 ber_len_t *p = ptr, *nextp, *tmpp;
480 if (No_sl_malloc || !sh || ptr < sh->sh_base || ptr >= sh->sh_end) {
481 ber_memfree_x(ptr, NULL);
489 nextp = (ber_len_t *) ((char *) p + size);
490 if (sh->sh_last != nextp) {
491 /* Mark it free: tail = size, head of next block |= 1 */
495 /* Reclaim freed block(s) off tail */
497 p = (ber_len_t *) ((char *) p - p[-1]);
503 int size_shift, order_size;
504 struct slab_object *so;
506 int i, inserted = 0, order = -1;
508 size_shift = size + sizeof(ber_len_t) - 1;
511 } while (size_shift >>= 1);
513 for (i = order, tmpp = p; i <= sh->sh_maxorder; i++) {
514 order_size = 1 << (i+1);
515 diff = (unsigned long)((char*)tmpp - (char*)sh->sh_base) >> (i+1);
516 sh->sh_map[i-order_start][diff>>3] &= (~(1 << (diff & 0x7)));
517 if (diff == ((diff>>1)<<1)) {
518 if (!(sh->sh_map[i-order_start][(diff+1)>>3] &
519 (1<<((diff+1)&0x7)))) {
520 so = LDAP_LIST_FIRST(&sh->sh_free[i-order_start]);
522 if ((char*)so->so_ptr == (char*)tmpp) {
523 LDAP_LIST_REMOVE( so, so_link );
524 } else if ((char*)so->so_ptr ==
525 (char*)tmpp + order_size) {
526 LDAP_LIST_REMOVE(so, so_link);
529 so = LDAP_LIST_NEXT(so, so_link);
532 if (i < sh->sh_maxorder) {
535 LDAP_LIST_INSERT_HEAD(&sh->sh_free[i-order_start+1],
540 if (LDAP_LIST_EMPTY(&sh->sh_sopool)) {
541 slap_replenish_sopool(sh);
543 so = LDAP_LIST_FIRST(&sh->sh_sopool);
544 LDAP_LIST_REMOVE(so, so_link);
546 LDAP_LIST_INSERT_HEAD(&sh->sh_free[i-order_start],
550 Debug(LDAP_DEBUG_TRACE, "slap_sl_free: "
551 "free object not found while bit is clear.\n",
558 if (LDAP_LIST_EMPTY(&sh->sh_sopool)) {
559 slap_replenish_sopool(sh);
561 so = LDAP_LIST_FIRST(&sh->sh_sopool);
562 LDAP_LIST_REMOVE(so, so_link);
564 LDAP_LIST_INSERT_HEAD(&sh->sh_free[i-order_start],
570 if (!(sh->sh_map[i-order_start][(diff-1)>>3] &
571 (1<<((diff-1)&0x7)))) {
572 so = LDAP_LIST_FIRST(&sh->sh_free[i-order_start]);
574 if ((char*)so->so_ptr == (char*)tmpp) {
575 LDAP_LIST_REMOVE(so, so_link);
576 } else if ((char*)tmpp == (char *)so->so_ptr + order_size) {
577 LDAP_LIST_REMOVE(so, so_link);
581 so = LDAP_LIST_NEXT(so, so_link);
584 if (i < sh->sh_maxorder) {
586 LDAP_LIST_INSERT_HEAD(&sh->sh_free[i-order_start+1], so, so_link);
590 if (LDAP_LIST_EMPTY(&sh->sh_sopool)) {
591 slap_replenish_sopool(sh);
593 so = LDAP_LIST_FIRST(&sh->sh_sopool);
594 LDAP_LIST_REMOVE(so, so_link);
596 LDAP_LIST_INSERT_HEAD(&sh->sh_free[i-order_start],
600 Debug(LDAP_DEBUG_TRACE, "slap_sl_free: "
601 "free object not found while bit is clear.\n",
608 if (LDAP_LIST_EMPTY(&sh->sh_sopool)) {
609 slap_replenish_sopool(sh);
611 so = LDAP_LIST_FIRST(&sh->sh_sopool);
612 LDAP_LIST_REMOVE(so, so_link);
614 LDAP_LIST_INSERT_HEAD(&sh->sh_free[i-order_start],
625 * Return the memory context of the current thread if the given block of
626 * memory belongs to it, otherwise return NULL.
629 slap_sl_context( void *ptr )
632 struct slab_heap *sh;
634 if ( slapMode & SLAP_TOOL_MODE ) return NULL;
636 sh = GET_MEMCTX(ldap_pvt_thread_pool_context(), &memctx);
637 if (sh && ptr >= sh->sh_base && ptr <= sh->sh_end) {
643 static struct slab_object *
644 slap_replenish_sopool(
648 struct slab_object *so_block;
651 so_block = (struct slab_object *)ch_malloc(
652 SLAP_SLAB_SOBLOCK * sizeof(struct slab_object));
654 if ( so_block == NULL ) {
658 so_block[0].so_blockhead = 1;
659 LDAP_LIST_INSERT_HEAD(&sh->sh_sopool, &so_block[0], so_link);
660 for (i = 1; i < SLAP_SLAB_SOBLOCK; i++) {
661 so_block[i].so_blockhead = 0;
662 LDAP_LIST_INSERT_HEAD(&sh->sh_sopool, &so_block[i], so_link );
670 print_slheap(int level, void *ctx)
672 struct slab_heap *sh = ctx;
673 struct slab_object *so;
677 Debug(level, "NULL memctx\n", 0, 0, 0);
681 Debug(level, "sh->sh_maxorder=%d\n", sh->sh_maxorder, 0, 0);
683 for (i = order_start; i <= sh->sh_maxorder; i++) {
685 Debug(level, "order=%d\n", i, 0, 0);
686 for (j = 0; j < (1<<(sh->sh_maxorder-i))/8; j++) {
687 Debug(level, "%02x ", sh->sh_map[i-order_start][j], 0, 0);
691 Debug(level, "%02x ", sh->sh_map[i-order_start][0], 0, 0);
693 Debug(level, "\n", 0, 0, 0);
694 Debug(level, "free list:\n", 0, 0, 0);
695 so = LDAP_LIST_FIRST(&sh->sh_free[i-order_start]);
697 Debug(level, "%p\n", so->so_ptr, 0, 0);
698 so = LDAP_LIST_NEXT(so, so_link);