1 /* sl_malloc.c - malloc routines using a per-thread slab */
3 /* This work is part of OpenLDAP Software <http://www.openldap.org/>.
5 * Copyright 2003-2014 The OpenLDAP Foundation.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted only as authorized by the OpenLDAP
12 * A copy of this license is available in the file LICENSE in the
13 * top-level directory of the distribution or, alternatively, at
14 * <http://www.OpenLDAP.org/license.html>.
20 #include <ac/string.h>
25 /* Get debugging help from Valgrind */
26 #include <valgrind/memcheck.h>
/* Map the allocator's pool hooks onto Valgrind's mempool client requests,
 * so Valgrind can track slab allocations as if they were heap blocks. */
27 #define VGMEMP_MARK(m,s) VALGRIND_MAKE_MEM_NOACCESS(m,s)
28 #define VGMEMP_CREATE(h,r,z) VALGRIND_CREATE_MEMPOOL(h,r,z)
29 #define VGMEMP_TRIM(h,a,s) VALGRIND_MEMPOOL_TRIM(h,a,s)
30 #define VGMEMP_ALLOC(h,a,s) VALGRIND_MEMPOOL_ALLOC(h,a,s)
31 #define VGMEMP_CHANGE(h,a,b,s) VALGRIND_MEMPOOL_CHANGE(h,a,b,s)
/* No-op stubs for builds without Valgrind support.
 * NOTE(review): the #if/#else/#endif separating the two sets of
 * definitions is not visible in this excerpt — confirm against the
 * full file. */
33 #define VGMEMP_MARK(m,s)
34 #define VGMEMP_CREATE(h,r,z)
35 #define VGMEMP_TRIM(h,a,s)
36 #define VGMEMP_ALLOC(h,a,s)
37 #define VGMEMP_CHANGE(h,a,b,s)
41 * This allocator returns temporary memory from a slab in a given memory
42 * context, aligned on a 2-int boundary. It cannot be used for data
43 * which will outlive the task allocating it.
45 * A new memory context attaches to the creator's thread context, if any.
46 * Threads cannot use other threads' memory contexts; there are no locks.
48 * The caller of slap_sl_malloc, usually a thread pool task, must
49 * slap_sl_free the memory before finishing: New tasks reuse the context
50 * and normally reset it, reclaiming memory left over from last task.
52 * The allocator helps reduce memory fragmentation and memory leaks,
 * and improves speed.
53 * It is not (yet) reliable as a garbage collector:
55 * It falls back to context NULL - plain ber_memalloc() - when the
56 * context's slab is full. A reset does not reclaim such memory.
57 * Conversely, free/realloc of data not from the given context assumes
58 * context NULL. The data must not belong to another memory context.
60 * Code which has lost track of the current memory context can try
61 * slap_sl_context() or ch_malloc.c:ch_free/ch_realloc().
63 * Allocations cannot yet return failure. Like ch_malloc, they succeed
64 * or abort slapd. This will change, do fix code which assumes success.
68 * The stack-based allocator stores (ber_len_t)sizeof(head+block) at
69 * allocated blocks' head - and in freed blocks also at the tail, marked
70 * by ORing *next* block's head with 1. Freed blocks are only reclaimed
71 * from the last block forward. This is fast, but when a block is never
72 * freed, older blocks will not be reclaimed until the slab is reset...
/* Compile-time switch: when SLAP_NO_SL_MALLOC is defined, every entry
 * point falls through to plain ber_mem* allocation (useful under
 * memory debuggers).  NOTE(review): the #else/#endif lines of this
 * conditional are not visible in this excerpt. */
75 #ifdef SLAP_NO_SL_MALLOC /* Useful with memory debuggers like Valgrind */
76 enum { No_sl_malloc = 1 };
78 enum { No_sl_malloc = 0 };
/* Number of slab_object bookkeeping entries allocated per batch
 * in slap_replenish_sopool(). */
81 #define SLAP_SLAB_SOBLOCK 64
/* Linkage for slab_object entries on free lists / the sopool. */
86 	LDAP_LIST_ENTRY(slab_object) so_link;
/* Per-order buddy bitmaps: one byte-array per order, one bit per chunk. */
95 	unsigned char **sh_map;
/* Per-order free lists for the buddy (non-stack) slab variant. */
96 	LDAP_LIST_HEAD(sh_freelist, slab_object) *sh_free;
/* Pool of spare slab_object entries, recycled to avoid churn. */
97 	LDAP_LIST_HEAD(sh_so, slab_object) sh_sopool;
/* Align = max(sizeof(ber_len_t), 2*sizeof(int)): the block alignment.
 * Align_log2 counts log2(Align) via the chain of comparisons below;
 * order_start is the smallest buddy order handled. */
101 	Align = sizeof(ber_len_t) > 2*sizeof(int)
102 		? sizeof(ber_len_t) : 2*sizeof(int),
103 	Align_log2 = 1 + (Align>2) + (Align>4) + (Align>8) + (Align>16),
104 	order_start = Align_log2 - 1,
/* Forward declarations for the sopool refill helper and debug dump. */
108 static struct slab_object * slap_replenish_sopool(struct slab_heap* sh);
110 static void print_slheap(int level, void *ctx);
113 /* Keep memory context in a thread-local var, or in a global when no threads */
/* Single-threaded build: the context lives in one static pointer and the
 * thread-context / destructor arguments are ignored. */
115 static struct slab_heap *slheap;
116 # define SET_MEMCTX(thrctx, memctx, sfree) ((void) (slheap = (memctx)))
117 # define GET_MEMCTX(thrctx, memctxp) (*(memctxp) = slheap)
/* Threaded build: the context is stored as thread-pool key data, keyed
 * by a unique address (slap_sl_mem_init) and freed via kfree. */
119 # define memctx_key ((void *) slap_sl_mem_init)
120 # define SET_MEMCTX(thrctx, memctx, kfree) \
121 	ldap_pvt_thread_pool_setkey(thrctx,memctx_key, memctx,kfree, NULL,NULL)
122 # define GET_MEMCTX(thrctx, memctxp) \
123 	((void) (*(memctxp) = NULL), \
124 	 (void) ldap_pvt_thread_pool_getkey(thrctx,memctx_key, memctxp,NULL), \
126 #endif /* NO_THREADS */
129 /* Destroy the context, or if key==NULL clean it up for reuse. */
136 	struct slab_heap *sh = data;
137 	struct slab_object *so;
/* Return every buddy free-list entry to the sopool, then release the
 * per-order bitmap and the free-list array.
 * NOTE(review): this excerpt elides surrounding lines — presumably this
 * whole section runs only for the buddy (non-stack) variant; confirm. */
141 	for (i = 0; i <= sh->sh_maxorder - order_start; i++) {
142 		so = LDAP_LIST_FIRST(&sh->sh_free[i]);
144 			struct slab_object *so_tmp = so;
145 			so = LDAP_LIST_NEXT(so, so_link);
146 			LDAP_LIST_INSERT_HEAD(&sh->sh_sopool, so_tmp, so_link);
148 		ch_free(sh->sh_map[i]);
150 	ch_free(sh->sh_free);
/* First pass: unlink non-blockhead entries, leaving only the heads of
 * each SLAP_SLAB_SOBLOCK batch (the actual ch_malloc'd pointers). */
153 	so = LDAP_LIST_FIRST(&sh->sh_sopool);
155 		struct slab_object *so_tmp = so;
156 		so = LDAP_LIST_NEXT(so, so_link);
157 		if (!so_tmp->so_blockhead) {
158 			LDAP_LIST_REMOVE(so_tmp, so_link);
/* Second pass: walk the remaining blockheads (freed in elided lines). */
161 	so = LDAP_LIST_FIRST(&sh->sh_sopool);
163 		struct slab_object *so_tmp = so;
164 		so = LDAP_LIST_NEXT(so, so_link);
/* Finally release the slab itself and the heap descriptor. */
170 	ber_memfree_x(sh->sh_base, NULL);
171 	ber_memfree_x(sh, NULL);
/* Function table handed to liblber so ber_mem* calls route through
 * the slab allocator. */
175 BerMemoryFunctions slap_sl_mfuncs =
176 	{ slap_sl_malloc, slap_sl_calloc, slap_sl_realloc, slap_sl_free };
/* Init: sanity-check the Align/Align_log2 pair, then install the table.
 * NOTE(review): the enclosing function header is elided in this excerpt. */
181 	assert( Align == 1 << Align_log2 );
183 	ber_set_option( NULL, LBER_OPT_MEMORY_FNS, &slap_sl_mfuncs );
186 /* Create, reset or just return the memory context of the current thread. */
196 	struct slab_heap *sh;
197 	ber_len_t size_shift;
198 	struct slab_object *so;
/* Padding before the first block head so that (base + head) lands on
 * an Align boundary. */
200 	enum { Base_offset = (unsigned) -sizeof(ber_len_t) % Align };
202 	sh = GET_MEMCTX(thrctx, &memctx);
206 	/* Round up to doubleword boundary, then make room for initial
207 	 * padding, preserving expected available size for pool version */
208 	size = ((size + Align-1) & -Align) + Base_offset;
/* No existing context: allocate heap descriptor + slab, register the
 * destructor with the thread context, and tell Valgrind about it. */
211 		sh = ch_malloc(sizeof(struct slab_heap));
212 		base = ch_malloc(size);
213 		SET_MEMCTX(thrctx, sh, slap_sl_mem_destroy);
214 		VGMEMP_MARK(base, size);
215 		VGMEMP_CREATE(sh, 0, 0);
/* Existing context: clean it for reuse, growing the slab if the
 * requested size no longer fits. */
217 		slap_sl_mem_destroy(NULL, sh);
219 		if (size > (ber_len_t) ((char *) sh->sh_end - base)) {
220 			newptr = ch_realloc(base, size);
221 			if ( newptr == NULL ) return NULL;
222 			VGMEMP_CHANGE(sh, base, newptr, size);
225 		VGMEMP_TRIM(sh, base, 0);
228 	sh->sh_end = base + size;
230 	/* Align (base + head of first block) == first returned block */
234 	sh->sh_stack = stack;
/* Buddy variant setup (stack == 0): find the largest order covering
 * the slab, build per-order free lists, seed the top order with one
 * entry spanning the whole slab, and zero the per-order bitmaps. */
239 		int i, order = -1, order_end = -1;
241 		size_shift = size - 1;
244 		} while (size_shift >>= 1);
245 		order = order_end - order_start + 1;
246 		sh->sh_maxorder = order_end;
248 		sh->sh_free = (struct sh_freelist *)
249 			ch_malloc(order * sizeof(struct sh_freelist));
250 		for (i = 0; i < order; i++) {
251 			LDAP_LIST_INIT(&sh->sh_free[i]);
254 		LDAP_LIST_INIT(&sh->sh_sopool);
256 		if (LDAP_LIST_EMPTY(&sh->sh_sopool)) {
257 			slap_replenish_sopool(sh);
259 		so = LDAP_LIST_FIRST(&sh->sh_sopool);
260 		LDAP_LIST_REMOVE(so, so_link);
263 		LDAP_LIST_INSERT_HEAD(&sh->sh_free[order-1], so, so_link);
265 		sh->sh_map = (unsigned char **)
266 			ch_malloc(order * sizeof(unsigned char *));
267 		for (i = 0; i < order; i++) {
268 			int shiftamt = order_start + 1 + i;
269 			int nummaps = size >> shiftamt;
/* At least one bitmap byte per order, even for tiny slabs. */
272 			if (!nummaps) nummaps = 1;
273 			sh->sh_map[i] = (unsigned char *) ch_malloc(nummaps);
274 			memset(sh->sh_map[i], 0, nummaps);
282  * Separate memory context from thread context.  Future users must
283  * know the context, since ch_free/slap_sl_context() cannot find it.
/* Drop the thread-context key without running the destructor. */
291 	SET_MEMCTX(thrctx, NULL, 0);
/* Allocate size bytes from the context's slab; falls back to
 * ber_memalloc_x(NULL) when there is no context.  Aborts slapd on
 * out-of-memory (see the design comment at the top of the file). */
300 	struct slab_heap *sh = ctx;
301 	ber_len_t *ptr, *newptr;
303 	/* ber_set_option calls us like this */
304 	if (No_sl_malloc || !ctx) {
305 		newptr = ber_memalloc_x( size, NULL );
306 		if ( newptr ) return newptr;
307 		Debug(LDAP_DEBUG_ANY, "slap_sl_malloc of %lu bytes failed\n",
308 			(unsigned long) size, 0, 0);
310 		exit( EXIT_FAILURE );
313 	/* Add room for head, ensure room for tail when freed, and
314 	 * round up to doubleword boundary. */
315 	size = (size + sizeof(ber_len_t) + Align-1 + !size) & -Align;
/* Stack variant fast path: bump sh_last if the block fits.
 * NOTE(review): the head-store and user-pointer adjustment lines are
 * elided here; confirm against the full file. */
318 	if (size < (ber_len_t) ((char *) sh->sh_end - (char *) sh->sh_last)) {
319 		newptr = sh->sh_last;
320 		sh->sh_last = (char *) sh->sh_last + size;
321 		VGMEMP_ALLOC(sh, newptr, size);
323 		return( (void *)newptr );
326 	size -= sizeof(ber_len_t);
/* Buddy variant: compute the order for this request, then take the
 * smallest non-empty free list at or above it. */
329 		struct slab_object *so_new, *so_left, *so_right;
330 		ber_len_t size_shift;
332 		int i, j, order = -1;
334 		size_shift = size - 1;
337 		} while (size_shift >>= 1);
339 		size -= sizeof(ber_len_t);
341 		for (i = order; i <= sh->sh_maxorder &&
342 			LDAP_LIST_EMPTY(&sh->sh_free[i-order_start]); i++);
/* Exact-order hit: pop the entry, mark its bit used. */
345 			so_new = LDAP_LIST_FIRST(&sh->sh_free[i-order_start]);
346 			LDAP_LIST_REMOVE(so_new, so_link);
347 			ptr = so_new->so_ptr;
348 			diff = (unsigned long)((char*)ptr -
349 				(char*)sh->sh_base) >> (order + 1);
350 			sh->sh_map[order-order_start][diff>>3] |= (1 << (diff & 0x7));
352 			LDAP_LIST_INSERT_HEAD(&sh->sh_sopool, so_new, so_link);
/* Larger-order hit: split buddies down to the requested order,
 * pushing right halves back on the free lists. */
354 		} else if (i <= sh->sh_maxorder) {
355 			for (j = i; j > order; j--) {
356 				so_left = LDAP_LIST_FIRST(&sh->sh_free[j-order_start]);
357 				LDAP_LIST_REMOVE(so_left, so_link);
358 				if (LDAP_LIST_EMPTY(&sh->sh_sopool)) {
359 					slap_replenish_sopool(sh);
361 				so_right = LDAP_LIST_FIRST(&sh->sh_sopool);
362 				LDAP_LIST_REMOVE(so_right, so_link);
363 				so_right->so_ptr = (void *)((char *)so_left->so_ptr + (1 << j));
364 				if (j == order + 1) {
365 					ptr = so_left->so_ptr;
366 					diff = (unsigned long)((char*)ptr -
367 						(char*)sh->sh_base) >> (order+1);
368 					sh->sh_map[order-order_start][diff>>3] |=
371 					LDAP_LIST_INSERT_HEAD(
372 						&sh->sh_free[j-1-order_start], so_right, so_link);
373 					LDAP_LIST_INSERT_HEAD(&sh->sh_sopool, so_left, so_link);
376 					LDAP_LIST_INSERT_HEAD(
377 						&sh->sh_free[j-1-order_start], so_right, so_link);
378 					LDAP_LIST_INSERT_HEAD(
379 						&sh->sh_free[j-1-order_start], so_left, so_link);
383 	/* FIXME: missing return; guessing we failed... */
/* Slab exhausted: fall back to the ch_malloc heap (never reclaimed
 * by a context reset — see the design comment at the top). */
386 	Debug(LDAP_DEBUG_TRACE,
387 		"sl_malloc %lu: ch_malloc\n",
388 		(unsigned long) size, 0, 0);
389 	return ch_malloc(size);
/* Picks a constant below sqrt(max of unsigned type t) by probing the
 * type's width with shifts: 64-bit -> 2^32-1, 32-bit -> 65535,
 * 16-bit -> 255, else 15.  Used to cheaply rule out n*size overflow. */
392 #define LIM_SQRT(t) /* some value < sqrt(max value of unsigned type t) */ \
393 	((0UL|(t)-1) >>31>>31 > 1 ? ((t)1 <<32) - 1 : \
394 	 (0UL|(t)-1) >>31 ? 65535U : (0UL|(t)-1) >>15 ? 255U : 15U)
/* calloc over the slab: overflow-checked n*size, zero-filled. */
397 slap_sl_calloc( ber_len_t n, ber_len_t size, void *ctx )
400 	ber_len_t total = n * size;
402 	/* The sqrt test is a slight optimization: often avoids the division */
403 	if ((n | size) <= LIM_SQRT(ber_len_t) || n == 0 || total/n == size) {
404 		newptr = slap_sl_malloc( total, ctx );
405 		memset( newptr, 0, n*size );
/* Multiplication overflowed: refuse rather than under-allocate. */
407 		Debug(LDAP_DEBUG_ANY, "slap_sl_calloc(%lu,%lu) out of range\n",
408 			(unsigned long) n, (unsigned long) size, 0);
/* realloc over the slab.  ptr==NULL degenerates to malloc; memory not
 * from this context is handed to ber_memrealloc_x(NULL).  Aborts on
 * out-of-memory like the other entry points. */
416 slap_sl_realloc(void *ptr, ber_len_t size, void *ctx)
418 	struct slab_heap *sh = ctx;
419 	ber_len_t oldsize, *p = (ber_len_t *) ptr, *nextp;
423 		return slap_sl_malloc(size, ctx);
425 	/* Not our memory? */
426 	if (No_sl_malloc || !sh || ptr < sh->sh_base || ptr >= sh->sh_end) {
427 		/* Like ch_realloc(), except not trying a new context */
428 		newptr = ber_memrealloc_x(ptr, size, NULL);
432 		Debug(LDAP_DEBUG_ANY, "slap_sl_realloc of %lu bytes failed\n",
433 			(unsigned long) size, 0, 0);
435 		exit( EXIT_FAILURE );
/* size == 0: realloc-to-zero means free. */
439 		slap_sl_free(ptr, ctx);
446 	/* Add room for head, round up to doubleword boundary */
447 	size = (size + sizeof(ber_len_t) + Align-1) & -Align;
451 	/* Never shrink blocks */
452 	if (size <= oldsize) {
457 	nextp = (ber_len_t *) ((char *) p + oldsize);
459 	/* If reallocing the last block, try to grow it */
460 	if (nextp == sh->sh_last) {
461 		if (size < (ber_len_t) ((char *) sh->sh_end - (char *) p)) {
462 			sh->sh_last = (char *) p + size;
/* Keep the low "freed" bit of the head while updating the size. */
463 			p[0] = (p[0] & 1) | size;
467 	/* Nowhere to grow, need to alloc and copy */
469 	/* Slight optimization of the final realloc variant */
470 	newptr = slap_sl_malloc(size-sizeof(ber_len_t), ctx);
471 	AC_MEMCPY(newptr, ptr, oldsize-sizeof(ber_len_t));
472 	/* Not last block, can just mark old region as free */
/* Buddy-variant path below: sizes exclude the head word.
 * NOTE(review): intervening lines are elided in this excerpt. */
478 	size -= sizeof(ber_len_t);
479 	oldsize -= sizeof(ber_len_t);
481 	} else if (oldsize > size) {
485 	newptr = slap_sl_malloc(size, ctx);
486 	AC_MEMCPY(newptr, ptr, oldsize);
487 	slap_sl_free(ptr, ctx);
/* Free a block.  Memory not from this context goes to ber_memfree_x.
 * Stack variant: mark the block free and reclaim any run of freed
 * blocks off the slab tail.  Buddy variant: clear the bit and coalesce
 * with the buddy chunk, propagating to higher orders. */
492 slap_sl_free(void *ptr, void *ctx)
494 	struct slab_heap *sh = ctx;
496 	ber_len_t *p = ptr, *nextp, *tmpp;
501 	if (No_sl_malloc || !sh || ptr < sh->sh_base || ptr >= sh->sh_end) {
502 		ber_memfree_x(ptr, NULL);
510 	nextp = (ber_len_t *) ((char *) p + size);
511 	if (sh->sh_last != nextp) {
512 		/* Mark it free: tail = size, head of next block |= 1 */
515 		/* We can't tell Valgrind about it yet, because we
516 		 * still need read/write access to this block for
517 		 * when we eventually get to reclaim it.
520 		/* Reclaim freed block(s) off tail */
/* Walk backward using the tail size word stored at p[-1]. */
522 			p = (ber_len_t *) ((char *) p - p[-1]);
525 		VGMEMP_TRIM(sh, sh->sh_base,
526 			(char *) sh->sh_last - (char *) sh->sh_base);
/* ---- Buddy variant ---- */
530 		int size_shift, order_size;
531 		struct slab_object *so;
533 		int i, inserted = 0, order = -1;
/* Recompute this block's order from its stored size + head word. */
535 		size_shift = size + sizeof(ber_len_t) - 1;
538 		} while (size_shift >>= 1);
540 		for (i = order, tmpp = p; i <= sh->sh_maxorder; i++) {
541 			order_size = 1 << (i+1);
542 			diff = (unsigned long)((char*)tmpp - (char*)sh->sh_base) >> (i+1);
543 			sh->sh_map[i-order_start][diff>>3] &= (~(1 << (diff & 0x7)));
/* Even chunk index: the buddy is the chunk to the RIGHT (diff+1). */
544 			if (diff == ((diff>>1)<<1)) {
545 				if (!(sh->sh_map[i-order_start][(diff+1)>>3] &
546 					(1<<((diff+1)&0x7)))) {
/* Buddy bit clear: find it on the free list and merge upward. */
547 					so = LDAP_LIST_FIRST(&sh->sh_free[i-order_start]);
549 						if ((char*)so->so_ptr == (char*)tmpp) {
550 							LDAP_LIST_REMOVE( so, so_link );
551 						} else if ((char*)so->so_ptr ==
552 							(char*)tmpp + order_size) {
553 							LDAP_LIST_REMOVE(so, so_link);
556 						so = LDAP_LIST_NEXT(so, so_link);
559 					if (i < sh->sh_maxorder) {
/* Promote the merged chunk to the next order's free list. */
562 						LDAP_LIST_INSERT_HEAD(&sh->sh_free[i-order_start+1],
/* Buddy not found on the list despite a clear bit: just insert
 * the chunk at this order (diagnostic below). */
567 						if (LDAP_LIST_EMPTY(&sh->sh_sopool)) {
568 							slap_replenish_sopool(sh);
570 						so = LDAP_LIST_FIRST(&sh->sh_sopool);
571 						LDAP_LIST_REMOVE(so, so_link);
573 						LDAP_LIST_INSERT_HEAD(&sh->sh_free[i-order_start],
577 						Debug(LDAP_DEBUG_TRACE, "slap_sl_free: "
578 							"free object not found while bit is clear.\n",
/* Buddy still allocated: insert at this order and stop coalescing. */
585 					if (LDAP_LIST_EMPTY(&sh->sh_sopool)) {
586 						slap_replenish_sopool(sh);
588 					so = LDAP_LIST_FIRST(&sh->sh_sopool);
589 					LDAP_LIST_REMOVE(so, so_link);
591 					LDAP_LIST_INSERT_HEAD(&sh->sh_free[i-order_start],
/* Odd chunk index: the buddy is the chunk to the LEFT (diff-1). */
597 				if (!(sh->sh_map[i-order_start][(diff-1)>>3] &
598 					(1<<((diff-1)&0x7)))) {
599 					so = LDAP_LIST_FIRST(&sh->sh_free[i-order_start]);
601 						if ((char*)so->so_ptr == (char*)tmpp) {
602 							LDAP_LIST_REMOVE(so, so_link);
603 						} else if ((char*)tmpp == (char *)so->so_ptr + order_size) {
604 							LDAP_LIST_REMOVE(so, so_link);
608 						so = LDAP_LIST_NEXT(so, so_link);
611 					if (i < sh->sh_maxorder) {
613 						LDAP_LIST_INSERT_HEAD(&sh->sh_free[i-order_start+1], so, so_link);
617 						if (LDAP_LIST_EMPTY(&sh->sh_sopool)) {
618 							slap_replenish_sopool(sh);
620 						so = LDAP_LIST_FIRST(&sh->sh_sopool);
621 						LDAP_LIST_REMOVE(so, so_link);
623 						LDAP_LIST_INSERT_HEAD(&sh->sh_free[i-order_start],
627 						Debug(LDAP_DEBUG_TRACE, "slap_sl_free: "
628 							"free object not found while bit is clear.\n",
635 					if (LDAP_LIST_EMPTY(&sh->sh_sopool)) {
636 						slap_replenish_sopool(sh);
638 					so = LDAP_LIST_FIRST(&sh->sh_sopool);
639 					LDAP_LIST_REMOVE(so, so_link);
641 					LDAP_LIST_INSERT_HEAD(&sh->sh_free[i-order_start],
652  * Return the memory context of the current thread if the given block of
653  * memory belongs to it, otherwise return NULL.
656 slap_sl_context( void *ptr )
659 	struct slab_heap *sh;
/* Tool mode (slapadd etc.) never uses per-thread slabs. */
661 	if ( slapMode & SLAP_TOOL_MODE ) return NULL;
663 	sh = GET_MEMCTX(ldap_pvt_thread_pool_context(), &memctx);
/* Membership test by address range against this thread's slab. */
664 	if (sh && ptr >= sh->sh_base && ptr <= sh->sh_end) {
/* Refill sh_sopool with a batch of SLAP_SLAB_SOBLOCK slab_object
 * entries from one ch_malloc block.  Entry [0] is flagged so_blockhead
 * so the destroy path knows which pointer to free. */
670 static struct slab_object *
671 slap_replenish_sopool(
675 	struct slab_object *so_block;
678 	so_block = (struct slab_object *)ch_malloc(
679 		SLAP_SLAB_SOBLOCK * sizeof(struct slab_object));
681 	if ( so_block == NULL ) {
685 	so_block[0].so_blockhead = 1;
686 	LDAP_LIST_INSERT_HEAD(&sh->sh_sopool, &so_block[0], so_link);
687 	for (i = 1; i < SLAP_SLAB_SOBLOCK; i++) {
688 		so_block[i].so_blockhead = 0;
689 		LDAP_LIST_INSERT_HEAD(&sh->sh_sopool, &so_block[i], so_link );
/* Debug dump of a buddy-variant heap: per-order bitmaps and free lists,
 * emitted at the given Debug level. */
697 print_slheap(int level, void *ctx)
699 	struct slab_heap *sh = ctx;
700 	struct slab_object *so;
704 		Debug(level, "NULL memctx\n", 0, 0, 0);
708 	Debug(level, "sh->sh_maxorder=%d\n", sh->sh_maxorder, 0, 0);
710 	for (i = order_start; i <= sh->sh_maxorder; i++) {
712 		Debug(level, "order=%d\n", i, 0, 0);
713 		for (j = 0; j < (1<<(sh->sh_maxorder-i))/8; j++) {
714 			Debug(level, "%02x ", sh->sh_map[i-order_start][j], 0, 0);
/* Orders with fewer than 8 chunks still have one bitmap byte. */
718 			Debug(level, "%02x ", sh->sh_map[i-order_start][0], 0, 0);
720 		Debug(level, "\n", 0, 0, 0);
721 		Debug(level, "free list:\n", 0, 0, 0);
722 		so = LDAP_LIST_FIRST(&sh->sh_free[i-order_start]);
724 			Debug(level, "%p\n", so->so_ptr, 0, 0);
725 			so = LDAP_LIST_NEXT(so, so_link);