1 /* sl_malloc.c - malloc routines using a per-thread slab */
3 /* This work is part of OpenLDAP Software <http://www.openldap.org/>.
5 * Copyright 2003-2015 The OpenLDAP Foundation.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted only as authorized by the OpenLDAP
12 * A copy of this license is available in the file LICENSE in the
13 * top-level directory of the distribution or, alternatively, at
14 * <http://www.OpenLDAP.org/license.html>.
20 #include <ac/string.h>
25 /* Get debugging help from Valgrind */
26 #include <valgrind/memcheck.h>
27 #define VGMEMP_MARK(m,s) VALGRIND_MAKE_MEM_NOACCESS(m,s)
28 #define VGMEMP_CREATE(h,r,z) VALGRIND_CREATE_MEMPOOL(h,r,z)
29 #define VGMEMP_TRIM(h,a,s) VALGRIND_MEMPOOL_TRIM(h,a,s)
30 #define VGMEMP_ALLOC(h,a,s) VALGRIND_MEMPOOL_ALLOC(h,a,s)
31 #define VGMEMP_CHANGE(h,a,b,s) VALGRIND_MEMPOOL_CHANGE(h,a,b,s)
33 #define VGMEMP_MARK(m,s)
34 #define VGMEMP_CREATE(h,r,z)
35 #define VGMEMP_TRIM(h,a,s)
36 #define VGMEMP_ALLOC(h,a,s)
37 #define VGMEMP_CHANGE(h,a,b,s)
41 * This allocator returns temporary memory from a slab in a given memory
42 * context, aligned on a 2-int boundary. It cannot be used for data
43 * which will outlive the task allocating it.
45 * A new memory context attaches to the creator's thread context, if any.
46 * Threads cannot use other threads' memory contexts; there are no locks.
48 * The caller of slap_sl_malloc, usually a thread pool task, must
49 * slap_sl_free the memory before finishing: New tasks reuse the context
50 * and normally reset it, reclaiming memory left over from last task.
52 * The allocator helps against memory fragmentation, improves speed and limits memory leaks.
53 * It is not (yet) reliable as a garbage collector:
55 * It falls back to context NULL - plain ber_memalloc() - when the
56 * context's slab is full. A reset does not reclaim such memory.
57 * Conversely, free/realloc of data not from the given context assumes
58 * context NULL. The data must not belong to another memory context.
60 * Code which has lost track of the current memory context can try
61 * slap_sl_context() or ch_malloc.c:ch_free/ch_realloc().
63 * Allocations cannot yet return failure. Like ch_malloc, they succeed
64 * or abort slapd. This will change, do fix code which assumes success.
68 * The stack-based allocator stores (ber_len_t)sizeof(head+block) at
69 * allocated blocks' head - and in freed blocks also at the tail, marked
70 * by ORing *next* block's head with 1. Freed blocks are only reclaimed
71 * from the last block forward. This is fast, but when a block is never
72 * freed, older blocks will not be reclaimed until the slab is reset...
75 #ifdef SLAP_NO_SL_MALLOC /* Useful with memory debuggers like Valgrind */
76 enum { No_sl_malloc = 1 };
78 enum { No_sl_malloc = 0 };
81 #define SLAP_SLAB_SOBLOCK 64
86 LDAP_LIST_ENTRY(slab_object) so_link;
95 unsigned char **sh_map;
96 LDAP_LIST_HEAD(sh_freelist, slab_object) *sh_free;
97 LDAP_LIST_HEAD(sh_so, slab_object) sh_sopool;
101 Align = sizeof(ber_len_t) > 2*sizeof(int)
102 ? sizeof(ber_len_t) : 2*sizeof(int),
103 Align_log2 = 1 + (Align>2) + (Align>4) + (Align>8) + (Align>16),
104 order_start = Align_log2 - 1,
108 static struct slab_object * slap_replenish_sopool(struct slab_heap* sh);
110 static void print_slheap(int level, void *ctx);
113 /* Keep memory context in a thread-local var, or in a global when no threads */
115 static struct slab_heap *slheap;
116 # define SET_MEMCTX(thrctx, memctx, sfree) ((void) (slheap = (memctx)))
117 # define GET_MEMCTX(thrctx, memctxp) (*(memctxp) = slheap)
119 # define memctx_key ((void *) slap_sl_mem_init)
120 # define SET_MEMCTX(thrctx, memctx, kfree) \
121 ldap_pvt_thread_pool_setkey(thrctx,memctx_key, memctx,kfree, NULL,NULL)
122 # define GET_MEMCTX(thrctx, memctxp) \
123 ((void) (*(memctxp) = NULL), \
124 (void) ldap_pvt_thread_pool_getkey(thrctx,memctx_key, memctxp,NULL), \
126 #endif /* NO_THREADS */
129 /* Destroy the context, or if key==NULL clean it up for reuse. */
136 struct slab_heap *sh = data;
137 struct slab_object *so;
141 for (i = 0; i <= sh->sh_maxorder - order_start; i++) {
142 so = LDAP_LIST_FIRST(&sh->sh_free[i]);
144 struct slab_object *so_tmp = so;
145 so = LDAP_LIST_NEXT(so, so_link);
146 LDAP_LIST_INSERT_HEAD(&sh->sh_sopool, so_tmp, so_link);
148 ch_free(sh->sh_map[i]);
150 ch_free(sh->sh_free);
153 so = LDAP_LIST_FIRST(&sh->sh_sopool);
155 struct slab_object *so_tmp = so;
156 so = LDAP_LIST_NEXT(so, so_link);
157 if (!so_tmp->so_blockhead) {
158 LDAP_LIST_REMOVE(so_tmp, so_link);
161 so = LDAP_LIST_FIRST(&sh->sh_sopool);
163 struct slab_object *so_tmp = so;
164 so = LDAP_LIST_NEXT(so, so_link);
170 ber_memfree_x(sh->sh_base, NULL);
171 ber_memfree_x(sh, NULL);
175 BerMemoryFunctions slap_sl_mfuncs =
176 { slap_sl_malloc, slap_sl_calloc, slap_sl_realloc, slap_sl_free };
181 assert( Align == 1 << Align_log2 );
183 ber_set_option( NULL, LBER_OPT_MEMORY_FNS, &slap_sl_mfuncs );
186 /* Create, reset or just return the memory context of the current thread. */
196 struct slab_heap *sh;
197 ber_len_t size_shift;
198 struct slab_object *so;
200 enum { Base_offset = (unsigned) -sizeof(ber_len_t) % Align };
202 sh = GET_MEMCTX(thrctx, &memctx);
206 /* Round up to doubleword boundary, then make room for initial
207 * padding, preserving expected available size for pool version */
208 size = ((size + Align-1) & -Align) + Base_offset;
211 sh = ch_malloc(sizeof(struct slab_heap));
212 base = ch_malloc(size);
213 SET_MEMCTX(thrctx, sh, slap_sl_mem_destroy);
214 VGMEMP_MARK(base, size);
215 VGMEMP_CREATE(sh, 0, 0);
217 slap_sl_mem_destroy(NULL, sh);
219 if (size > (ber_len_t) ((char *) sh->sh_end - base)) {
220 newptr = ch_realloc(base, size);
221 if ( newptr == NULL ) return NULL;
222 VGMEMP_CHANGE(sh, base, newptr, size);
225 VGMEMP_TRIM(sh, base, 0);
228 sh->sh_end = base + size;
230 /* Align (base + head of first block) == first returned block */
234 sh->sh_stack = stack;
239 int i, order = -1, order_end = -1;
241 size_shift = size - 1;
244 } while (size_shift >>= 1);
245 order = order_end - order_start + 1;
246 sh->sh_maxorder = order_end;
248 sh->sh_free = (struct sh_freelist *)
249 ch_malloc(order * sizeof(struct sh_freelist));
250 for (i = 0; i < order; i++) {
251 LDAP_LIST_INIT(&sh->sh_free[i]);
254 LDAP_LIST_INIT(&sh->sh_sopool);
256 if (LDAP_LIST_EMPTY(&sh->sh_sopool)) {
257 slap_replenish_sopool(sh);
259 so = LDAP_LIST_FIRST(&sh->sh_sopool);
260 LDAP_LIST_REMOVE(so, so_link);
263 LDAP_LIST_INSERT_HEAD(&sh->sh_free[order-1], so, so_link);
265 sh->sh_map = (unsigned char **)
266 ch_malloc(order * sizeof(unsigned char *));
267 for (i = 0; i < order; i++) {
268 int shiftamt = order_start + 1 + i;
269 int nummaps = size >> shiftamt;
272 if (!nummaps) nummaps = 1;
273 sh->sh_map[i] = (unsigned char *) ch_malloc(nummaps);
274 memset(sh->sh_map[i], 0, nummaps);
282 * Assign memory context to thread context. Use NULL to detach
283 * current memory context from thread. Future users must
284 * know the context, since ch_free/slap_sl_context() cannot find it.
292 SET_MEMCTX(thrctx, memctx, slap_sl_mem_destroy);
301 struct slab_heap *sh = ctx;
302 ber_len_t *ptr, *newptr;
304 /* ber_set_option calls us like this */
305 if (No_sl_malloc || !ctx) {
306 newptr = ber_memalloc_x( size, NULL );
307 if ( newptr ) return newptr;
308 Debug(LDAP_DEBUG_ANY, "slap_sl_malloc of %lu bytes failed\n",
309 (unsigned long) size, 0, 0);
311 exit( EXIT_FAILURE );
314 /* Add room for head, ensure room for tail when freed, and
315 * round up to doubleword boundary. */
316 size = (size + sizeof(ber_len_t) + Align-1 + !size) & -Align;
319 if (size < (ber_len_t) ((char *) sh->sh_end - (char *) sh->sh_last)) {
320 newptr = sh->sh_last;
321 sh->sh_last = (char *) sh->sh_last + size;
322 VGMEMP_ALLOC(sh, newptr, size);
324 return( (void *)newptr );
327 size -= sizeof(ber_len_t);
330 struct slab_object *so_new, *so_left, *so_right;
331 ber_len_t size_shift;
333 int i, j, order = -1;
335 size_shift = size - 1;
338 } while (size_shift >>= 1);
340 size -= sizeof(ber_len_t);
342 for (i = order; i <= sh->sh_maxorder &&
343 LDAP_LIST_EMPTY(&sh->sh_free[i-order_start]); i++);
346 so_new = LDAP_LIST_FIRST(&sh->sh_free[i-order_start]);
347 LDAP_LIST_REMOVE(so_new, so_link);
348 ptr = so_new->so_ptr;
349 diff = (unsigned long)((char*)ptr -
350 (char*)sh->sh_base) >> (order + 1);
351 sh->sh_map[order-order_start][diff>>3] |= (1 << (diff & 0x7));
353 LDAP_LIST_INSERT_HEAD(&sh->sh_sopool, so_new, so_link);
355 } else if (i <= sh->sh_maxorder) {
356 for (j = i; j > order; j--) {
357 so_left = LDAP_LIST_FIRST(&sh->sh_free[j-order_start]);
358 LDAP_LIST_REMOVE(so_left, so_link);
359 if (LDAP_LIST_EMPTY(&sh->sh_sopool)) {
360 slap_replenish_sopool(sh);
362 so_right = LDAP_LIST_FIRST(&sh->sh_sopool);
363 LDAP_LIST_REMOVE(so_right, so_link);
364 so_right->so_ptr = (void *)((char *)so_left->so_ptr + (1 << j));
365 if (j == order + 1) {
366 ptr = so_left->so_ptr;
367 diff = (unsigned long)((char*)ptr -
368 (char*)sh->sh_base) >> (order+1);
369 sh->sh_map[order-order_start][diff>>3] |=
372 LDAP_LIST_INSERT_HEAD(
373 &sh->sh_free[j-1-order_start], so_right, so_link);
374 LDAP_LIST_INSERT_HEAD(&sh->sh_sopool, so_left, so_link);
377 LDAP_LIST_INSERT_HEAD(
378 &sh->sh_free[j-1-order_start], so_right, so_link);
379 LDAP_LIST_INSERT_HEAD(
380 &sh->sh_free[j-1-order_start], so_left, so_link);
384 /* FIXME: missing return; guessing we failed... */
387 Debug(LDAP_DEBUG_TRACE,
388 "sl_malloc %lu: ch_malloc\n",
389 (unsigned long) size, 0, 0);
390 return ch_malloc(size);
393 #define LIM_SQRT(t) /* some value < sqrt(max value of unsigned type t) */ \
394 ((0UL|(t)-1) >>31>>31 > 1 ? ((t)1 <<32) - 1 : \
395 (0UL|(t)-1) >>31 ? 65535U : (0UL|(t)-1) >>15 ? 255U : 15U)
398 slap_sl_calloc( ber_len_t n, ber_len_t size, void *ctx )
401 ber_len_t total = n * size;
403 /* The sqrt test is a slight optimization: often avoids the division */
404 if ((n | size) <= LIM_SQRT(ber_len_t) || n == 0 || total/n == size) {
405 newptr = slap_sl_malloc( total, ctx );
406 memset( newptr, 0, n*size );
408 Debug(LDAP_DEBUG_ANY, "slap_sl_calloc(%lu,%lu) out of range\n",
409 (unsigned long) n, (unsigned long) size, 0);
417 slap_sl_realloc(void *ptr, ber_len_t size, void *ctx)
419 struct slab_heap *sh = ctx;
420 ber_len_t oldsize, *p = (ber_len_t *) ptr, *nextp;
424 return slap_sl_malloc(size, ctx);
426 /* Not our memory? */
427 if (No_sl_malloc || !sh || ptr < sh->sh_base || ptr >= sh->sh_end) {
428 /* Like ch_realloc(), except not trying a new context */
429 newptr = ber_memrealloc_x(ptr, size, NULL);
433 Debug(LDAP_DEBUG_ANY, "slap_sl_realloc of %lu bytes failed\n",
434 (unsigned long) size, 0, 0);
436 exit( EXIT_FAILURE );
440 slap_sl_free(ptr, ctx);
447 /* Add room for head, round up to doubleword boundary */
448 size = (size + sizeof(ber_len_t) + Align-1) & -Align;
452 /* Never shrink blocks */
453 if (size <= oldsize) {
458 nextp = (ber_len_t *) ((char *) p + oldsize);
460 /* If reallocing the last block, try to grow it */
461 if (nextp == sh->sh_last) {
462 if (size < (ber_len_t) ((char *) sh->sh_end - (char *) p)) {
463 sh->sh_last = (char *) p + size;
464 p[0] = (p[0] & 1) | size;
468 /* Nowhere to grow, need to alloc and copy */
470 /* Slight optimization of the final realloc variant */
471 newptr = slap_sl_malloc(size-sizeof(ber_len_t), ctx);
472 AC_MEMCPY(newptr, ptr, oldsize-sizeof(ber_len_t));
473 /* Not last block, can just mark old region as free */
479 size -= sizeof(ber_len_t);
480 oldsize -= sizeof(ber_len_t);
482 } else if (oldsize > size) {
486 newptr = slap_sl_malloc(size, ctx);
487 AC_MEMCPY(newptr, ptr, oldsize);
488 slap_sl_free(ptr, ctx);
493 slap_sl_free(void *ptr, void *ctx)
495 struct slab_heap *sh = ctx;
497 ber_len_t *p = ptr, *nextp, *tmpp;
502 if (No_sl_malloc || !sh || ptr < sh->sh_base || ptr >= sh->sh_end) {
503 ber_memfree_x(ptr, NULL);
511 nextp = (ber_len_t *) ((char *) p + size);
512 if (sh->sh_last != nextp) {
513 /* Mark it free: tail = size, head of next block |= 1 */
516 /* We can't tell Valgrind about it yet, because we
517 * still need read/write access to this block for
518 * when we eventually get to reclaim it.
521 /* Reclaim freed block(s) off tail */
523 p = (ber_len_t *) ((char *) p - p[-1]);
526 VGMEMP_TRIM(sh, sh->sh_base,
527 (char *) sh->sh_last - (char *) sh->sh_base);
531 int size_shift, order_size;
532 struct slab_object *so;
534 int i, inserted = 0, order = -1;
536 size_shift = size + sizeof(ber_len_t) - 1;
539 } while (size_shift >>= 1);
541 for (i = order, tmpp = p; i <= sh->sh_maxorder; i++) {
542 order_size = 1 << (i+1);
543 diff = (unsigned long)((char*)tmpp - (char*)sh->sh_base) >> (i+1);
544 sh->sh_map[i-order_start][diff>>3] &= (~(1 << (diff & 0x7)));
545 if (diff == ((diff>>1)<<1)) {
546 if (!(sh->sh_map[i-order_start][(diff+1)>>3] &
547 (1<<((diff+1)&0x7)))) {
548 so = LDAP_LIST_FIRST(&sh->sh_free[i-order_start]);
550 if ((char*)so->so_ptr == (char*)tmpp) {
551 LDAP_LIST_REMOVE( so, so_link );
552 } else if ((char*)so->so_ptr ==
553 (char*)tmpp + order_size) {
554 LDAP_LIST_REMOVE(so, so_link);
557 so = LDAP_LIST_NEXT(so, so_link);
560 if (i < sh->sh_maxorder) {
563 LDAP_LIST_INSERT_HEAD(&sh->sh_free[i-order_start+1],
568 if (LDAP_LIST_EMPTY(&sh->sh_sopool)) {
569 slap_replenish_sopool(sh);
571 so = LDAP_LIST_FIRST(&sh->sh_sopool);
572 LDAP_LIST_REMOVE(so, so_link);
574 LDAP_LIST_INSERT_HEAD(&sh->sh_free[i-order_start],
578 Debug(LDAP_DEBUG_TRACE, "slap_sl_free: "
579 "free object not found while bit is clear.\n",
586 if (LDAP_LIST_EMPTY(&sh->sh_sopool)) {
587 slap_replenish_sopool(sh);
589 so = LDAP_LIST_FIRST(&sh->sh_sopool);
590 LDAP_LIST_REMOVE(so, so_link);
592 LDAP_LIST_INSERT_HEAD(&sh->sh_free[i-order_start],
598 if (!(sh->sh_map[i-order_start][(diff-1)>>3] &
599 (1<<((diff-1)&0x7)))) {
600 so = LDAP_LIST_FIRST(&sh->sh_free[i-order_start]);
602 if ((char*)so->so_ptr == (char*)tmpp) {
603 LDAP_LIST_REMOVE(so, so_link);
604 } else if ((char*)tmpp == (char *)so->so_ptr + order_size) {
605 LDAP_LIST_REMOVE(so, so_link);
609 so = LDAP_LIST_NEXT(so, so_link);
612 if (i < sh->sh_maxorder) {
614 LDAP_LIST_INSERT_HEAD(&sh->sh_free[i-order_start+1], so, so_link);
618 if (LDAP_LIST_EMPTY(&sh->sh_sopool)) {
619 slap_replenish_sopool(sh);
621 so = LDAP_LIST_FIRST(&sh->sh_sopool);
622 LDAP_LIST_REMOVE(so, so_link);
624 LDAP_LIST_INSERT_HEAD(&sh->sh_free[i-order_start],
628 Debug(LDAP_DEBUG_TRACE, "slap_sl_free: "
629 "free object not found while bit is clear.\n",
636 if (LDAP_LIST_EMPTY(&sh->sh_sopool)) {
637 slap_replenish_sopool(sh);
639 so = LDAP_LIST_FIRST(&sh->sh_sopool);
640 LDAP_LIST_REMOVE(so, so_link);
642 LDAP_LIST_INSERT_HEAD(&sh->sh_free[i-order_start],
653 * Return the memory context of the current thread if the given block of
654 * memory belongs to it, otherwise return NULL.
657 slap_sl_context( void *ptr )
660 struct slab_heap *sh;
662 if ( slapMode & SLAP_TOOL_MODE ) return NULL;
664 sh = GET_MEMCTX(ldap_pvt_thread_pool_context(), &memctx);
665 if (sh && ptr >= sh->sh_base && ptr <= sh->sh_end) {
671 static struct slab_object *
672 slap_replenish_sopool(
676 struct slab_object *so_block;
679 so_block = (struct slab_object *)ch_malloc(
680 SLAP_SLAB_SOBLOCK * sizeof(struct slab_object));
682 if ( so_block == NULL ) {
686 so_block[0].so_blockhead = 1;
687 LDAP_LIST_INSERT_HEAD(&sh->sh_sopool, &so_block[0], so_link);
688 for (i = 1; i < SLAP_SLAB_SOBLOCK; i++) {
689 so_block[i].so_blockhead = 0;
690 LDAP_LIST_INSERT_HEAD(&sh->sh_sopool, &so_block[i], so_link );
698 print_slheap(int level, void *ctx)
700 struct slab_heap *sh = ctx;
701 struct slab_object *so;
705 Debug(level, "NULL memctx\n", 0, 0, 0);
709 Debug(level, "sh->sh_maxorder=%d\n", sh->sh_maxorder, 0, 0);
711 for (i = order_start; i <= sh->sh_maxorder; i++) {
713 Debug(level, "order=%d\n", i, 0, 0);
714 for (j = 0; j < (1<<(sh->sh_maxorder-i))/8; j++) {
715 Debug(level, "%02x ", sh->sh_map[i-order_start][j], 0, 0);
719 Debug(level, "%02x ", sh->sh_map[i-order_start][0], 0, 0);
721 Debug(level, "\n", 0, 0, 0);
722 Debug(level, "free list:\n", 0, 0, 0);
723 so = LDAP_LIST_FIRST(&sh->sh_free[i-order_start]);
725 Debug(level, "%p\n", so->so_ptr, 0, 0);
726 so = LDAP_LIST_NEXT(so, so_link);