X-Git-Url: https://git.sur5r.net/?a=blobdiff_plain;f=servers%2Fslapd%2Fsl_malloc.c;h=b4e0ac614680e6d91cc887e7baa55e8aef32998b;hb=68ab1a2272b21d43d14db7e5dfe9cef3345b1310;hp=0adb1133df48d5812370489f1ac05b23b4f31132;hpb=30f401c62898426664d71518e4d176293f64ebc7;p=openldap

diff --git a/servers/slapd/sl_malloc.c b/servers/slapd/sl_malloc.c
index 0adb1133df..b4e0ac6146 100644
--- a/servers/slapd/sl_malloc.c
+++ b/servers/slapd/sl_malloc.c
@@ -2,7 +2,7 @@
 /* $OpenLDAP$ */
 /* This work is part of OpenLDAP Software <http://www.openldap.org/>.
  *
- * Copyright 2003-2007 The OpenLDAP Foundation.
+ * Copyright 2003-2009 The OpenLDAP Foundation.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -90,6 +90,14 @@ slap_sl_mem_init()
 static struct slab_heap *slheap;
 #endif
 
+/* This allocator always returns memory aligned on a 2-int boundary.
+ *
+ * The stack-based allocator stores the size as a ber_len_t at both
+ * the head and tail of the allocated block. When freeing a block, the
+ * tail length is ORed with 1 to mark it as free. Freed space can only
+ * be reclaimed from the tail forward. If the tail block is never freed,
+ * nothing else will be reclaimed until the slab is reset...
+ */
 void *
 slap_sl_mem_create(
 	ber_len_t size,
@@ -114,7 +122,7 @@ slap_sl_mem_create(
 	sh = sh_tmp;
 #endif
 
-	if ( !new )
+	if ( sh && !new )
 		return sh;
 
 	/* round up to doubleword boundary */
@@ -138,7 +146,12 @@ slap_sl_mem_create(
 			if ( newptr == NULL ) return NULL;
 			sh->sh_base = newptr;
 		}
-		sh->sh_last = sh->sh_base;
+		/* insert dummy len */
+		{
+			ber_len_t *i = sh->sh_base;
+			*i++ = 0;
+			sh->sh_last = i;
+		}
 		sh->sh_end = (char *) sh->sh_base + size;
 		sh->sh_stack = stack;
 		return sh;
@@ -195,7 +208,7 @@ slap_sl_mem_create(
 		if (size > (char *)sh->sh_end - (char *)sh->sh_base) {
 			void *newptr;
 
-			newptr = realloc( sh->sh_base, size );
+			newptr = ch_realloc( sh->sh_base, size );
 			if ( newptr == NULL ) return NULL;
 			sh->sh_base = newptr;
 		}
@@ -246,7 +259,8 @@ slap_sl_mem_detach(
 	slheap = NULL;
 #else
 	/* separate from context */
-	ldap_pvt_thread_pool_setkey( ctx, (void *)slap_sl_mem_init, NULL, NULL, NULL, NULL );
+	ldap_pvt_thread_pool_setkey( ctx, (void *)slap_sl_mem_init,
+		NULL, 0, NULL, NULL );
 #endif
 }
 
@@ -257,23 +271,26 @@ slap_sl_malloc(
 )
 {
 	struct slab_heap *sh = ctx;
-	ber_len_t size_shift;
 	int pad = 2*sizeof(int)-1, pad_shift;
-	int order = -1, order_start = -1;
-	struct slab_object *so_new, *so_left, *so_right;
 	ber_len_t *ptr, *newptr;
-	unsigned long diff;
-	int i, j;
 
 #ifdef SLAP_NO_SL_MALLOC
-	return ber_memalloc_x( size, NULL );
+	newptr = ber_memalloc_x( size, NULL );
+	if ( newptr ) return newptr;
+	assert( 0 );
+	exit( EXIT_FAILURE );
 #endif
 
 	/* ber_set_option calls us like this */
-	if (!ctx) return ber_memalloc_x(size, NULL);
+	if (!ctx) {
+		newptr = ber_memalloc_x( size, NULL );
+		if ( newptr ) return newptr;
+		assert( 0 );
+		exit( EXIT_FAILURE );
+	}
 
-	/* round up to doubleword boundary */
-	size += sizeof(ber_len_t) + pad;
+	/* round up to doubleword boundary, plus space for len at head and tail */
+	size += 2*sizeof(ber_len_t) + pad;
 	size &= ~pad;
 
 	if (sh->sh_stack) {
@@ -284,10 +301,18 @@ slap_sl_malloc(
 			return ch_malloc(size);
 		}
 		newptr = sh->sh_last;
-		*newptr++ = size - sizeof(ber_len_t);
 		sh->sh_last = (char *) sh->sh_last + size;
+		size -= sizeof(ber_len_t);
+		*newptr++ = size;
+		*(ber_len_t *)((char *)sh->sh_last - sizeof(ber_len_t)) = size;
 		return( (void *)newptr );
 	} else {
+		struct slab_object *so_new, *so_left, *so_right;
+		ber_len_t size_shift;
+		int order = -1, order_start = -1;
+		unsigned long diff;
+		int i, j;
+
 		size_shift = size - 1;
 		do {
 			order++;
@@ -374,7 +399,10 @@ slap_sl_realloc(void *ptr, ber_len_t size, void *ctx)
 		return slap_sl_malloc(size, ctx);
 
 #ifdef SLAP_NO_SL_MALLOC
-	return ber_memrealloc_x( ptr, size, NULL );
+	newptr = ber_memrealloc_x( ptr, size, NULL );
+	if ( newptr ) return newptr;
+	assert( 0 );
+	exit( EXIT_FAILURE );
 #endif
 
 	/* Not our memory? */
@@ -400,21 +428,26 @@ slap_sl_realloc(void *ptr, ber_len_t size, void *ctx)
 		size += pad + sizeof( ber_len_t );
 		size &= ~pad;
 
+		p--;
+
 		/* Never shrink blocks */
-		if (size <= p[-1]) {
-			newptr = p;
+		if (size <= p[0]) {
+			newptr = ptr;
 
 		/* If reallocing the last block, we can grow it */
-		} else if ((char *)ptr + p[-1] == sh->sh_last &&
+		} else if ((char *)ptr + p[0] == sh->sh_last &&
 			(char *)ptr + size < (char *)sh->sh_end ) {
-			newptr = p;
-			sh->sh_last = (char *)sh->sh_last + size - p[-1];
-			p[-1] = size;
-
+			newptr = ptr;
+			sh->sh_last = (char *)ptr + size;
+			p[0] = size;
+			p[size/sizeof(ber_len_t)] = size;
+
 		/* Nowhere to grow, need to alloc and copy */
 		} else {
-			newptr = slap_sl_malloc(size, ctx);
-			AC_MEMCPY(newptr, ptr, p[-1]);
+			newptr = slap_sl_malloc(size-sizeof(ber_len_t), ctx);
+			AC_MEMCPY(newptr, ptr, p[0]-sizeof(ber_len_t));
+			/* mark old region as free */
+			p[p[0]/sizeof(ber_len_t)] |= 1;
 		}
 		return newptr;
 	} else {
@@ -435,13 +468,8 @@ void
 slap_sl_free(void *ptr, void *ctx)
 {
 	struct slab_heap *sh = ctx;
-	int size, size_shift, order_size;
-	int pad = 2*sizeof(int)-1, pad_shift;
+	ber_len_t size;
 	ber_len_t *p = (ber_len_t *)ptr, *tmpp;
-	int order_start = -1, order = -1;
-	struct slab_object *so;
-	unsigned long diff;
-	int i, inserted = 0;
 
 	if (!ptr)
 		return;
@@ -453,10 +481,31 @@ slap_sl_free(void *ptr, void *ctx)
 
 	if (!sh || ptr < sh->sh_base || ptr >= sh->sh_end) {
 		ber_memfree_x(ptr, NULL);
-	} else if (sh->sh_stack && (char *)ptr + p[-1] == sh->sh_last) {
-		p--;
-		sh->sh_last = p;
-	} else if (!sh->sh_stack) {
+	} else if (sh->sh_stack) {
+		tmpp = (ber_len_t *)((char *)ptr + p[-1]);
+		/* mark it free */
+		tmpp[-1] |= 1;
+		/* reclaim free space off tail */
+		while ( tmpp == sh->sh_last ) {
+			if ( tmpp[-1] & 1 ) {
+				size = tmpp[-1] ^ 1;
+				ptr = (char *)tmpp - size;
+				p = (ber_len_t *)ptr;
+				p--;
+				sh->sh_last = p;
+				tmpp = sh->sh_last;
+			} else {
+				break;
+			}
+		}
+	} else {
+		int size_shift, order_size;
+		int pad = 2*sizeof(int)-1, pad_shift;
+		int order_start = -1, order = -1;
+		struct slab_object *so;
+		unsigned long diff;
+		int i, inserted = 0;
+
		size = *(--p);
 		size_shift = size + sizeof(ber_len_t) - 1;
 		do {
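
The comment block added at the top of this patch summarizes the scheme the reworked slap_sl_malloc / slap_sl_realloc / slap_sl_free paths implement for stack-style slabs: every block carries its length in a ber_len_t at both its head and its tail, freeing sets the low bit of the tail length, and space is actually reclaimed only when the freed block (and any already-freed blocks behind it) sit at the tail of the slab. The stand-alone sketch below models just that bookkeeping with invented names (toy_heap, toy_init, toy_alloc, toy_free, len_t) and a fixed-size buffer; it is an illustration of the layout, not the slapd API, and it omits the non-stack (buddy allocator) branch entirely.

/*
 * Toy model of the head/tail length bookkeeping described above.
 * All names here are invented for illustration; the real code lives in
 * slap_sl_mem_create, slap_sl_malloc and slap_sl_free.
 */
#include <assert.h>
#include <stdio.h>

typedef unsigned long len_t;                     /* stands in for ber_len_t */

#define SLAB_SIZE  1024
#define ALIGN_MASK ((len_t)(2*sizeof(int) - 1))  /* 2-int alignment, as in sl_malloc */

typedef struct toy_heap {
	len_t  base[SLAB_SIZE / sizeof(len_t)];  /* aligned backing store */
	char  *last;                             /* first unused byte of the slab */
	char  *end;
} toy_heap;

static void toy_init(toy_heap *h)
{
	len_t *i = h->base;
	*i++ = 0;                    /* dummy len: terminates the reclaim loop */
	h->last = (char *)i;
	h->end  = (char *)h->base + sizeof h->base;
}

static void *toy_alloc(toy_heap *h, len_t size)
{
	len_t *head;

	/* round up, reserving a length word at both the head and the tail */
	size += 2*sizeof(len_t) + ALIGN_MASK;
	size &= ~ALIGN_MASK;
	if (h->last + size > h->end)
		return NULL;             /* the real code falls back to ch_malloc here */

	head = (len_t *)h->last;
	h->last += size;
	size -= sizeof(len_t);
	*head++ = size;                              /* length at the head... */
	*(len_t *)(h->last - sizeof(len_t)) = size;  /* ...and the same length at the tail */
	return head;                                 /* user data starts after the head word */
}

static void toy_free(toy_heap *h, void *ptr)
{
	len_t *p = ptr;
	len_t *tail = (len_t *)((char *)ptr + p[-1]);  /* one past the end of this block */

	tail[-1] |= 1;               /* mark the block free in its tail word */

	/* pop freed blocks off the tail; a live block or the dummy len stops the walk */
	while ((char *)tail == h->last && (tail[-1] & 1)) {
		len_t size = tail[-1] ^ 1;                 /* clear the free bit */
		tail = (len_t *)((char *)tail - size) - 1; /* back to that block's head word */
		h->last = (char *)tail;
	}
}

int main(void)
{
	toy_heap h;
	char *a, *b;

	toy_init(&h);
	a = toy_alloc(&h, 100);
	b = toy_alloc(&h, 40);
	assert(a && b);

	toy_free(&h, a);   /* not the tail block: only marked free, nothing reclaimed */
	printf("after free(a): %ld bytes in use\n", (long)(h.last - (char *)h.base));

	toy_free(&h, b);   /* tail block: b and the already-freed a are both reclaimed */
	printf("after free(b): %ld bytes in use\n", (long)(h.last - (char *)h.base));
	assert(h.last == (char *)h.base + sizeof(len_t));
	return 0;
}

Freeing a before b shows the behaviour the comment warns about: a is only marked free, and nothing is reclaimed until the tail block b is also freed, at which point the loop walks back over both blocks and stops at the dummy length word that slap_sl_mem_create now writes at the base of the slab.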