X-Git-Url: https://git.sur5r.net/?a=blobdiff_plain;ds=sidebyside;f=common%2Fdlmalloc.c;h=2276532da7a8441c500c39b1716e23a2806a8c21;hb=201532a69cf7e7a84bff354fdab45947425d22b4;hp=c51351e9618ec92ce81ca93f6d6303b39a8ceadb;hpb=1730edf76c54381475e2da11f75b1ce563c4e62c;p=u-boot

diff --git a/common/dlmalloc.c b/common/dlmalloc.c
index c51351e961..2276532da7 100644
--- a/common/dlmalloc.c
+++ b/common/dlmalloc.c
@@ -1457,7 +1457,7 @@ typedef struct malloc_chunk* mbinptr;
    indexing, maintain locality, and avoid some initialization tests.
 */
 
-#define top (bin_at(0)->fd) /* The topmost chunk */
+#define top (av_[2]) /* The topmost chunk */
 #define last_remainder (bin_at(1)) /* remainder from last split */
 
 
@@ -1494,6 +1494,7 @@ static mbinptr av_[NAV * 2 + 2] = {
     IAV(120), IAV(121), IAV(122), IAV(123), IAV(124), IAV(125), IAV(126), IAV(127)
 };
 
+#ifndef CONFIG_RELOC_FIXUP_WORKS
 void malloc_bin_reloc (void)
 {
 	unsigned long *p = (unsigned long *)(&av_[2]);
@@ -1502,7 +1503,33 @@ void malloc_bin_reloc (void)
 		*p++ += gd->reloc_off;
 	}
 }
-
+#endif
+
+ulong mem_malloc_start = 0;
+ulong mem_malloc_end = 0;
+ulong mem_malloc_brk = 0;
+
+void *sbrk(ptrdiff_t increment)
+{
+	ulong old = mem_malloc_brk;
+	ulong new = old + increment;
+
+	if ((new < mem_malloc_start) || (new > mem_malloc_end))
+		return (void *)MORECORE_FAILURE;
+
+	mem_malloc_brk = new;
+
+	return (void *)old;
+}
+
+void mem_malloc_init(ulong start, ulong size)
+{
+	mem_malloc_start = start;
+	mem_malloc_end = start + size;
+	mem_malloc_brk = start;
+
+	memset((void *)mem_malloc_start, 0, size);
+}
 
 /* field-extraction macros */
 
@@ -1552,13 +1579,14 @@ void malloc_bin_reloc (void)
 
 #define BINBLOCKWIDTH 4 /* bins per block */
 
-#define binblocks (bin_at(0)->size) /* bitvector of nonempty blocks */
+#define binblocks_r ((INTERNAL_SIZE_T)av_[1]) /* bitvector of nonempty blocks */
+#define binblocks_w (av_[1])
 
 /* bin<->block macros */
 
 #define idx2binblock(ix) ((unsigned)1 << (ix / BINBLOCKWIDTH))
-#define mark_binblock(ii) (binblocks |= idx2binblock(ii))
-#define clear_binblock(ii) (binblocks &= ~(idx2binblock(ii)))
+#define mark_binblock(ii) (binblocks_w = (mbinptr)(binblocks_r | idx2binblock(ii)))
+#define clear_binblock(ii) (binblocks_w = (mbinptr)(binblocks_r & ~(idx2binblock(ii))))
 
 
 
@@ -2151,6 +2179,12 @@ Void_t* mALLOc(bytes) size_t bytes;
 
   INTERNAL_SIZE_T nb;
 
+  /* check if mem_malloc_init() was run */
+  if ((mem_malloc_start == 0) && (mem_malloc_end == 0)) {
+	/* not initialized yet */
+	return 0;
+  }
+
   if ((long)bytes < 0) return 0;
 
   nb = request2size(bytes); /* padded request size; */
@@ -2250,17 +2284,17 @@ Void_t* mALLOc(bytes) size_t bytes;
     search for best fitting chunk by scanning bins in blockwidth units.
   */
 
-  if ( (block = idx2binblock(idx)) <= binblocks)
+  if ( (block = idx2binblock(idx)) <= binblocks_r)
   {
 
     /* Get to the first marked block */
 
-    if ( (block & binblocks) == 0)
+    if ( (block & binblocks_r) == 0)
     {
       /* force to an even block boundary */
       idx = (idx & ~(BINBLOCKWIDTH - 1)) + BINBLOCKWIDTH;
       block <<= 1;
-      while ((block & binblocks) == 0)
+      while ((block & binblocks_r) == 0)
       {
 	idx += BINBLOCKWIDTH;
 	block <<= 1;
@@ -2315,7 +2349,7 @@ Void_t* mALLOc(bytes) size_t bytes;
 	  {
 	    if ((startidx & (BINBLOCKWIDTH - 1)) == 0)
 	    {
-	      binblocks &= ~block;
+	      av_[1] = (mbinptr)(binblocks_r & ~block);
 	      break;
 	    }
 	    --startidx;
@@ -2324,9 +2358,9 @@ Void_t* mALLOc(bytes) size_t bytes;
 
       /* Get to the next possibly nonempty block */
 
-      if ( (block <<= 1) <= binblocks && (block != 0) )
+      if ( (block <<= 1) <= binblocks_r && (block != 0) )
      {
-	while ((block & binblocks) == 0)
+	while ((block & binblocks_r) == 0)
 	{
 	  idx += BINBLOCKWIDTH;
 	  block <<= 1;
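
Note on the new heap bring-up (reader's sketch, not part of the patch): after this change dlmalloc's MORECORE is backed by the sbrk() above, which only hands out addresses between mem_malloc_start and mem_malloc_end; mem_malloc_init() establishes and zeroes that arena, and the new guard in mALLOc() returns 0 whenever the arena was never set up. The standalone program below mirrors that logic so it can be compiled and run on a host. heap_init(), heap_sbrk() and the main() harness are hypothetical names invented for the illustration, and the unsigned long arithmetic assumes pointers fit in a long, as U-Boot's ulong usage does.

/*
 * Standalone sketch of the sbrk()/mem_malloc_init() logic added by this
 * diff.  heap_init()/heap_sbrk() and main() are hypothetical stand-ins;
 * the real functions are sbrk() and mem_malloc_init() in common/dlmalloc.c.
 */
#include <stdio.h>
#include <string.h>
#include <stddef.h>

#define MORECORE_FAILURE (-1)	/* same failure cookie the patch returns */

static unsigned long mem_malloc_start;	/* arena lower bound */
static unsigned long mem_malloc_end;	/* arena upper bound */
static unsigned long mem_malloc_brk;	/* current break pointer */

/* Move the break by 'increment' bytes, refusing to leave the arena. */
static void *heap_sbrk(ptrdiff_t increment)
{
	unsigned long old = mem_malloc_brk;
	unsigned long new_brk = old + increment;

	if ((new_brk < mem_malloc_start) || (new_brk > mem_malloc_end))
		return (void *)MORECORE_FAILURE;

	mem_malloc_brk = new_brk;
	return (void *)old;
}

/* Carve a zeroed arena out of a caller-supplied buffer, as mem_malloc_init() does. */
static void heap_init(void *start, size_t size)
{
	mem_malloc_start = (unsigned long)start;
	mem_malloc_end = (unsigned long)start + size;
	mem_malloc_brk = (unsigned long)start;

	memset(start, 0, size);
}

int main(void)
{
	static unsigned char arena[4096];

	/* Until heap_init() runs, both bounds are zero; that is the
	 * condition the new guard at the top of mALLOc() checks. */
	heap_init(arena, sizeof(arena));

	void *a = heap_sbrk(1024);	/* fits: break stays inside the arena */
	void *b = heap_sbrk(8192);	/* too big: would run past the arena end */

	printf("first sbrk gave %p\n", a);
	printf("oversized sbrk failed: %s\n",
	       b == (void *)MORECORE_FAILURE ? "yes" : "no");
	return 0;
}

Returning (void *)MORECORE_FAILURE rather than NULL matters because dlmalloc compares the MORECORE result against that cookie to decide that no more core is available; returning the old break on success is the usual sbrk() contract.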
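
A second reader's note, this time on the binblocks change: the old binblocks macro reached the block bitmap through bin_at(0)->size, which is the same av_[1] slot (the same layout the patch relies on when it rewrites top as av_[2]); the patch splits it into an explicit read view (binblocks_r, av_[1] cast to INTERNAL_SIZE_T) and write view (binblocks_w, the raw av_[1]) while leaving the scanning logic in mALLOc() unchanged. The self-contained demo below illustrates only the bitmap arithmetic (idx2binblock, mark/clear, and the skip-to-the-next-marked-block walk); a plain unsigned long stands in for av_[1], and next_marked_bin() is a hypothetical helper that does not exist in dlmalloc.

/*
 * Hypothetical, self-contained illustration of the binblocks bitmap used
 * by mALLOc()'s best-fit scan.  A plain unsigned long stands in for
 * av_[1] (binblocks_r/binblocks_w in the patch).
 */
#include <stdio.h>

#define BINBLOCKWIDTH 4		/* bins per block, as in the patch */

static unsigned long binblocks;	/* one bit per block of BINBLOCKWIDTH bins */

#define idx2binblock(ix)   ((unsigned)1 << ((ix) / BINBLOCKWIDTH))
#define mark_binblock(ii)  (binblocks |= idx2binblock(ii))
#define clear_binblock(ii) (binblocks &= ~(idx2binblock(ii)))

/* Return the bin index where a scan starting at 'idx' would resume:
 * 'idx' itself if its block is marked, otherwise the first bin of the
 * next marked block, or -1 if no marked block remains.  This mirrors
 * the skip-forward loop in mALLOc(). */
static int next_marked_bin(int idx)
{
	unsigned block = idx2binblock(idx);

	if (block > binblocks)
		return -1;	/* no marked block at or above idx */

	if ((block & binblocks) == 0) {
		/* force to an even block boundary, then step block by block */
		idx = (idx & ~(BINBLOCKWIDTH - 1)) + BINBLOCKWIDTH;
		block <<= 1;
		while (block != 0 && (block & binblocks) == 0) {
			idx += BINBLOCKWIDTH;
			block <<= 1;
		}
		if (block == 0)
			return -1;
	}
	return idx;
}

int main(void)
{
	mark_binblock(5);	/* bin 5 non-empty: marks block 1 (bins 4..7)    */
	mark_binblock(42);	/* bin 42 non-empty: marks block 10 (bins 40..43) */

	printf("scan from bin 2: %d\n", next_marked_bin(2));	/* prints 4  */
	printf("scan from bin 9: %d\n", next_marked_bin(9));	/* prints 40 */

	clear_binblock(42);
	printf("scan from bin 9 after clearing: %d\n", next_marked_bin(9));	/* prints -1 */
	return 0;
}

With a BINBLOCKWIDTH of 4, bin 5 lands in block 1 (bins 4..7) and bin 42 in block 10 (bins 40..43), which is what the three printf() lines report: 4, then 40, then -1 once block 10 has been cleared.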