/**
 * @file
 * Dynamic memory manager
 */

/*
 * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
 * OF SUCH DAMAGE.
 *
 * This file is part of the lwIP TCP/IP stack.
 *
 * Author: Adam Dunkels <adam@sics.se>
 *
 */
41 #include "lwip/arch.h"
\r
42 #include "lwip/opt.h"
\r
43 #include "lwip/def.h"
\r
44 #include "lwip/mem.h"
\r
46 #include "lwip/sys.h"
\r
48 #include "lwip/stats.h"
\r
50 #if (MEM_LIBC_MALLOC == 0)
\r
51 /* lwIP replacement for your libc malloc() */
\r
54 mem_size_t next, prev;
\r
55 #if MEM_ALIGNMENT == 1
\r
57 #elif MEM_ALIGNMENT == 2
\r
59 #elif MEM_ALIGNMENT == 4
\r
61 #elif MEM_ALIGNMENT == 8
\r
64 #error "unhandled MEM_ALIGNMENT size"
\r
65 #endif /* MEM_ALIGNMENT */
\r
68 static struct mem *ram_end;
\r
71 static u8_t ram[MEM_SIZE + sizeof(struct mem) + MEM_ALIGNMENT];
\r
73 /* Christiaan alignment fix */
\r
75 static struct mem ram_heap[1 + ( (MEM_SIZE + sizeof(struct mem) - 1) / sizeof(struct mem))];
\r
79 #if 0 /* this one does not align correctly for some, resulting in crashes */
\r
80 #define SIZEOF_STRUCT_MEM (unsigned int)MEM_ALIGN_SIZE(sizeof(struct mem))
\r
82 #define SIZEOF_STRUCT_MEM (sizeof(struct mem) + \
\r
83 (((sizeof(struct mem) % MEM_ALIGNMENT) == 0)? 0 : \
\r
84 (4 - (sizeof(struct mem) % MEM_ALIGNMENT))))
\r
87 static struct mem *lfree; /* pointer to the lowest free block */
\r
89 static sys_sem_t mem_sem;
\r
92 plug_holes(struct mem *mem)
\r
97 LWIP_ASSERT("plug_holes: mem >= ram", (u8_t *)mem >= ram);
\r
98 LWIP_ASSERT("plug_holes: mem < ram_end", (u8_t *)mem < (u8_t *)ram_end);
\r
99 LWIP_ASSERT("plug_holes: mem->used == 0", mem->used == 0);
\r
101 /* plug hole forward */
\r
102 LWIP_ASSERT("plug_holes: mem->next <= MEM_SIZE", mem->next <= MEM_SIZE);
\r
104 nmem = (struct mem *)&ram[mem->next];
\r
105 if (mem != nmem && nmem->used == 0 && (u8_t *)nmem != (u8_t *)ram_end) {
\r
106 if (lfree == nmem) {
\r
109 mem->next = nmem->next;
\r
110 ((struct mem *)&ram[nmem->next])->prev = (u8_t *)mem - ram;
\r
113 /* plug hole backward */
\r
114 pmem = (struct mem *)&ram[mem->prev];
\r
115 if (pmem != mem && pmem->used == 0) {
\r
116 if (lfree == mem) {
\r
119 pmem->next = mem->next;
\r
120 ((struct mem *)&ram[mem->next])->prev = (u8_t *)pmem - ram;
\r
130 /* Adam original */
\r
132 /* Christiaan alignment fix */
\r
133 ram = (u8_t*)ram_heap;
\r
135 memset(ram, 0, MEM_SIZE);
\r
136 mem = (struct mem *)ram;
\r
137 mem->next = MEM_SIZE;
\r
140 ram_end = (struct mem *)&ram[MEM_SIZE];
\r
142 ram_end->next = MEM_SIZE;
\r
143 ram_end->prev = MEM_SIZE;
\r
145 mem_sem = sys_sem_new(1);
\r
147 lfree = (struct mem *)ram;
\r
150 lwip_stats.mem.avail = MEM_SIZE;
\r
151 #endif /* MEM_STATS */
\r
155 mem_free(void *rmem)
\r
159 if (rmem == NULL) {
\r
160 LWIP_DEBUGF(MEM_DEBUG | DBG_TRACE | 2, ("mem_free(p == NULL) was called.\n"));
\r
164 sys_sem_wait(mem_sem);
\r
166 LWIP_ASSERT("mem_free: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
\r
167 (u8_t *)rmem < (u8_t *)ram_end);
\r
169 if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
\r
170 LWIP_DEBUGF(MEM_DEBUG | 3, ("mem_free: illegal memory\n"));
\r
172 ++lwip_stats.mem.err;
\r
173 #endif /* MEM_STATS */
\r
174 sys_sem_signal(mem_sem);
\r
177 mem = (struct mem *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
\r
179 LWIP_ASSERT("mem_free: mem->used", mem->used);
\r
188 lwip_stats.mem.used -= mem->next - ((u8_t *)mem - ram);
\r
190 #endif /* MEM_STATS */
\r
192 sys_sem_signal(mem_sem);
\r
196 mem_realloc(void *rmem, mem_size_t newsize)
\r
199 mem_size_t ptr, ptr2;
\r
200 struct mem *mem, *mem2;
\r
202 /* Expand the size of the allocated memory region so that we can
\r
203 adjust for alignment. */
\r
204 if ((newsize % MEM_ALIGNMENT) != 0) {
\r
205 newsize += MEM_ALIGNMENT - ((newsize + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT);
\r
208 if (newsize > MEM_SIZE) {
\r
212 sys_sem_wait(mem_sem);
\r
214 LWIP_ASSERT("mem_realloc: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
\r
215 (u8_t *)rmem < (u8_t *)ram_end);
\r
217 if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
\r
218 LWIP_DEBUGF(MEM_DEBUG | 3, ("mem_realloc: illegal memory\n"));
\r
221 mem = (struct mem *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
\r
223 ptr = (u8_t *)mem - ram;
\r
225 size = mem->next - ptr - SIZEOF_STRUCT_MEM;
\r
227 lwip_stats.mem.used -= (size - newsize);
\r
228 #endif /* MEM_STATS */
\r
230 if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE < size) {
\r
231 ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
\r
232 mem2 = (struct mem *)&ram[ptr2];
\r
234 mem2->next = mem->next;
\r
237 if (mem2->next != MEM_SIZE) {
\r
238 ((struct mem *)&ram[mem2->next])->prev = ptr2;
\r
243 sys_sem_signal(mem_sem);
\r
249 * Adam's mem_malloc(), suffers from bug #17922
\r
250 * Set if to 0 for alternative mem_malloc().
\r
253 mem_malloc(mem_size_t size)
\r
255 mem_size_t ptr, ptr2;
\r
256 struct mem *mem, *mem2;
\r
262 /* Expand the size of the allocated memory region so that we can
\r
263 adjust for alignment. */
\r
264 if ((size % MEM_ALIGNMENT) != 0) {
\r
265 size += MEM_ALIGNMENT - ((size + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT);
\r
268 if (size > MEM_SIZE) {
\r
272 sys_sem_wait(mem_sem);
\r
274 for (ptr = (u8_t *)lfree - ram; ptr < MEM_SIZE; ptr = ((struct mem *)&ram[ptr])->next) {
\r
275 mem = (struct mem *)&ram[ptr];
\r
277 mem->next - (ptr + SIZEOF_STRUCT_MEM) >= size + SIZEOF_STRUCT_MEM) {
\r
278 ptr2 = ptr + SIZEOF_STRUCT_MEM + size;
\r
279 mem2 = (struct mem *)&ram[ptr2];
\r
282 mem2->next = mem->next;
\r
284 if (mem2->next != MEM_SIZE) {
\r
285 ((struct mem *)&ram[mem2->next])->prev = ptr2;
\r
291 lwip_stats.mem.used += (size + SIZEOF_STRUCT_MEM);
\r
292 /* if (lwip_stats.mem.max < lwip_stats.mem.used) {
\r
293 lwip_stats.mem.max = lwip_stats.mem.used;
\r
295 if (lwip_stats.mem.max < ptr2) {
\r
296 lwip_stats.mem.max = ptr2;
\r
298 #endif /* MEM_STATS */
\r
300 if (mem == lfree) {
\r
301 /* Find next free block after mem */
\r
302 while (lfree->used && lfree != ram_end) {
\r
303 lfree = (struct mem *)&ram[lfree->next];
\r
305 LWIP_ASSERT("mem_malloc: !lfree->used", !lfree->used);
\r
307 sys_sem_signal(mem_sem);
\r
308 LWIP_ASSERT("mem_malloc: allocated memory not above ram_end.",
\r
309 (mem_ptr_t)mem + SIZEOF_STRUCT_MEM + size <= (mem_ptr_t)ram_end);
\r
310 LWIP_ASSERT("mem_malloc: allocated memory properly aligned.",
\r
311 (unsigned long)((u8_t *)mem + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT == 0);
\r
312 return (u8_t *)mem + SIZEOF_STRUCT_MEM;
\r
315 LWIP_DEBUGF(MEM_DEBUG | 2, ("mem_malloc: could not allocate %"S16_F" bytes\n", (s16_t)size));
\r
317 ++lwip_stats.mem.err;
\r
318 #endif /* MEM_STATS */
\r
319 sys_sem_signal(mem_sem);
\r
324 * Adam's mem_malloc() plus solution for bug #17922
\r
327 mem_malloc(mem_size_t size)
\r
329 mem_size_t ptr, ptr2;
\r
330 struct mem *mem, *mem2;
\r
336 /* Expand the size of the allocated memory region so that we can
\r
337 adjust for alignment. */
\r
338 if ((size % MEM_ALIGNMENT) != 0) {
\r
339 size += MEM_ALIGNMENT - ((size + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT);
\r
342 if (size > MEM_SIZE) {
\r
346 sys_sem_wait(mem_sem);
\r
348 for (ptr = (u8_t *)lfree - ram; ptr < MEM_SIZE - size; ptr = ((struct mem *)&ram[ptr])->next) {
\r
349 mem = (struct mem *)&ram[ptr];
\r
353 ptr2 = ptr + SIZEOF_STRUCT_MEM + size;
\r
355 if (mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) >= size) {
\r
356 /* split large block, create empty remainder */
\r
359 /* create mem2 struct */
\r
360 mem2 = (struct mem *)&ram[ptr2];
\r
362 mem2->next = mem->next;
\r
365 if (mem2->next != MEM_SIZE) {
\r
366 ((struct mem *)&ram[mem2->next])->prev = ptr2;
\r
369 else if (mem->next - (ptr + SIZEOF_STRUCT_MEM) > size) {
\r
370 /* near fit, no split, no mem2 creation,
\r
371 round up to mem->next */
\r
375 else if (mem->next - (ptr + SIZEOF_STRUCT_MEM) == size) {
\r
376 /* exact fit, do not split, no mem2 creation */
\r
383 lwip_stats.mem.used += (size + SIZEOF_STRUCT_MEM);
\r
384 if (lwip_stats.mem.max < ptr2) {
\r
385 lwip_stats.mem.max = ptr2;
\r
387 #endif /* MEM_STATS */
\r
388 if (mem == lfree) {
\r
389 /* Find next free block after mem */
\r
390 while (lfree->used && lfree != ram_end) {
\r
391 lfree = (struct mem *)&ram[lfree->next];
\r
393 LWIP_ASSERT("mem_malloc: !lfree->used", !lfree->used);
\r
395 sys_sem_signal(mem_sem);
\r
396 LWIP_ASSERT("mem_malloc: allocated memory not above ram_end.",
\r
397 (mem_ptr_t)mem + SIZEOF_STRUCT_MEM + size <= (mem_ptr_t)ram_end);
\r
398 LWIP_ASSERT("mem_malloc: allocated memory properly aligned.",
\r
399 (unsigned long)((u8_t *)mem + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT == 0);
\r
400 return (u8_t *)mem + SIZEOF_STRUCT_MEM;
\r
404 LWIP_DEBUGF(MEM_DEBUG | 2, ("mem_malloc: could not allocate %"S16_F" bytes\n", (s16_t)size));
\r
406 ++lwip_stats.mem.err;
\r
407 #endif /* MEM_STATS */
\r
408 sys_sem_signal(mem_sem);
\r
413 #endif /* MEM_LIBC_MALLOC == 0 */
\r