/**
 * @file
 * Dynamic memory manager
 *
 * This is a lightweight replacement for the standard C library malloc().
 *
 * If you want to use the standard C library malloc() instead, define
 * MEM_LIBC_MALLOC to 1 in your lwipopts.h
 *
 * To let mem_malloc() use pools (prevents fragmentation and is much faster than
 * a heap but might waste some memory), define MEM_USE_POOLS to 1, define
 * MEM_USE_CUSTOM_POOLS to 1 and create a file "lwippools.h" that includes a list
 * of pools like this (more pools can be added between _START and _END):
 *
 * Define three pools with sizes 256, 512, and 1512 bytes
 * LWIP_MALLOC_MEMPOOL_START
 * LWIP_MALLOC_MEMPOOL(20, 256)
 * LWIP_MALLOC_MEMPOOL(10, 512)
 * LWIP_MALLOC_MEMPOOL(5, 1512)
 * LWIP_MALLOC_MEMPOOL_END
 */
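
/* As a concrete illustration of the options described above, an lwipopts.h
 * fragment selecting the pool-based allocator could look like this (a sketch
 * only; the option values are illustrative, not defaults):
 *
 *   #define MEM_LIBC_MALLOC      0   use this allocator, not libc malloc()
 *   #define MEM_USE_POOLS        1   back mem_malloc() with fixed-size pools
 *   #define MEM_USE_CUSTOM_POOLS 1   pool layout comes from your lwippools.h
 */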

/*
 * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
 * OF SUCH DAMAGE.
 *
 * This file is part of the lwIP TCP/IP stack.
 *
 * Author: Adam Dunkels <adam@sics.se>
 *         Simon Goldschmidt
 *
 */

#include "lwip/opt.h"

#if !MEM_LIBC_MALLOC /* don't build if not configured for use in lwipopts.h */

#include "lwip/def.h"
#include "lwip/mem.h"
#include "lwip/sys.h"
#include "lwip/stats.h"
#include "lwip/err.h"

#include <string.h>

#if MEM_USE_POOLS
/* lwIP heap implemented with different sized pools */

/**
 * Allocate memory: determine the smallest pool that is big enough
 * to contain an element of 'size' and get an element from that pool.
 *
 * @param size the size in bytes of the memory needed
 * @return a pointer to the allocated memory or NULL if the pool is empty
 */
void *
mem_malloc(mem_size_t size)
{
  struct memp_malloc_helper *element;
  memp_t poolnr;
  mem_size_t required_size = size + sizeof(struct memp_malloc_helper);

  for (poolnr = MEMP_POOL_FIRST; poolnr <= MEMP_POOL_LAST; poolnr = (memp_t)(poolnr + 1)) {
#if MEM_USE_POOLS_TRY_BIGGER_POOL
again:
#endif /* MEM_USE_POOLS_TRY_BIGGER_POOL */
    /* is this pool big enough to hold an element of the required size
       plus a struct memp_malloc_helper that saves the pool this element came from? */
    if (required_size <= memp_sizes[poolnr]) {
      break;
    }
  }
  if (poolnr > MEMP_POOL_LAST) {
    LWIP_ASSERT("mem_malloc(): no pool is that big!", 0);
    return NULL;
  }
  element = (struct memp_malloc_helper*)memp_malloc(poolnr);
  if (element == NULL) {
    /* No need to DEBUGF or ASSERT: This error is already
       taken care of in memp.c */
#if MEM_USE_POOLS_TRY_BIGGER_POOL
    /* Try a bigger pool if this one is empty! */
    if (poolnr < MEMP_POOL_LAST) {
      poolnr++;
      goto again;
    }
#endif /* MEM_USE_POOLS_TRY_BIGGER_POOL */
    return NULL;
  }

  /* save the pool number this element came from */
  element->poolnr = poolnr;
  /* and return a pointer to the memory directly after the struct memp_malloc_helper */
  element++;

  return element;
}

/**
 * Free memory previously allocated by mem_malloc. Loads the pool number
 * and calls memp_free with that pool number to put the element back into
 * its pool
 *
 * @param rmem the memory element to free
 */
void
mem_free(void *rmem)
{
  struct memp_malloc_helper *hmem = (struct memp_malloc_helper*)rmem;

  LWIP_ASSERT("rmem != NULL", (rmem != NULL));
  LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem)));

  /* get the original struct memp_malloc_helper */
  hmem--;

  LWIP_ASSERT("hmem != NULL", (hmem != NULL));
  LWIP_ASSERT("hmem == MEM_ALIGN(hmem)", (hmem == LWIP_MEM_ALIGN(hmem)));
  LWIP_ASSERT("hmem->poolnr < MEMP_MAX", (hmem->poolnr < MEMP_MAX));

  /* and put it in the pool we saved earlier */
  memp_free(hmem->poolnr, hmem);
}
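
/* Usage sketch for the pool-backed allocator (assuming the example pools from
 * the file header, i.e. 256/512/1512 bytes): a 300-byte request plus the
 * struct memp_malloc_helper overhead does not fit the 256-byte pool, so it is
 * served from the 512-byte pool and returned there on mem_free():
 *
 *   void *p = mem_malloc(300);
 *   if (p != NULL) {
 *     ...use up to 300 bytes...
 *     mem_free(p);
 *   }
 */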

#else /* MEM_USE_POOLS */
/* lwIP replacement for your libc malloc() */

/**
 * The heap is made up as a list of structs of this type.
 * This does not have to be aligned, since to get its size we only use the
 * macro SIZEOF_STRUCT_MEM, which aligns it automatically.
 */
struct mem {
  /** index (-> ram[next]) of the next struct */
  mem_size_t next;
  /** index (-> ram[prev]) of the previous struct */
  mem_size_t prev;
  /** 1: this area is used; 0: this area is unused */
  u8_t used;
};

/** All allocated blocks will be at least MIN_SIZE bytes big.
 * MIN_SIZE can be overridden to suit your needs: smaller values save space,
 * larger values could prevent too-small blocks from fragmenting the RAM too much. */
#ifndef MIN_SIZE
#define MIN_SIZE             12
#endif /* MIN_SIZE */
/* some alignment macros: we define them here for better source code layout */
#define MIN_SIZE_ALIGNED     LWIP_MEM_ALIGN_SIZE(MIN_SIZE)
#define SIZEOF_STRUCT_MEM    LWIP_MEM_ALIGN_SIZE(sizeof(struct mem))
#define MEM_SIZE_ALIGNED     LWIP_MEM_ALIGN_SIZE(MEM_SIZE)
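
/* Worked example of the macros above, assuming MEM_ALIGNMENT == 4, a 16-bit
 * mem_size_t, and a struct mem of 5 or 6 bytes (two indices plus a u8_t,
 * possibly padded):
 *
 *   SIZEOF_STRUCT_MEM = LWIP_MEM_ALIGN_SIZE(sizeof(struct mem)) = 8
 *   MIN_SIZE_ALIGNED  = LWIP_MEM_ALIGN_SIZE(12)                 = 12
 *
 * so every allocation costs at least SIZEOF_STRUCT_MEM bytes of bookkeeping
 * plus MIN_SIZE_ALIGNED bytes of payload. (The exact numbers depend on your
 * port's alignment and packing; these are illustrative only.)
 */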

/** If you want to relocate the heap to external memory, simply define
 * LWIP_RAM_HEAP_POINTER as a void-pointer to that location.
 * If so, make sure the memory at that location is big enough (see below on
 * how that space is calculated). */
#ifndef LWIP_RAM_HEAP_POINTER
/** the heap. we need one struct mem at the end and some room for alignment */
u8_t ram_heap[MEM_SIZE_ALIGNED + (2*SIZEOF_STRUCT_MEM) + MEM_ALIGNMENT];
#define LWIP_RAM_HEAP_POINTER ram_heap
#endif /* LWIP_RAM_HEAP_POINTER */
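
/* For example, to move the heap into external RAM, one could define in
 * lwipopts.h (the address is hypothetical):
 *
 *   #define LWIP_RAM_HEAP_POINTER ((void *)0xA0000000)
 *
 * The region must provide MEM_SIZE_ALIGNED + 2*SIZEOF_STRUCT_MEM +
 * MEM_ALIGNMENT bytes, mirroring the ram_heap declaration above.
 */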

/** pointer to the heap (ram_heap): for alignment, ram is now a pointer instead of an array */
static u8_t *ram;
/** the last entry, always unused! */
static struct mem *ram_end;
/** pointer to the lowest free block, this is used for faster search */
static struct mem *lfree;

/** concurrent access protection */
static sys_mutex_t mem_mutex;

#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT

static volatile u8_t mem_free_count;

/* Allow mem_free from other (e.g. interrupt) context */
#define LWIP_MEM_FREE_DECL_PROTECT()  SYS_ARCH_DECL_PROTECT(lev_free)
#define LWIP_MEM_FREE_PROTECT()       SYS_ARCH_PROTECT(lev_free)
#define LWIP_MEM_FREE_UNPROTECT()     SYS_ARCH_UNPROTECT(lev_free)
#define LWIP_MEM_ALLOC_DECL_PROTECT() SYS_ARCH_DECL_PROTECT(lev_alloc)
#define LWIP_MEM_ALLOC_PROTECT()      SYS_ARCH_PROTECT(lev_alloc)
#define LWIP_MEM_ALLOC_UNPROTECT()    SYS_ARCH_UNPROTECT(lev_alloc)

#else /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */

/* Protect the heap only by using a mutex */
#define LWIP_MEM_FREE_DECL_PROTECT()
#define LWIP_MEM_FREE_PROTECT()    sys_mutex_lock(&mem_mutex)
#define LWIP_MEM_FREE_UNPROTECT()  sys_mutex_unlock(&mem_mutex)
/* mem_malloc is protected using the mutex AND LWIP_MEM_ALLOC_PROTECT */
#define LWIP_MEM_ALLOC_DECL_PROTECT()
#define LWIP_MEM_ALLOC_PROTECT()
#define LWIP_MEM_ALLOC_UNPROTECT()

#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
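
/* To select the first branch above, set in lwipopts.h:
 *
 *   #define LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT 1
 *
 * mem_free() and mem_trim() then synchronize via SYS_ARCH_PROTECT() critical
 * sections instead of the mutex, so they may be called from other contexts
 * (e.g. an ISR); mem_malloc() itself still takes the mutex and must not be
 * called from interrupt context.
 */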

/**
 * "Plug holes" by combining adjacent free struct mems.
 * After this function returns, no free struct mem should point
 * to another free struct mem.
 *
 * @param mem this points to a struct mem which has just been freed
 * @internal this function is only called by mem_free() and mem_trim()
 *
 * This assumes access to the heap is already protected by the calling
 * function.
 */
static void
plug_holes(struct mem *mem)
{
  struct mem *nmem;
  struct mem *pmem;

  LWIP_ASSERT("plug_holes: mem >= ram", (u8_t *)mem >= ram);
  LWIP_ASSERT("plug_holes: mem < ram_end", (u8_t *)mem < (u8_t *)ram_end);
  LWIP_ASSERT("plug_holes: mem->used == 0", mem->used == 0);

  /* plug hole forward */
  LWIP_ASSERT("plug_holes: mem->next <= MEM_SIZE_ALIGNED", mem->next <= MEM_SIZE_ALIGNED);

  nmem = (struct mem *)(void *)&ram[mem->next];
  if (mem != nmem && nmem->used == 0 && (u8_t *)nmem != (u8_t *)ram_end) {
    /* if mem->next is unused and not end of ram, combine mem and mem->next */
    if (lfree == nmem) {
      lfree = mem;
    }
    mem->next = nmem->next;
    ((struct mem *)(void *)&ram[nmem->next])->prev = (mem_size_t)((u8_t *)mem - ram);
  }

  /* plug hole backward */
  pmem = (struct mem *)(void *)&ram[mem->prev];
  if (pmem != mem && pmem->used == 0) {
    /* if mem->prev is unused, combine mem and mem->prev */
    if (lfree == mem) {
      lfree = pmem;
    }
    pmem->next = mem->next;
    ((struct mem *)(void *)&ram[mem->next])->prev = (mem_size_t)((u8_t *)pmem - ram);
  }
}
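
/* Illustration: after freeing B in [A:free][B:just freed][C:free], the
 * forward plug merges B with C, and the backward plug then merges A with the
 * combined block, leaving a single free region [A+B+C]; lfree is moved back
 * to the lowest surviving free block where necessary.
 */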

/**
 * Initialize the heap: set up its start and end markers and the
 * lowest-free pointer
 */
void
lwip_mem_init(void)
{
  struct mem *mem;

  LWIP_ASSERT("Sanity check alignment",
    (SIZEOF_STRUCT_MEM & (MEM_ALIGNMENT-1)) == 0);

  /* align the heap */
  ram = (u8_t *)LWIP_MEM_ALIGN(LWIP_RAM_HEAP_POINTER);
  /* initialize the start of the heap */
  mem = (struct mem *)(void *)ram;
  mem->next = MEM_SIZE_ALIGNED;
  mem->prev = 0;
  mem->used = 0;
  /* initialize the end of the heap */
  ram_end = (struct mem *)(void *)&ram[MEM_SIZE_ALIGNED];
  ram_end->used = 1;
  ram_end->next = MEM_SIZE_ALIGNED;
  ram_end->prev = MEM_SIZE_ALIGNED;

  /* initialize the lowest-free pointer to the start of the heap */
  lfree = (struct mem *)(void *)ram;

  MEM_STATS_AVAIL(avail, MEM_SIZE_ALIGNED);

  if (sys_mutex_new(&mem_mutex) != ERR_OK) {
    LWIP_ASSERT("failed to create mem_mutex", 0);
  }
}

/**
 * Put a struct mem back on the heap
 *
 * @param rmem is the data portion of a struct mem as returned by a previous
 *             call to mem_malloc()
 */
void
mem_free(void *rmem)
{
  struct mem *mem;
  LWIP_MEM_FREE_DECL_PROTECT();

  if (rmem == NULL) {
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("mem_free(p == NULL) was called.\n"));
    return;
  }
  LWIP_ASSERT("mem_free: sanity check alignment", (((mem_ptr_t)rmem) & (MEM_ALIGNMENT-1)) == 0);

  LWIP_ASSERT("mem_free: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
    (u8_t *)rmem < (u8_t *)ram_end);

  if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
    SYS_ARCH_DECL_PROTECT(lev);
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory\n"));
    /* protect mem stats from concurrent access */
    SYS_ARCH_PROTECT(lev);
    MEM_STATS_INC(illegal);
    SYS_ARCH_UNPROTECT(lev);
    return;
  }
  /* protect the heap from concurrent access */
  LWIP_MEM_FREE_PROTECT();
  /* Get the corresponding struct mem ... */
  mem = (struct mem *)(void *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
  /* ... which has to be in a used state ... */
  LWIP_ASSERT("mem_free: mem->used", mem->used);
  /* ... and is now unused. */
  mem->used = 0;

  if (mem < lfree) {
    /* the newly freed struct is now the lowest */
    lfree = mem;
  }

  MEM_STATS_DEC_USED(used, mem->next - (mem_size_t)(((u8_t *)mem - ram)));

  /* finally, see if prev or next are free also */
  plug_holes(mem);
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  mem_free_count = 1;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_MEM_FREE_UNPROTECT();
}

/**
 * Shrink memory returned by mem_malloc().
 *
 * @param rmem pointer to memory allocated by mem_malloc() that is to be shrunk
 * @param newsize required size after shrinking (needs to be smaller than or
 *                equal to the previous size)
 * @return for compatibility reasons: at the moment always == rmem,
 *         or NULL if newsize is > old size, in which case rmem is NOT touched
 *         or freed!
 */
void *
mem_trim(void *rmem, mem_size_t newsize)
{
  mem_size_t size;
  mem_size_t ptr, ptr2;
  struct mem *mem, *mem2;
  /* use the FREE_PROTECT here: it protects with the mutex OR SYS_ARCH_PROTECT */
  LWIP_MEM_FREE_DECL_PROTECT();

  /* Expand the size of the allocated memory region so that we can
     adjust for alignment. */
  newsize = LWIP_MEM_ALIGN_SIZE(newsize);

  if (newsize < MIN_SIZE_ALIGNED) {
    /* every data block must be at least MIN_SIZE_ALIGNED long */
    newsize = MIN_SIZE_ALIGNED;
  }

  if (newsize > MEM_SIZE_ALIGNED) {
    return NULL;
  }

  LWIP_ASSERT("mem_trim: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
    (u8_t *)rmem < (u8_t *)ram_end);

  if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
    SYS_ARCH_DECL_PROTECT(lev);
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_trim: illegal memory\n"));
    /* protect mem stats from concurrent access */
    SYS_ARCH_PROTECT(lev);
    MEM_STATS_INC(illegal);
    SYS_ARCH_UNPROTECT(lev);
    return rmem;
  }
  /* Get the corresponding struct mem ... */
  mem = (struct mem *)(void *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
  /* ... and its offset pointer */
  ptr = (mem_size_t)((u8_t *)mem - ram);

  size = mem->next - ptr - SIZEOF_STRUCT_MEM;
  LWIP_ASSERT("mem_trim can only shrink memory", newsize <= size);
  if (newsize > size) {
    /* not supported */
    return NULL;
  }
  if (newsize == size) {
    /* No change in size, simply return */
    return rmem;
  }

  /* protect the heap from concurrent access */
  LWIP_MEM_FREE_PROTECT();

  mem2 = (struct mem *)(void *)&ram[mem->next];
  if (mem2->used == 0) {
    /* The next struct is unused, we can simply move it a little */
    mem_size_t next;
    /* remember the old next pointer */
    next = mem2->next;
    /* create new struct mem which is moved directly after the shrunk mem */
    ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
    if (lfree == mem2) {
      lfree = (struct mem *)(void *)&ram[ptr2];
    }
    mem2 = (struct mem *)(void *)&ram[ptr2];
    mem2->used = 0;
    /* restore the next pointer */
    mem2->next = next;
    /* link it back to mem */
    mem2->prev = ptr;
    /* link mem to it */
    mem->next = ptr2;
    /* last thing to restore linked list: as we have moved mem2,
     * let 'mem2->next->prev' point to mem2 again. but only if mem2->next is not
     * the end of the heap */
    if (mem2->next != MEM_SIZE_ALIGNED) {
      ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
    }
    MEM_STATS_DEC_USED(used, (size - newsize));
    /* no need to plug holes, we've already done that */
  } else if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED <= size) {
    /* Next struct is used but there's room for another struct mem with
     * at least MIN_SIZE_ALIGNED of data.
     * Old size ('size') must be big enough to contain at least 'newsize' plus a struct mem
     * ('SIZEOF_STRUCT_MEM') with some data ('MIN_SIZE_ALIGNED').
     * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
     *       region that couldn't hold data, but when mem->next gets freed,
     *       the 2 regions would be combined, resulting in more free memory */
    ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
    mem2 = (struct mem *)(void *)&ram[ptr2];
    if (mem2 < lfree) {
      lfree = mem2;
    }
    mem2->used = 0;
    mem2->next = mem->next;
    mem2->prev = ptr;
    mem->next = ptr2;
    if (mem2->next != MEM_SIZE_ALIGNED) {
      ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
    }
    MEM_STATS_DEC_USED(used, (size - newsize));
    /* the original mem->next is used, so no need to plug holes! */
  }
  /* else {
    next struct mem is used but the size between mem and mem2 is not big enough
    to create another struct mem
    -> don't do anything.
    -> the remaining space stays unused since it is too small
  } */
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  mem_free_count = 1;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_MEM_FREE_UNPROTECT();
  return rmem;
}
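
/* Usage sketch: shrink an over-sized buffer in place once the real length is
 * known (the sizes and the fill_data() helper are hypothetical):
 *
 *   char *buf = (char *)mem_malloc(1500);
 *   if (buf != NULL) {
 *     mem_size_t len = fill_data(buf);
 *     buf = (char *)mem_trim(buf, len);
 *     ...
 *     mem_free(buf);
 *   }
 *
 * Since mem_trim() only shrinks, the returned pointer equals the original;
 * the block is never moved.
 */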

/**
 * Adam's mem_malloc() plus solution for bug #17922
 * Allocate a block of memory with a minimum of 'size' bytes.
 *
 * @param size is the minimum size of the requested block in bytes.
 * @return pointer to allocated memory or NULL if no free memory was found.
 *
 * Note that the returned value will always be aligned (as defined by MEM_ALIGNMENT).
 */
void *
mem_malloc(mem_size_t size)
{
  mem_size_t ptr, ptr2;
  struct mem *mem, *mem2;
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  u8_t local_mem_free_count = 0;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_MEM_ALLOC_DECL_PROTECT();

  if (size == 0) {
    return NULL;
  }

  /* Expand the size of the allocated memory region so that we can
     adjust for alignment. */
  size = LWIP_MEM_ALIGN_SIZE(size);

  if (size < MIN_SIZE_ALIGNED) {
    /* every data block must be at least MIN_SIZE_ALIGNED long */
    size = MIN_SIZE_ALIGNED;
  }

  if (size > MEM_SIZE_ALIGNED) {
    return NULL;
  }

  /* protect the heap from concurrent access */
  sys_mutex_lock(&mem_mutex);
  LWIP_MEM_ALLOC_PROTECT();
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  /* run as long as a mem_free disturbed mem_malloc */
  do {
    local_mem_free_count = 0;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */

    /* Scan through the heap searching for a free block that is big enough,
     * beginning with the lowest free block.
     */
    for (ptr = (mem_size_t)((u8_t *)lfree - ram); ptr < MEM_SIZE_ALIGNED - size;
         ptr = ((struct mem *)(void *)&ram[ptr])->next) {
      mem = (struct mem *)(void *)&ram[ptr];
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
      mem_free_count = 0;
      LWIP_MEM_ALLOC_UNPROTECT();
      /* allow mem_free to run */
      LWIP_MEM_ALLOC_PROTECT();
      if (mem_free_count != 0) {
        local_mem_free_count = mem_free_count;
      }
      mem_free_count = 0;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */

      if ((!mem->used) &&
          (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size) {
        /* mem is not used and at least perfect fit is possible:
         * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */

        if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >= (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED)) {
          /* (in addition to the above, we test if another struct mem (SIZEOF_STRUCT_MEM) containing
           * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem')
           * -> split large block, create empty remainder,
           * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if
           * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size,
           * struct mem would fit in but no data between mem2 and mem2->next
           * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
           *       region that couldn't hold data, but when mem->next gets freed,
           *       the 2 regions would be combined, resulting in more free memory
           */
          ptr2 = ptr + SIZEOF_STRUCT_MEM + size;
          /* create mem2 struct */
          mem2 = (struct mem *)(void *)&ram[ptr2];
          mem2->used = 0;
          mem2->next = mem->next;
          mem2->prev = ptr;
          /* and insert it between mem and mem->next */
          mem->next = ptr2;
          mem->used = 1;

          if (mem2->next != MEM_SIZE_ALIGNED) {
            ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
          }
          MEM_STATS_INC_USED(used, (size + SIZEOF_STRUCT_MEM));
        } else {
          /* (a mem2 struct does not fit into the user data space of mem, and mem->next will
           * always be used at this point: if not, we would have 2 unused structs in a row and
           * plug_holes should have taken care of this).
           * -> near fit or exact fit: do not split, no mem2 creation,
           * also can't move mem->next directly behind mem, since mem->next
           * will always be used at this point!
           */
          mem->used = 1;
          MEM_STATS_INC_USED(used, mem->next - (mem_size_t)((u8_t *)mem - ram));
        }

        if (mem == lfree) {
          /* Find next free block after mem and update lowest free pointer */
          while (lfree->used && lfree != ram_end) {
            LWIP_MEM_ALLOC_UNPROTECT();
            /* prevent high interrupt latency... */
            LWIP_MEM_ALLOC_PROTECT();
            lfree = (struct mem *)(void *)&ram[lfree->next];
          }
          LWIP_ASSERT("mem_malloc: !lfree->used", ((lfree == ram_end) || (!lfree->used)));
        }
        LWIP_MEM_ALLOC_UNPROTECT();
        sys_mutex_unlock(&mem_mutex);
        LWIP_ASSERT("mem_malloc: allocated memory not above ram_end.",
          (mem_ptr_t)mem + SIZEOF_STRUCT_MEM + size <= (mem_ptr_t)ram_end);
        LWIP_ASSERT("mem_malloc: allocated memory properly aligned.",
          ((mem_ptr_t)mem + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT == 0);
        LWIP_ASSERT("mem_malloc: sanity check alignment",
          (((mem_ptr_t)mem) & (MEM_ALIGNMENT-1)) == 0);

        return (u8_t *)mem + SIZEOF_STRUCT_MEM;
      }
    }
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
    /* if we got interrupted by a mem_free, try again */
  } while (local_mem_free_count != 0);
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("mem_malloc: could not allocate %"S16_F" bytes\n", (s16_t)size));
  MEM_STATS_INC(err);
  LWIP_MEM_ALLOC_UNPROTECT();
  sys_mutex_unlock(&mem_mutex);
  return NULL;
}

#endif /* MEM_USE_POOLS */

/**
 * Contiguously allocates enough space for count objects that are size bytes
 * of memory each and returns a pointer to the allocated memory.
 *
 * The allocated memory is filled with bytes of value zero.
 *
 * @param count number of objects to allocate
 * @param size size of the objects to allocate
 * @return pointer to allocated memory / NULL pointer if there is an error
 */
void *
mem_calloc(mem_size_t count, mem_size_t size)
{
  void *p;

  /* allocate 'count' objects of size 'size' */
  p = mem_malloc(count * size);
  if (p) {
    /* zero the memory */
    memset(p, 0, count * size);
  }
  return p;
}
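
/* Usage sketch: allocate a zeroed array of ten hypothetical 'struct record'
 * objects:
 *
 *   struct record *tbl = (struct record *)mem_calloc(10, sizeof(struct record));
 *
 * Note that, as the code above shows, 'count * size' is computed in
 * mem_size_t with no overflow check, so callers must keep the product in
 * range themselves.
 */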

#endif /* !MEM_LIBC_MALLOC */