mem.c

/**
 * @file
 * Dynamic memory manager
 *
 * This is a lightweight replacement for the standard C library malloc().
 *
 * If you want to use the standard C library malloc() instead, define
 * MEM_LIBC_MALLOC to 1 in your lwipopts.h
 *
 * To let mem_malloc() use pools (prevents fragmentation and is much faster than
 * a heap but might waste some memory), define MEM_USE_POOLS to 1, define
 * MEMP_USE_CUSTOM_POOLS to 1 and create a file "lwippools.h" that includes a list
 * of pools like this (more pools can be added between _START and _END):
 *
 * Define three pools with sizes 256, 512, and 1512 bytes:
 * LWIP_MALLOC_MEMPOOL_START
 * LWIP_MALLOC_MEMPOOL(20, 256)
 * LWIP_MALLOC_MEMPOOL(10, 512)
 * LWIP_MALLOC_MEMPOOL(5, 1512)
 * LWIP_MALLOC_MEMPOOL_END
 */
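/* For illustration only (editor's note): a minimal lwipopts.h fragment that
 * selects the pool-based allocator described above. The option names are the
 * real lwIP ones; the pool sizes/counts in lwippools.h are arbitrary examples:
 *
 *   #define MEM_LIBC_MALLOC       0
 *   #define MEM_USE_POOLS         1
 *   #define MEMP_USE_CUSTOM_POOLS 1
 *
 * together with an lwippools.h containing the LWIP_MALLOC_MEMPOOL list shown
 * in the header comment.
 */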
/*
 * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
 * OF SUCH DAMAGE.
 *
 * This file is part of the lwIP TCP/IP stack.
 *
 * Author: Adam Dunkels <adam@sics.se>
 *         Simon Goldschmidt
 *
 */
#include "lwip/opt.h"
#include "lwip/mem.h"
#include "lwip/def.h"
#include "lwip/sys.h"
#include "lwip/stats.h"
#include "lwip/err.h"

#include <string.h>

#if MEM_LIBC_MALLOC
#include <stdlib.h> /* for malloc()/free() */
#endif

/* This is overridable for tests only... */
#ifndef LWIP_MEM_ILLEGAL_FREE
#define LWIP_MEM_ILLEGAL_FREE(msg) LWIP_ASSERT(msg, 0)
#endif

#define MEM_STATS_INC_LOCKED(x)         SYS_ARCH_LOCKED(MEM_STATS_INC(x))
#define MEM_STATS_INC_USED_LOCKED(x, y) SYS_ARCH_LOCKED(MEM_STATS_INC_USED(x, y))
#define MEM_STATS_DEC_USED_LOCKED(x, y) SYS_ARCH_LOCKED(MEM_STATS_DEC_USED(x, y))

#if MEM_OVERFLOW_CHECK
#define MEM_SANITY_OFFSET   MEM_SANITY_REGION_BEFORE_ALIGNED
#define MEM_SANITY_OVERHEAD (MEM_SANITY_REGION_BEFORE_ALIGNED + MEM_SANITY_REGION_AFTER_ALIGNED)
#else
#define MEM_SANITY_OFFSET   0
#define MEM_SANITY_OVERHEAD 0
#endif
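/* Illustration (editor's note, not in the original source): with
 * MEM_OVERFLOW_CHECK enabled, every element carries two guard regions filled
 * with the 0xcd pattern:
 *
 *   | before-guard | user data (size bytes) | after-guard |
 *     ^ MEM_SANITY_REGION_BEFORE_ALIGNED      ^ MEM_SANITY_REGION_AFTER_ALIGNED
 *
 * mem_overflow_init_raw() below writes the pattern; mem_overflow_check_raw()
 * verifies it is still intact when the element is freed or trimmed.
 */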
#if MEM_OVERFLOW_CHECK || MEMP_OVERFLOW_CHECK
/**
 * Check if a mem element was the victim of an overflow or underflow
 * (e.g. the restricted area after/before it has been altered)
 *
 * @param p the mem element to check
 * @param size allocated size of the element
 * @param descr1 description of the element source shown on error
 * @param descr2 description of the element source shown on error
 */
void
mem_overflow_check_raw(void *p, size_t size, const char *descr1, const char *descr2)
{
#if MEM_SANITY_REGION_AFTER_ALIGNED || MEM_SANITY_REGION_BEFORE_ALIGNED
  u16_t k;
  u8_t *m;

#if MEM_SANITY_REGION_AFTER_ALIGNED > 0
  m = (u8_t *)p + size;
  for (k = 0; k < MEM_SANITY_REGION_AFTER_ALIGNED; k++) {
    if (m[k] != 0xcd) {
      char errstr[128];
      snprintf(errstr, sizeof(errstr), "detected mem overflow in %s%s", descr1, descr2);
      LWIP_ASSERT(errstr, 0);
    }
  }
#endif /* MEM_SANITY_REGION_AFTER_ALIGNED > 0 */
#if MEM_SANITY_REGION_BEFORE_ALIGNED > 0
  m = (u8_t *)p - MEM_SANITY_REGION_BEFORE_ALIGNED;
  for (k = 0; k < MEM_SANITY_REGION_BEFORE_ALIGNED; k++) {
    if (m[k] != 0xcd) {
      char errstr[128];
      snprintf(errstr, sizeof(errstr), "detected mem underflow in %s%s", descr1, descr2);
      LWIP_ASSERT(errstr, 0);
    }
  }
#endif /* MEM_SANITY_REGION_BEFORE_ALIGNED > 0 */
#else
  LWIP_UNUSED_ARG(p);
  LWIP_UNUSED_ARG(size);
  LWIP_UNUSED_ARG(descr1);
  LWIP_UNUSED_ARG(descr2);
#endif
}
/**
 * Initialize the restricted area of a mem element.
 */
void
mem_overflow_init_raw(void *p, size_t size)
{
#if MEM_SANITY_REGION_BEFORE_ALIGNED > 0 || MEM_SANITY_REGION_AFTER_ALIGNED > 0
  u8_t *m;
#if MEM_SANITY_REGION_BEFORE_ALIGNED > 0
  m = (u8_t *)p - MEM_SANITY_REGION_BEFORE_ALIGNED;
  memset(m, 0xcd, MEM_SANITY_REGION_BEFORE_ALIGNED);
#endif
#if MEM_SANITY_REGION_AFTER_ALIGNED > 0
  m = (u8_t *)p + size;
  memset(m, 0xcd, MEM_SANITY_REGION_AFTER_ALIGNED);
#endif
#else /* MEM_SANITY_REGION_BEFORE_ALIGNED > 0 || MEM_SANITY_REGION_AFTER_ALIGNED > 0 */
  LWIP_UNUSED_ARG(p);
  LWIP_UNUSED_ARG(size);
#endif /* MEM_SANITY_REGION_BEFORE_ALIGNED > 0 || MEM_SANITY_REGION_AFTER_ALIGNED > 0 */
}
#endif /* MEM_OVERFLOW_CHECK || MEMP_OVERFLOW_CHECK */
#if MEM_LIBC_MALLOC || MEM_USE_POOLS

/** mem_init is not used when using pools instead of a heap or using
 * C library malloc().
 */
void
mem_init(void)
{
}

/** mem_trim is not used when using pools instead of a heap or using
 * C library malloc(): we can't free part of a pool element, and the stack
 * supports mem_trim() returning a different pointer.
 */
void *
mem_trim(void *mem, mem_size_t size)
{
  LWIP_UNUSED_ARG(size);
  return mem;
}
#endif /* MEM_LIBC_MALLOC || MEM_USE_POOLS */
#if MEM_LIBC_MALLOC
/* lwIP heap implemented using C library malloc() */

/* in case C library malloc() needs extra protection,
 * allow these defines to be overridden.
 */
#ifndef mem_clib_free
#define mem_clib_free free
#endif
#ifndef mem_clib_malloc
#define mem_clib_malloc malloc
#endif
#ifndef mem_clib_calloc
#define mem_clib_calloc calloc
#endif

#if LWIP_STATS && MEM_STATS
#define MEM_LIBC_STATSHELPER_SIZE LWIP_MEM_ALIGN_SIZE(sizeof(mem_size_t))
#else
#define MEM_LIBC_STATSHELPER_SIZE 0
#endif
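/* Illustration (editor's note, not in the original source): with MEM_STATS
 * enabled, mem_malloc() below over-allocates by MEM_LIBC_STATSHELPER_SIZE and
 * stores the requested size in front of the user pointer, so mem_free() can
 * account for it later:
 *
 *   | mem_size_t size | user data ... |
 *   ^ mem_clib_malloc() result
 *                     ^ pointer returned to the caller
 */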
/**
 * Allocate a block of memory with a minimum of 'size' bytes.
 *
 * @param size is the minimum size of the requested block in bytes.
 * @return pointer to allocated memory or NULL if no free memory was found.
 *
 * Note that the returned value must always be aligned (as defined by MEM_ALIGNMENT).
 */
void *
mem_malloc(mem_size_t size)
{
  void *ret = mem_clib_malloc(size + MEM_LIBC_STATSHELPER_SIZE);
  if (ret == NULL) {
    MEM_STATS_INC_LOCKED(err);
  } else {
    LWIP_ASSERT("malloc() must return aligned memory", LWIP_MEM_ALIGN(ret) == ret);
#if LWIP_STATS && MEM_STATS
    *(mem_size_t *)ret = size;
    ret = (u8_t *)ret + MEM_LIBC_STATSHELPER_SIZE;
    MEM_STATS_INC_USED_LOCKED(used, size);
#endif
  }
  return ret;
}

/** Put memory back on the heap
 *
 * @param rmem is the pointer as returned by a previous call to mem_malloc()
 */
void
mem_free(void *rmem)
{
  LWIP_ASSERT("rmem != NULL", (rmem != NULL));
  LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem)));
#if LWIP_STATS && MEM_STATS
  rmem = (u8_t *)rmem - MEM_LIBC_STATSHELPER_SIZE;
  MEM_STATS_DEC_USED_LOCKED(used, *(mem_size_t *)rmem);
#endif
  mem_clib_free(rmem);
}
#elif MEM_USE_POOLS

/* lwIP heap implemented with different sized pools */

/**
 * Allocate memory: determine the smallest pool that is big enough
 * to contain an element of 'size' and get an element from that pool.
 *
 * @param size the size in bytes of the memory needed
 * @return a pointer to the allocated memory or NULL if the pool is empty
 */
void *
mem_malloc(mem_size_t size)
{
  void *ret;
  struct memp_malloc_helper *element = NULL;
  memp_t poolnr;
  mem_size_t required_size = size + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper));

  for (poolnr = MEMP_POOL_FIRST; poolnr <= MEMP_POOL_LAST; poolnr = (memp_t)(poolnr + 1)) {
    /* is this pool big enough to hold an element of the required size
       plus a struct memp_malloc_helper that saves the pool this element came from? */
    if (required_size <= memp_pools[poolnr]->size) {
      element = (struct memp_malloc_helper *)memp_malloc(poolnr);
      if (element == NULL) {
        /* No need to DEBUGF or ASSERT: This error is already taken care of in memp.c */
#if MEM_USE_POOLS_TRY_BIGGER_POOL
        /* Try a bigger pool if this one is empty! */
        if (poolnr < MEMP_POOL_LAST) {
          continue;
        }
#endif /* MEM_USE_POOLS_TRY_BIGGER_POOL */
        MEM_STATS_INC_LOCKED(err);
        return NULL;
      }
      break;
    }
  }
  if (poolnr > MEMP_POOL_LAST) {
    LWIP_ASSERT("mem_malloc(): no pool is that big!", 0);
    MEM_STATS_INC_LOCKED(err);
    return NULL;
  }

  /* save the pool number this element came from */
  element->poolnr = poolnr;
  /* and return a pointer to the memory directly after the struct memp_malloc_helper */
  ret = (u8_t *)element + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper));

#if MEMP_OVERFLOW_CHECK || (LWIP_STATS && MEM_STATS)
  /* truncating to u16_t is safe because struct memp_desc::size is u16_t */
  element->size = (u16_t)size;
  MEM_STATS_INC_USED_LOCKED(used, element->size);
#endif /* MEMP_OVERFLOW_CHECK || (LWIP_STATS && MEM_STATS) */
#if MEMP_OVERFLOW_CHECK
  /* initialize unused memory (diff between requested size and selected pool's size) */
  memset((u8_t *)ret + size, 0xcd, memp_pools[poolnr]->size - size);
#endif /* MEMP_OVERFLOW_CHECK */
  return ret;
}
/**
 * Free memory previously allocated by mem_malloc. Loads the pool number
 * and calls memp_free with that pool number to put the element back into
 * its pool
 *
 * @param rmem the memory element to free
 */
void
mem_free(void *rmem)
{
  struct memp_malloc_helper *hmem;

  LWIP_ASSERT("rmem != NULL", (rmem != NULL));
  LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem)));

  /* get the original struct memp_malloc_helper */
  /* cast through void* to get rid of alignment warnings */
  hmem = (struct memp_malloc_helper *)(void *)((u8_t *)rmem - LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper)));

  LWIP_ASSERT("hmem != NULL", (hmem != NULL));
  LWIP_ASSERT("hmem == MEM_ALIGN(hmem)", (hmem == LWIP_MEM_ALIGN(hmem)));
  LWIP_ASSERT("hmem->poolnr < MEMP_MAX", (hmem->poolnr < MEMP_MAX));

  MEM_STATS_DEC_USED_LOCKED(used, hmem->size);
#if MEMP_OVERFLOW_CHECK
  {
    u16_t i;
    LWIP_ASSERT("MEM_USE_POOLS: invalid chunk size",
                hmem->size <= memp_pools[hmem->poolnr]->size);
    /* check that unused memory remained untouched (diff between requested size and selected pool's size) */
    for (i = hmem->size; i < memp_pools[hmem->poolnr]->size; i++) {
      u8_t data = *((u8_t *)rmem + i);
      LWIP_ASSERT("MEM_USE_POOLS: mem overflow detected", data == 0xcd);
    }
  }
#endif /* MEMP_OVERFLOW_CHECK */

  /* and put it in the pool we saved earlier */
  memp_free(hmem->poolnr, hmem);
}
#else /* MEM_USE_POOLS */
/* lwIP replacement for your libc malloc() */

/**
 * The heap is made up as a list of structs of this type.
 * This does not have to be aligned since for getting its size,
 * we only use the macro SIZEOF_STRUCT_MEM, which automatically aligns.
 */
struct mem {
  /** index (-> ram[next]) of the next struct */
  mem_size_t next;
  /** index (-> ram[prev]) of the previous struct */
  mem_size_t prev;
  /** 1: this area is used; 0: this area is unused */
  u8_t used;
#if MEM_OVERFLOW_CHECK
  /** this keeps track of the user allocation size for guard checks */
  mem_size_t user_size;
#endif
};
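/* Illustration (editor's note, not part of the original source): the heap is a
 * single byte array 'ram'. Each block starts with a struct mem header whose
 * 'next'/'prev' fields are byte offsets into 'ram', followed by the user data:
 *
 *   ram: | hdr | data ... | hdr | data ... | ... | hdr (ram_end, always used) |
 *
 * For example, after mem_malloc(100) on a port where MEM_ALIGNMENT == 4 and
 * SIZEOF_STRUCT_MEM works out to 8, the first block would occupy offsets
 * 0..107: header at 0, user data at 8, and mem->next == 108.
 */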
/** All allocated blocks will be MIN_SIZE bytes big, at least!
 * MIN_SIZE can be overridden to suit your needs. Smaller values save space,
 * larger values can prevent many small blocks from fragmenting the RAM too much. */
#ifndef MIN_SIZE
#define MIN_SIZE 12
#endif /* MIN_SIZE */

/* some alignment macros: we define them here for better source code layout */
#define MIN_SIZE_ALIGNED  LWIP_MEM_ALIGN_SIZE(MIN_SIZE)
#define SIZEOF_STRUCT_MEM LWIP_MEM_ALIGN_SIZE(sizeof(struct mem))
#define MEM_SIZE_ALIGNED  LWIP_MEM_ALIGN_SIZE(MEM_SIZE)

/** If you want to relocate the heap to external memory, simply define
 * LWIP_RAM_HEAP_POINTER as a void-pointer to that location.
 * If so, make sure the memory at that location is big enough (see below on
 * how that space is calculated). */
#ifndef LWIP_RAM_HEAP_POINTER
/** the heap. we need one struct mem at the end and some room for alignment */
LWIP_DECLARE_MEMORY_ALIGNED(ram_heap, MEM_SIZE_ALIGNED + (2U * SIZEOF_STRUCT_MEM));
#define LWIP_RAM_HEAP_POINTER ram_heap
#endif /* LWIP_RAM_HEAP_POINTER */
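/* Illustration (editor's note): to place the heap in external RAM, a port's
 * lwipopts.h could define something like the following. The address is a
 * made-up example; the region must provide at least
 * MEM_SIZE_ALIGNED + 2U * SIZEOF_STRUCT_MEM bytes (as in the declaration above):
 *
 *   #define LWIP_RAM_HEAP_POINTER ((void *)0xC0000000)
 */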
/** pointer to the heap (ram_heap): for alignment, ram is now a pointer instead of an array */
static u8_t *ram;
/** the last entry, always unused! */
static struct mem *ram_end;

/** concurrent access protection */
#if !NO_SYS
static sys_mutex_t mem_mutex;
#endif

#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT

static volatile u8_t mem_free_count;

/* Allow mem_free from other (e.g. interrupt) context */
#define LWIP_MEM_FREE_DECL_PROTECT()  SYS_ARCH_DECL_PROTECT(lev_free)
#define LWIP_MEM_FREE_PROTECT()       SYS_ARCH_PROTECT(lev_free)
#define LWIP_MEM_FREE_UNPROTECT()     SYS_ARCH_UNPROTECT(lev_free)
#define LWIP_MEM_ALLOC_DECL_PROTECT() SYS_ARCH_DECL_PROTECT(lev_alloc)
#define LWIP_MEM_ALLOC_PROTECT()      SYS_ARCH_PROTECT(lev_alloc)
#define LWIP_MEM_ALLOC_UNPROTECT()    SYS_ARCH_UNPROTECT(lev_alloc)
#define LWIP_MEM_LFREE_VOLATILE       volatile

#else /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */

/* Protect the heap only by using a mutex */
#define LWIP_MEM_FREE_DECL_PROTECT()
#define LWIP_MEM_FREE_PROTECT()   sys_mutex_lock(&mem_mutex)
#define LWIP_MEM_FREE_UNPROTECT() sys_mutex_unlock(&mem_mutex)
/* mem_malloc is protected using mutex AND LWIP_MEM_ALLOC_PROTECT */
#define LWIP_MEM_ALLOC_DECL_PROTECT()
#define LWIP_MEM_ALLOC_PROTECT()
#define LWIP_MEM_ALLOC_UNPROTECT()
#define LWIP_MEM_LFREE_VOLATILE

#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */

/** pointer to the lowest free block, this is used for faster search */
static struct mem * LWIP_MEM_LFREE_VOLATILE lfree;

#if MEM_SANITY_CHECK
static void mem_sanity(void);
#define MEM_SANITY() mem_sanity()
#else
#define MEM_SANITY()
#endif
#if MEM_OVERFLOW_CHECK
static void
mem_overflow_init_element(struct mem *mem, mem_size_t user_size)
{
  void *p = (u8_t *)mem + SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET;
  mem->user_size = user_size;
  mem_overflow_init_raw(p, user_size);
}

static void
mem_overflow_check_element(struct mem *mem)
{
  void *p = (u8_t *)mem + SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET;
  mem_overflow_check_raw(p, mem->user_size, "heap", "");
}
#else /* MEM_OVERFLOW_CHECK */
#define mem_overflow_init_element(mem, size)
#define mem_overflow_check_element(mem)
#endif /* MEM_OVERFLOW_CHECK */

static struct mem *
ptr_to_mem(mem_size_t ptr)
{
  return (struct mem *)(void *)&ram[ptr];
}

static mem_size_t
mem_to_ptr(void *mem)
{
  return (mem_size_t)((u8_t *)mem - ram);
}
/**
 * "Plug holes" by combining adjacent empty struct mems.
 * After this function is through, there should not exist
 * one empty struct mem pointing to another empty struct mem.
 *
 * @param mem this points to a struct mem which just has been freed
 * @internal this function is only called by mem_free() and mem_trim()
 *
 * This assumes access to the heap is protected by the calling function
 * already.
 */
static void
plug_holes(struct mem *mem)
{
  struct mem *nmem;
  struct mem *pmem;

  LWIP_ASSERT("plug_holes: mem >= ram", (u8_t *)mem >= ram);
  LWIP_ASSERT("plug_holes: mem < ram_end", (u8_t *)mem < (u8_t *)ram_end);
  LWIP_ASSERT("plug_holes: mem->used == 0", mem->used == 0);

  /* plug hole forward */
  LWIP_ASSERT("plug_holes: mem->next <= MEM_SIZE_ALIGNED", mem->next <= MEM_SIZE_ALIGNED);

  nmem = ptr_to_mem(mem->next);
  if (mem != nmem && nmem->used == 0 && (u8_t *)nmem != (u8_t *)ram_end) {
    /* if mem->next is unused and not end of ram, combine mem and mem->next */
    if (lfree == nmem) {
      lfree = mem;
    }
    mem->next = nmem->next;
    if (nmem->next != MEM_SIZE_ALIGNED) {
      ptr_to_mem(nmem->next)->prev = mem_to_ptr(mem);
    }
  }

  /* plug hole backward */
  pmem = ptr_to_mem(mem->prev);
  if (pmem != mem && pmem->used == 0) {
    /* if mem->prev is unused, combine mem and mem->prev */
    if (lfree == mem) {
      lfree = pmem;
    }
    pmem->next = mem->next;
    if (mem->next != MEM_SIZE_ALIGNED) {
      ptr_to_mem(mem->next)->prev = mem_to_ptr(pmem);
    }
  }
}
/**
 * Zero the heap and initialize start, end and lowest-free
 */
void
mem_init(void)
{
  struct mem *mem;

  LWIP_ASSERT("Sanity check alignment",
              (SIZEOF_STRUCT_MEM & (MEM_ALIGNMENT - 1)) == 0);

  /* align the heap */
  ram = (u8_t *)LWIP_MEM_ALIGN(LWIP_RAM_HEAP_POINTER);
  /* initialize the start of the heap */
  mem = (struct mem *)(void *)ram;
  mem->next = MEM_SIZE_ALIGNED;
  mem->prev = 0;
  mem->used = 0;
  /* initialize the end of the heap */
  ram_end = ptr_to_mem(MEM_SIZE_ALIGNED);
  ram_end->used = 1;
  ram_end->next = MEM_SIZE_ALIGNED;
  ram_end->prev = MEM_SIZE_ALIGNED;
  MEM_SANITY();

  /* initialize the lowest-free pointer to the start of the heap */
  lfree = (struct mem *)(void *)ram;

  MEM_STATS_AVAIL(avail, MEM_SIZE_ALIGNED);

  if (sys_mutex_new(&mem_mutex) != ERR_OK) {
    LWIP_ASSERT("failed to create mem_mutex", 0);
  }
}
/* Check if a struct mem is correctly linked.
 * If not, double-free is a possible reason.
 */
static int
mem_link_valid(struct mem *mem)
{
  struct mem *nmem, *pmem;
  mem_size_t rmem_idx;

  rmem_idx = mem_to_ptr(mem);
  nmem = ptr_to_mem(mem->next);
  pmem = ptr_to_mem(mem->prev);
  if ((mem->next > MEM_SIZE_ALIGNED) || (mem->prev > MEM_SIZE_ALIGNED) ||
      ((mem->prev != rmem_idx) && (pmem->next != rmem_idx)) ||
      ((nmem != ram_end) && (nmem->prev != rmem_idx))) {
    return 0;
  }
  return 1;
}
#if MEM_SANITY_CHECK
static void
mem_sanity(void)
{
  struct mem *mem;
  u8_t last_used;

  /* begin with first element here */
  mem = (struct mem *)ram;
  LWIP_ASSERT("heap element used valid", (mem->used == 0) || (mem->used == 1));
  last_used = mem->used;
  LWIP_ASSERT("heap element prev ptr valid", mem->prev == 0);
  LWIP_ASSERT("heap element next ptr valid", mem->next <= MEM_SIZE_ALIGNED);
  LWIP_ASSERT("heap element next ptr aligned", LWIP_MEM_ALIGN(ptr_to_mem(mem->next)) == ptr_to_mem(mem->next));

  /* check all elements before the end of the heap */
  for (mem = ptr_to_mem(mem->next);
       ((u8_t *)mem > ram) && (mem < ram_end);
       mem = ptr_to_mem(mem->next)) {
    LWIP_ASSERT("heap element aligned", LWIP_MEM_ALIGN(mem) == mem);
    LWIP_ASSERT("heap element prev ptr valid", mem->prev <= MEM_SIZE_ALIGNED);
    LWIP_ASSERT("heap element next ptr valid", mem->next <= MEM_SIZE_ALIGNED);
    LWIP_ASSERT("heap element prev ptr aligned", LWIP_MEM_ALIGN(ptr_to_mem(mem->prev)) == ptr_to_mem(mem->prev));
    LWIP_ASSERT("heap element next ptr aligned", LWIP_MEM_ALIGN(ptr_to_mem(mem->next)) == ptr_to_mem(mem->next));

    if (last_used == 0) {
      /* 2 unused elements in a row? */
      LWIP_ASSERT("heap element unused?", mem->used == 1);
    } else {
      LWIP_ASSERT("heap element unused member", (mem->used == 0) || (mem->used == 1));
    }

    LWIP_ASSERT("heap element link valid", mem_link_valid(mem));

    /* used/unused alternating */
    last_used = mem->used;
  }
  LWIP_ASSERT("heap end ptr sanity", mem == ptr_to_mem(MEM_SIZE_ALIGNED));
  LWIP_ASSERT("heap element used valid", mem->used == 1);
  LWIP_ASSERT("heap element prev ptr valid", mem->prev == MEM_SIZE_ALIGNED);
  LWIP_ASSERT("heap element next ptr valid", mem->next == MEM_SIZE_ALIGNED);
}
#endif /* MEM_SANITY_CHECK */
/**
 * Put a struct mem back on the heap
 *
 * @param rmem is the data portion of a struct mem as returned by a previous
 *             call to mem_malloc()
 */
void
mem_free(void *rmem)
{
  struct mem *mem;
  LWIP_MEM_FREE_DECL_PROTECT();

  if (rmem == NULL) {
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("mem_free(p == NULL) was called.\n"));
    return;
  }
  if ((((mem_ptr_t)rmem) & (MEM_ALIGNMENT - 1)) != 0) {
    LWIP_MEM_ILLEGAL_FREE("mem_free: sanity check alignment");
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: sanity check alignment\n"));
    /* protect mem stats from concurrent access */
    MEM_STATS_INC_LOCKED(illegal);
    return;
  }

  /* Get the corresponding struct mem: */
  /* cast through void* to get rid of alignment warnings */
  mem = (struct mem *)(void *)((u8_t *)rmem - (SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET));

  if ((u8_t *)mem < ram || (u8_t *)rmem + MIN_SIZE_ALIGNED > (u8_t *)ram_end) {
    LWIP_MEM_ILLEGAL_FREE("mem_free: illegal memory");
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory\n"));
    /* protect mem stats from concurrent access */
    MEM_STATS_INC_LOCKED(illegal);
    return;
  }
#if MEM_OVERFLOW_CHECK
  mem_overflow_check_element(mem);
#endif
  /* protect the heap from concurrent access */
  LWIP_MEM_FREE_PROTECT();
  /* mem has to be in a used state */
  if (!mem->used) {
    LWIP_MEM_ILLEGAL_FREE("mem_free: illegal memory: double free");
    LWIP_MEM_FREE_UNPROTECT();
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory: double free?\n"));
    /* protect mem stats from concurrent access */
    MEM_STATS_INC_LOCKED(illegal);
    return;
  }

  if (!mem_link_valid(mem)) {
    LWIP_MEM_ILLEGAL_FREE("mem_free: illegal memory: non-linked: double free");
    LWIP_MEM_FREE_UNPROTECT();
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory: non-linked: double free?\n"));
    /* protect mem stats from concurrent access */
    MEM_STATS_INC_LOCKED(illegal);
    return;
  }

  /* mem is now unused. */
  mem->used = 0;

  if (mem < lfree) {
    /* the newly freed struct is now the lowest */
    lfree = mem;
  }

  MEM_STATS_DEC_USED(used, mem->next - (mem_size_t)(((u8_t *)mem - ram)));

  /* finally, see if prev or next are free also */
  plug_holes(mem);
  MEM_SANITY();
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  mem_free_count = 1;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_MEM_FREE_UNPROTECT();
}
/**
 * Shrink memory returned by mem_malloc().
 *
 * @param rmem pointer to memory allocated by mem_malloc() that is to be shrunk
 * @param new_size required size after shrinking (needs to be smaller than or
 *                 equal to the previous size)
 * @return for compatibility reasons: is always == rmem, at the moment
 *         or NULL if new_size is > old size, in which case rmem is NOT touched
 *         or freed!
 */
void *
mem_trim(void *rmem, mem_size_t new_size)
{
  mem_size_t size, newsize;
  mem_size_t ptr, ptr2;
  struct mem *mem, *mem2;
  /* use the FREE_PROTECT here: it protects with sem OR SYS_ARCH_PROTECT */
  LWIP_MEM_FREE_DECL_PROTECT();

  /* Expand the size of the allocated memory region so that we can
     adjust for alignment. */
  newsize = (mem_size_t)LWIP_MEM_ALIGN_SIZE(new_size);
  if (newsize < MIN_SIZE_ALIGNED) {
    /* every data block must be at least MIN_SIZE_ALIGNED long */
    newsize = MIN_SIZE_ALIGNED;
  }
#if MEM_OVERFLOW_CHECK
  newsize += MEM_SANITY_REGION_BEFORE_ALIGNED + MEM_SANITY_REGION_AFTER_ALIGNED;
#endif
  if ((newsize > MEM_SIZE_ALIGNED) || (newsize < new_size)) {
    return NULL;
  }

  LWIP_ASSERT("mem_trim: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
              (u8_t *)rmem < (u8_t *)ram_end);

  if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_trim: illegal memory\n"));
    /* protect mem stats from concurrent access */
    MEM_STATS_INC_LOCKED(illegal);
    return rmem;
  }
  /* Get the corresponding struct mem ... */
  /* cast through void* to get rid of alignment warnings */
  mem = (struct mem *)(void *)((u8_t *)rmem - (SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET));
#if MEM_OVERFLOW_CHECK
  mem_overflow_check_element(mem);
#endif
  /* ... and its offset pointer */
  ptr = mem_to_ptr(mem);

  size = (mem_size_t)((mem_size_t)(mem->next - ptr) - (SIZEOF_STRUCT_MEM + MEM_SANITY_OVERHEAD));
  LWIP_ASSERT("mem_trim can only shrink memory", newsize <= size);
  if (newsize > size) {
    /* not supported */
    return NULL;
  }
  if (newsize == size) {
    /* No change in size, simply return */
    return rmem;
  }

  /* protect the heap from concurrent access */
  LWIP_MEM_FREE_PROTECT();

  mem2 = ptr_to_mem(mem->next);
  if (mem2->used == 0) {
    /* The next struct is unused, we can simply move it a little */
    mem_size_t next;
    LWIP_ASSERT("invalid next ptr", mem->next != MEM_SIZE_ALIGNED);
    /* remember the old next pointer */
    next = mem2->next;
    /* create new struct mem which is moved directly after the shrunk mem */
    ptr2 = (mem_size_t)(ptr + SIZEOF_STRUCT_MEM + newsize);
    if (lfree == mem2) {
      lfree = ptr_to_mem(ptr2);
    }
    mem2 = ptr_to_mem(ptr2);
    mem2->used = 0;
    /* restore the next pointer */
    mem2->next = next;
    /* link it back to mem */
    mem2->prev = ptr;
    /* link mem to it */
    mem->next = ptr2;
    /* last thing to restore linked list: as we have moved mem2,
     * let 'mem2->next->prev' point to mem2 again. but only if mem2->next is not
     * the end of the heap */
    if (mem2->next != MEM_SIZE_ALIGNED) {
      ptr_to_mem(mem2->next)->prev = ptr2;
    }
    MEM_STATS_DEC_USED(used, (size - newsize));
    /* no need to plug holes, we've already done that */
  } else if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED <= size) {
    /* Next struct is used but there's room for another struct mem with
     * at least MIN_SIZE_ALIGNED of data.
     * Old size ('size') must be big enough to contain at least 'newsize' plus a struct mem
     * ('SIZEOF_STRUCT_MEM') with some data ('MIN_SIZE_ALIGNED').
     * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
     *       region that couldn't hold data, but when mem->next gets freed,
     *       the 2 regions would be combined, resulting in more free memory */
    ptr2 = (mem_size_t)(ptr + SIZEOF_STRUCT_MEM + newsize);
    LWIP_ASSERT("invalid next ptr", mem->next != MEM_SIZE_ALIGNED);
    mem2 = ptr_to_mem(ptr2);
    if (mem2 < lfree) {
      lfree = mem2;
    }
    mem2->used = 0;
    mem2->next = mem->next;
    mem2->prev = ptr;
    mem->next = ptr2;
    if (mem2->next != MEM_SIZE_ALIGNED) {
      ptr_to_mem(mem2->next)->prev = ptr2;
    }
    MEM_STATS_DEC_USED(used, (size - newsize));
    /* the original mem->next is used, so no need to plug holes! */
  }
  /* else {
    next struct mem is used but size between mem and mem2 is not big enough
    to create another struct mem
    -> don't do anything.
    -> the remaining space stays unused since it is too small
  } */
#if MEM_OVERFLOW_CHECK
  mem_overflow_init_element(mem, new_size);
#endif
  MEM_SANITY();
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  mem_free_count = 1;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_MEM_FREE_UNPROTECT();
  return rmem;
}
/**
 * Allocate a block of memory with a minimum of 'size' bytes.
 *
 * @param size_in is the minimum size of the requested block in bytes.
 * @return pointer to allocated memory or NULL if no free memory was found.
 *
 * Note that the returned value will always be aligned (as defined by MEM_ALIGNMENT).
 */
void *
mem_malloc(mem_size_t size_in)
{
  mem_size_t ptr, ptr2, size;
  struct mem *mem, *mem2;
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  u8_t local_mem_free_count = 0;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_MEM_ALLOC_DECL_PROTECT();

  if (size_in == 0) {
    return NULL;
  }

  /* Expand the size of the allocated memory region so that we can
     adjust for alignment. */
  size = (mem_size_t)LWIP_MEM_ALIGN_SIZE(size_in);
  if (size < MIN_SIZE_ALIGNED) {
    /* every data block must be at least MIN_SIZE_ALIGNED long */
    size = MIN_SIZE_ALIGNED;
  }
#if MEM_OVERFLOW_CHECK
  size += MEM_SANITY_REGION_BEFORE_ALIGNED + MEM_SANITY_REGION_AFTER_ALIGNED;
#endif
  if ((size > MEM_SIZE_ALIGNED) || (size < size_in)) {
    return NULL;
  }

  /* protect the heap from concurrent access */
  sys_mutex_lock(&mem_mutex);
  LWIP_MEM_ALLOC_PROTECT();
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  /* run as long as a mem_free disturbed mem_malloc or mem_trim */
  do {
    local_mem_free_count = 0;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */

    /* Scan through the heap searching for a free block that is big enough,
     * beginning with the lowest free block.
     */
    for (ptr = mem_to_ptr(lfree); ptr < MEM_SIZE_ALIGNED - size;
         ptr = ptr_to_mem(ptr)->next) {
      mem = ptr_to_mem(ptr);
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
      mem_free_count = 0;
      LWIP_MEM_ALLOC_UNPROTECT();
      /* allow mem_free or mem_trim to run */
      LWIP_MEM_ALLOC_PROTECT();
      if (mem_free_count != 0) {
        /* If mem_free or mem_trim have run, we have to restart since they
           could have altered our current struct mem. */
        local_mem_free_count = 1;
        break;
      }
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */

      if ((!mem->used) &&
          (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size) {
        /* mem is not used and at least perfect fit is possible:
         * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */

        if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >= (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED)) {
          /* (in addition to the above, we test if another struct mem (SIZEOF_STRUCT_MEM) containing
           * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem')
           * -> split large block, create empty remainder,
           * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if
           * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size,
           * struct mem would fit in but no data between mem2 and mem2->next
           * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
           *       region that couldn't hold data, but when mem->next gets freed,
           *       the 2 regions would be combined, resulting in more free memory
           */
          ptr2 = (mem_size_t)(ptr + SIZEOF_STRUCT_MEM + size);
          LWIP_ASSERT("invalid next ptr", ptr2 != MEM_SIZE_ALIGNED);
          /* create mem2 struct */
          mem2 = ptr_to_mem(ptr2);
          mem2->used = 0;
          mem2->next = mem->next;
          mem2->prev = ptr;
          /* and insert it between mem and mem->next */
          mem->next = ptr2;
          mem->used = 1;

          if (mem2->next != MEM_SIZE_ALIGNED) {
            ptr_to_mem(mem2->next)->prev = ptr2;
          }
          MEM_STATS_INC_USED(used, (size + SIZEOF_STRUCT_MEM));
        } else {
          /* (a mem2 struct does not fit into the user data space of mem, and mem->next will always
           * be used at this point: if not, we would have 2 unused structs in a row, which
           * plug_holes should have taken care of).
           * -> near fit or exact fit: do not split, no mem2 creation
           * also can't move mem->next directly behind mem, since mem->next
           * will always be used at this point!
           */
          mem->used = 1;
          MEM_STATS_INC_USED(used, mem->next - mem_to_ptr(mem));
        }
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
mem_malloc_adjust_lfree:
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
        if (mem == lfree) {
          struct mem *cur = lfree;
          /* Find next free block after mem and update lowest free pointer */
          while (cur->used && cur != ram_end) {
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
            mem_free_count = 0;
            LWIP_MEM_ALLOC_UNPROTECT();
            /* prevent high interrupt latency... */
            LWIP_MEM_ALLOC_PROTECT();
            if (mem_free_count != 0) {
              /* If mem_free or mem_trim have run, we have to restart since they
                 could have altered our current struct mem or lfree. */
              goto mem_malloc_adjust_lfree;
            }
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
            cur = ptr_to_mem(cur->next);
          }
          lfree = cur;
          LWIP_ASSERT("mem_malloc: !lfree->used", ((lfree == ram_end) || (!lfree->used)));
        }
        LWIP_MEM_ALLOC_UNPROTECT();
        sys_mutex_unlock(&mem_mutex);
        LWIP_ASSERT("mem_malloc: allocated memory not above ram_end.",
                    (mem_ptr_t)mem + SIZEOF_STRUCT_MEM + size <= (mem_ptr_t)ram_end);
        LWIP_ASSERT("mem_malloc: allocated memory properly aligned.",
                    ((mem_ptr_t)mem + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT == 0);
        LWIP_ASSERT("mem_malloc: sanity check alignment",
                    (((mem_ptr_t)mem) & (MEM_ALIGNMENT - 1)) == 0);

#if MEM_OVERFLOW_CHECK
        mem_overflow_init_element(mem, size_in);
#endif
        MEM_SANITY();
        return (u8_t *)mem + SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET;
      }
    }
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
    /* if we got interrupted by a mem_free, try again */
  } while (local_mem_free_count != 0);
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  MEM_STATS_INC(err);
  LWIP_MEM_ALLOC_UNPROTECT();
  sys_mutex_unlock(&mem_mutex);
  LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("mem_malloc: could not allocate %"S16_F" bytes\n", (s16_t)size));
  return NULL;
}
#endif /* MEM_USE_POOLS */

#if MEM_LIBC_MALLOC && (!LWIP_STATS || !MEM_STATS)
void *
mem_calloc(mem_size_t count, mem_size_t size)
{
  return mem_clib_calloc(count, size);
}

#else /* MEM_LIBC_MALLOC && (!LWIP_STATS || !MEM_STATS) */
/**
 * Contiguously allocates enough space for count objects that are size bytes
 * of memory each and returns a pointer to the allocated memory.
 *
 * The allocated memory is filled with bytes of value zero.
 *
 * @param count number of objects to allocate
 * @param size size of the objects to allocate
 * @return pointer to allocated memory / NULL pointer if there is an error
 */
void *
mem_calloc(mem_size_t count, mem_size_t size)
{
  void *p;
  size_t alloc_size = (size_t)count * (size_t)size;
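
  /* check that the product survives a round-trip through mem_size_t,
     since mem_malloc() takes a mem_size_t argument */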
  if ((size_t)(mem_size_t)alloc_size != alloc_size) {
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("mem_calloc: could not allocate %"SZT_F" bytes\n", alloc_size));
    return NULL;
  }

  /* allocate 'count' objects of size 'size' */
  p = mem_malloc((mem_size_t)alloc_size);
  if (p) {
    /* zero the memory */
    memset(p, 0, alloc_size);
  }
  return p;
}
#endif /* MEM_LIBC_MALLOC && (!LWIP_STATS || !MEM_STATS) */
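
/* Usage sketch (editor's addition, not part of the original mem.c): whichever
 * backend is selected above, application code sees the same API:
 *
 *   void *p = mem_malloc(128);
 *   if (p != NULL) {
 *     p = mem_trim(p, 64);   (shrink in place; only the heap backend does real work)
 *     mem_free(p);
 *   }
 *
 * mem_init() is called during stack startup (from lwip_init()).
 */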