/* mem.c — RT-Thread small-memory heap allocator */
  1. /*
  2. * Copyright (c) 2006-2021, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2008-7-12 Bernard the first version
  9. * 2010-06-09 Bernard fix the end stub of heap
  10. * fix memory check in rt_realloc function
  11. * 2010-07-13 Bernard fix RT_ALIGN issue found by kuronca
  12. * 2010-10-14 Bernard fix rt_realloc issue when realloc a NULL pointer.
  13. * 2017-07-14 armink fix rt_realloc issue when new size is 0
  14. * 2018-10-02 Bernard Add 64bit support
  15. */
  16. /*
  17. * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
  18. * All rights reserved.
  19. *
  20. * Redistribution and use in source and binary forms, with or without modification,
  21. * are permitted provided that the following conditions are met:
  22. *
  23. * 1. Redistributions of source code must retain the above copyright notice,
  24. * this list of conditions and the following disclaimer.
  25. * 2. Redistributions in binary form must reproduce the above copyright notice,
  26. * this list of conditions and the following disclaimer in the documentation
  27. * and/or other materials provided with the distribution.
  28. * 3. The name of the author may not be used to endorse or promote products
  29. * derived from this software without specific prior written permission.
  30. *
  31. * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
  32. * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
  33. * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
  34. * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
  35. * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
  36. * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  37. * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  38. * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
  39. * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
  40. * OF SUCH DAMAGE.
  41. *
  42. * This file is part of the lwIP TCP/IP stack.
  43. *
  44. * Author: Adam Dunkels <adam@sics.se>
  45. * Simon Goldschmidt
  46. *
  47. */
  48. #include <rthw.h>
  49. #include <rtthread.h>
  50. #ifndef RT_USING_MEMHEAP_AS_HEAP
  51. #define RT_MEM_STATS
  52. #if defined (RT_USING_HEAP) && defined (RT_USING_SMALL_MEM)
  53. #ifdef RT_USING_HOOK
  54. static void (*rt_malloc_hook)(void *ptr, rt_size_t size);
  55. static void (*rt_free_hook)(void *ptr);
  56. /**
  57. * @addtogroup Hook
  58. */
  59. /**@{*/
  60. /**
  61. * This function will set a hook function, which will be invoked when a memory
  62. * block is allocated from heap memory.
  63. *
  64. * @param hook the hook function
  65. */
  66. void rt_malloc_sethook(void (*hook)(void *ptr, rt_size_t size))
  67. {
  68. rt_malloc_hook = hook;
  69. }
  70. /**
  71. * This function will set a hook function, which will be invoked when a memory
  72. * block is released to heap memory.
  73. *
  74. * @param hook the hook function
  75. */
  76. void rt_free_sethook(void (*hook)(void *ptr))
  77. {
  78. rt_free_hook = hook;
  79. }
  80. /**@}*/
  81. #endif /* RT_USING_HOOK */
  82. #define HEAP_MAGIC 0x1ea0
/* Header placed in front of every heap block, allocated or free.
 * Blocks are chained by byte OFFSETS from heap_ptr (next/prev), not by
 * raw pointers, so the same layout works wherever the heap lives. */
struct heap_mem
{
    /* magic and used flag */
    rt_uint16_t magic;     /* expected to hold HEAP_MAGIC while the header is intact */
    rt_uint16_t used;      /* 1: block is allocated, 0: block is free */
#ifdef ARCH_CPU_64BIT
    rt_uint32_t resv;      /* reserved — presumably pads next/prev to 8-byte alignment; TODO confirm */
#endif /* ARCH_CPU_64BIT */
    rt_size_t next, prev;  /* offsets (from heap_ptr) of the next/previous block header */
#ifdef RT_USING_MEMTRACE
#ifdef ARCH_CPU_64BIT
    rt_uint8_t thread[8];
#else
    rt_uint8_t thread[4]; /* thread name; space-padded, NOT NUL-terminated (see rt_mem_setname) */
#endif /* ARCH_CPU_64BIT */
#endif /* RT_USING_MEMTRACE */
};
  100. /** pointer to the heap: for alignment, heap_ptr is now a pointer instead of an array */
  101. static rt_uint8_t *heap_ptr;
  102. /** the last entry, always unused! */
  103. static struct heap_mem *heap_end;
  104. #ifdef ARCH_CPU_64BIT
  105. #define MIN_SIZE 24
  106. #else
  107. #define MIN_SIZE 12
  108. #endif /* ARCH_CPU_64BIT */
  109. #define MIN_SIZE_ALIGNED RT_ALIGN(MIN_SIZE, RT_ALIGN_SIZE)
  110. #define SIZEOF_STRUCT_MEM RT_ALIGN(sizeof(struct heap_mem), RT_ALIGN_SIZE)
  111. static struct heap_mem *lfree; /* pointer to the lowest free block */
  112. static struct rt_semaphore heap_sem;
  113. static rt_size_t mem_size_aligned;
  114. #ifdef RT_MEM_STATS
  115. static rt_size_t used_mem, max_mem;
  116. #endif /* RT_MEM_STATS */
  117. #ifdef RT_USING_MEMTRACE
  118. rt_inline void rt_mem_setname(struct heap_mem *mem, const char *name)
  119. {
  120. int index;
  121. for (index = 0; index < sizeof(mem->thread); index ++)
  122. {
  123. if (name[index] == '\0') break;
  124. mem->thread[index] = name[index];
  125. }
  126. for (; index < sizeof(mem->thread); index ++)
  127. {
  128. mem->thread[index] = ' ';
  129. }
  130. }
  131. #endif /* RT_USING_MEMTRACE */
/* Coalesce the free block 'mem' with its physically adjacent neighbours
 * when they are free too, so the heap never contains two free blocks in
 * a row.  Also keeps 'lfree' (lowest free block) consistent after the
 * merge.  Caller must hold heap_sem and 'mem' must already be free. */
static void plug_holes(struct heap_mem *mem)
{
    struct heap_mem *nmem;
    struct heap_mem *pmem;

    RT_ASSERT((rt_uint8_t *)mem >= heap_ptr);
    RT_ASSERT((rt_uint8_t *)mem < (rt_uint8_t *)heap_end);
    RT_ASSERT(mem->used == 0);

    /* plug hole forward */
    nmem = (struct heap_mem *)&heap_ptr[mem->next];
    if (mem != nmem &&
        nmem->used == 0 &&
        (rt_uint8_t *)nmem != (rt_uint8_t *)heap_end)
    {
        /* if mem->next is unused and not end of heap_ptr,
         * combine mem and mem->next
         */
        if (lfree == nmem)
        {
            lfree = mem;
        }
        /* absorb nmem: skip over it and re-link its successor's prev offset */
        mem->next = nmem->next;
        ((struct heap_mem *)&heap_ptr[nmem->next])->prev = (rt_uint8_t *)mem - heap_ptr;
    }

    /* plug hole backward */
    pmem = (struct heap_mem *)&heap_ptr[mem->prev];
    if (pmem != mem && pmem->used == 0)
    {
        /* if mem->prev is unused, combine mem and mem->prev */
        if (lfree == mem)
        {
            lfree = pmem;
        }
        /* absorb mem into pmem and re-link the successor's prev offset */
        pmem->next = mem->next;
        ((struct heap_mem *)&heap_ptr[mem->next])->prev = (rt_uint8_t *)pmem - heap_ptr;
    }
}
/**
 * @ingroup SystemInit
 *
 * This function will initialize system heap memory.
 *
 * The usable region is [begin_addr, end_addr) rounded inward to
 * RT_ALIGN_SIZE.  Two headers are reserved: one at the start covering
 * the single initial free block, and a permanently "used" sentinel at
 * the end (heap_end) that terminates every free-list walk.
 *
 * @param begin_addr the beginning address of system heap memory.
 * @param end_addr the end address of system heap memory.
 */
void rt_system_heap_init(void *begin_addr, void *end_addr)
{
    struct heap_mem *mem;
    /* round begin up and end down so everything inside is aligned */
    rt_ubase_t begin_align = RT_ALIGN((rt_ubase_t)begin_addr, RT_ALIGN_SIZE);
    rt_ubase_t end_align = RT_ALIGN_DOWN((rt_ubase_t)end_addr, RT_ALIGN_SIZE);

    RT_DEBUG_NOT_IN_INTERRUPT;

    /* alignment addr: the region must fit both reserved headers */
    if ((end_align > (2 * SIZEOF_STRUCT_MEM)) &&
        ((end_align - 2 * SIZEOF_STRUCT_MEM) >= begin_align))
    {
        /* calculate the aligned memory size */
        mem_size_aligned = end_align - begin_align - 2 * SIZEOF_STRUCT_MEM;
    }
    else
    {
        rt_kprintf("mem init, error begin address 0x%x, and end address 0x%x\n",
                   (rt_ubase_t)begin_addr, (rt_ubase_t)end_addr);

        return;
    }

    /* point to begin address of heap */
    heap_ptr = (rt_uint8_t *)begin_align;

    RT_DEBUG_LOG(RT_DEBUG_MEM, ("mem init, heap begin address 0x%x, size %d\n",
                                (rt_ubase_t)heap_ptr, mem_size_aligned));

    /* initialize the start of the heap: one free block spanning everything */
    mem        = (struct heap_mem *)heap_ptr;
    mem->magic = HEAP_MAGIC;
    mem->next  = mem_size_aligned + SIZEOF_STRUCT_MEM;
    mem->prev  = 0;
    mem->used  = 0;
#ifdef RT_USING_MEMTRACE
    rt_mem_setname(mem, "INIT");
#endif /* RT_USING_MEMTRACE */

    /* initialize the end-of-heap sentinel: always marked used so it is
     * never merged or handed out; next/prev point at itself */
    heap_end        = (struct heap_mem *)&heap_ptr[mem->next];
    heap_end->magic = HEAP_MAGIC;
    heap_end->used  = 1;
    heap_end->next  = mem_size_aligned + SIZEOF_STRUCT_MEM;
    heap_end->prev  = mem_size_aligned + SIZEOF_STRUCT_MEM;
#ifdef RT_USING_MEMTRACE
    rt_mem_setname(heap_end, "INIT");
#endif /* RT_USING_MEMTRACE */

    rt_sem_init(&heap_sem, "heap", 1, RT_IPC_FLAG_PRIO);

    /* initialize the lowest-free pointer to the start of the heap */
    lfree = (struct heap_mem *)heap_ptr;
}
  221. /**
  222. * @addtogroup MM
  223. */
  224. /**@{*/
/**
 * Allocate a block of memory with a minimum of 'size' bytes.
 *
 * First-fit scan of the offset-linked block list, starting at 'lfree'
 * (the lowest free block).  A found block is split when the remainder
 * could hold another header plus MIN_SIZE_ALIGNED bytes of data.
 *
 * @param size is the minimum size of the requested block in bytes.
 *
 * @return pointer to allocated memory or NULL if no free memory was found.
 */
void *rt_malloc(rt_size_t size)
{
    rt_size_t ptr, ptr2;
    struct heap_mem *mem, *mem2;

    if (size == 0)
        return RT_NULL;

    RT_DEBUG_NOT_IN_INTERRUPT;

    if (size != RT_ALIGN(size, RT_ALIGN_SIZE))
        RT_DEBUG_LOG(RT_DEBUG_MEM, ("malloc size %d, but align to %d\n",
                                    size, RT_ALIGN(size, RT_ALIGN_SIZE)));
    else
        RT_DEBUG_LOG(RT_DEBUG_MEM, ("malloc size %d\n", size));

    /* alignment size */
    size = RT_ALIGN(size, RT_ALIGN_SIZE);

    if (size > mem_size_aligned)
    {
        RT_DEBUG_LOG(RT_DEBUG_MEM, ("no memory\n"));

        return RT_NULL;
    }

    /* every data block must be at least MIN_SIZE_ALIGNED long */
    if (size < MIN_SIZE_ALIGNED)
        size = MIN_SIZE_ALIGNED;

    /* take memory semaphore */
    rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

    /* first-fit walk over block headers, by offset from heap_ptr */
    for (ptr = (rt_uint8_t *)lfree - heap_ptr;
         ptr < mem_size_aligned - size;
         ptr = ((struct heap_mem *)&heap_ptr[ptr])->next)
    {
        mem = (struct heap_mem *)&heap_ptr[ptr];

        if ((!mem->used) && (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size)
        {
            /* mem is not used and at least perfect fit is possible:
             * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */

            if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >=
                (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED))
            {
                /* (in addition to the above, we test if another struct heap_mem (SIZEOF_STRUCT_MEM) containing
                 * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem')
                 * -> split large block, create empty remainder,
                 * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if
                 * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size,
                 * struct heap_mem would fit in but no data between mem2 and mem2->next
                 * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
                 * region that couldn't hold data, but when mem->next gets freed,
                 * the 2 regions would be combined, resulting in more free memory
                 */
                ptr2 = ptr + SIZEOF_STRUCT_MEM + size;

                /* create mem2 struct for the free remainder */
                mem2        = (struct heap_mem *)&heap_ptr[ptr2];
                mem2->magic = HEAP_MAGIC;
                mem2->used  = 0;
                mem2->next  = mem->next;
                mem2->prev  = ptr;
#ifdef RT_USING_MEMTRACE
                rt_mem_setname(mem2, " ");
#endif /* RT_USING_MEMTRACE */

                /* and insert it between mem and mem->next */
                mem->next = ptr2;
                mem->used = 1;

                /* fix the successor's back-link unless mem2 is the last block */
                if (mem2->next != mem_size_aligned + SIZEOF_STRUCT_MEM)
                {
                    ((struct heap_mem *)&heap_ptr[mem2->next])->prev = ptr2;
                }
#ifdef RT_MEM_STATS
                used_mem += (size + SIZEOF_STRUCT_MEM);
                if (max_mem < used_mem)
                    max_mem = used_mem;
#endif /* RT_MEM_STATS */
            }
            else
            {
                /* (a mem2 struct does no fit into the user data space of mem and mem->next will always
                 * be used at this point: if not we have 2 unused structs in a row, plug_holes should have
                 * take care of this).
                 * -> near fit or excact fit: do not split, no mem2 creation
                 * also can't move mem->next directly behind mem, since mem->next
                 * will always be used at this point!
                 */
                mem->used = 1;
#ifdef RT_MEM_STATS
                used_mem += mem->next - ((rt_uint8_t *)mem - heap_ptr);
                if (max_mem < used_mem)
                    max_mem = used_mem;
#endif /* RT_MEM_STATS */
            }
            /* set memory block magic */
            mem->magic = HEAP_MAGIC;
#ifdef RT_USING_MEMTRACE
            if (rt_thread_self())
                rt_mem_setname(mem, rt_thread_self()->name);
            else
                rt_mem_setname(mem, "NONE");
#endif /* RT_USING_MEMTRACE */

            if (mem == lfree)
            {
                /* Find next free block after mem and update lowest free pointer */
                while (lfree->used && lfree != heap_end)
                    lfree = (struct heap_mem *)&heap_ptr[lfree->next];

                RT_ASSERT(((lfree == heap_end) || (!lfree->used)));
            }

            rt_sem_release(&heap_sem);
            RT_ASSERT((rt_ubase_t)mem + SIZEOF_STRUCT_MEM + size <= (rt_ubase_t)heap_end);
            RT_ASSERT((rt_ubase_t)((rt_uint8_t *)mem + SIZEOF_STRUCT_MEM) % RT_ALIGN_SIZE == 0);
            RT_ASSERT((((rt_ubase_t)mem) & (RT_ALIGN_SIZE - 1)) == 0);

            RT_DEBUG_LOG(RT_DEBUG_MEM,
                         ("allocate memory at 0x%x, size: %d\n",
                          (rt_ubase_t)((rt_uint8_t *)mem + SIZEOF_STRUCT_MEM),
                          (rt_ubase_t)(mem->next - ((rt_uint8_t *)mem - heap_ptr))));

            RT_OBJECT_HOOK_CALL(rt_malloc_hook,
                                (((void *)((rt_uint8_t *)mem + SIZEOF_STRUCT_MEM)), size));

            /* return the memory data except mem struct */
            return (rt_uint8_t *)mem + SIZEOF_STRUCT_MEM;
        }
    }

    rt_sem_release(&heap_sem);

    return RT_NULL;
}
  349. RTM_EXPORT(rt_malloc);
/**
 * This function will change the previously allocated memory block.
 *
 * Shrinking splits the tail off in place (no copy); growing falls back
 * to allocate-copy-free.  realloc(NULL, n) behaves like rt_malloc(n)
 * and realloc(p, 0) behaves like rt_free(p).
 *
 * @param rmem pointer to memory allocated by rt_malloc
 * @param newsize the required new size
 *
 * @return the changed memory block address
 */
void *rt_realloc(void *rmem, rt_size_t newsize)
{
    rt_size_t size;
    rt_size_t ptr, ptr2;
    struct heap_mem *mem, *mem2;
    void *nmem;

    RT_DEBUG_NOT_IN_INTERRUPT;

    /* alignment size */
    newsize = RT_ALIGN(newsize, RT_ALIGN_SIZE);
    if (newsize > mem_size_aligned)
    {
        RT_DEBUG_LOG(RT_DEBUG_MEM, ("realloc: out of memory\n"));

        return RT_NULL;
    }
    else if (newsize == 0)
    {
        /* realloc to zero frees the block; rt_free accepts RT_NULL */
        rt_free(rmem);

        return RT_NULL;
    }

    /* allocate a new memory block */
    if (rmem == RT_NULL)
        return rt_malloc(newsize);

    rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

    if ((rt_uint8_t *)rmem < (rt_uint8_t *)heap_ptr ||
        (rt_uint8_t *)rmem >= (rt_uint8_t *)heap_end)
    {
        /* illegal memory: pointer is outside the heap, hand it back untouched */
        rt_sem_release(&heap_sem);

        return rmem;
    }

    mem = (struct heap_mem *)((rt_uint8_t *)rmem - SIZEOF_STRUCT_MEM);

    /* current user-data size of the block, from the header offsets */
    ptr = (rt_uint8_t *)mem - heap_ptr;
    size = mem->next - ptr - SIZEOF_STRUCT_MEM;
    if (size == newsize)
    {
        /* the size is the same as */
        rt_sem_release(&heap_sem);

        return rmem;
    }

    if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE < size)
    {
        /* shrink in place: split off the tail as a new free block */
#ifdef RT_MEM_STATS
        used_mem -= (size - newsize);
#endif /* RT_MEM_STATS */

        ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
        mem2 = (struct heap_mem *)&heap_ptr[ptr2];
        mem2->magic = HEAP_MAGIC;
        mem2->used = 0;
        mem2->next = mem->next;
        mem2->prev = ptr;
#ifdef RT_USING_MEMTRACE
        rt_mem_setname(mem2, " ");
#endif /* RT_USING_MEMTRACE */
        mem->next = ptr2;
        /* fix the successor's back-link unless mem2 is the last block */
        if (mem2->next != mem_size_aligned + SIZEOF_STRUCT_MEM)
        {
            ((struct heap_mem *)&heap_ptr[mem2->next])->prev = ptr2;
        }

        if (mem2 < lfree)
        {
            /* the splited struct is now the lowest */
            lfree = mem2;
        }

        /* merge the freed tail with its neighbour if that is free too */
        plug_holes(mem2);

        rt_sem_release(&heap_sem);

        return rmem;
    }
    rt_sem_release(&heap_sem);

    /* expand memory: allocate, copy the smaller of old/new size, free old */
    nmem = rt_malloc(newsize);
    if (nmem != RT_NULL) /* check memory */
    {
        rt_memcpy(nmem, rmem, size < newsize ? size : newsize);
        rt_free(rmem);
    }

    return nmem;
}
  436. RTM_EXPORT(rt_realloc);
  437. /**
  438. * This function will contiguously allocate enough space for count objects
  439. * that are size bytes of memory each and returns a pointer to the allocated
  440. * memory.
  441. *
  442. * The allocated memory is filled with bytes of value zero.
  443. *
  444. * @param count number of objects to allocate
  445. * @param size size of the objects to allocate
  446. *
  447. * @return pointer to allocated memory / NULL pointer if there is an error
  448. */
  449. void *rt_calloc(rt_size_t count, rt_size_t size)
  450. {
  451. void *p;
  452. /* allocate 'count' objects of size 'size' */
  453. p = rt_malloc(count * size);
  454. /* zero the memory */
  455. if (p)
  456. rt_memset(p, 0, count * size);
  457. return p;
  458. }
  459. RTM_EXPORT(rt_calloc);
/**
 * This function will release the previously allocated memory block by
 * rt_malloc. The released memory block is taken back to system heap.
 *
 * A bad block (wrong magic or not marked used) is reported via
 * rt_kprintf and trips RT_ASSERT; freeing RT_NULL is a no-op.
 *
 * @param rmem the address of memory which will be released
 */
void rt_free(void *rmem)
{
    struct heap_mem *mem;

    if (rmem == RT_NULL)
        return;

    RT_DEBUG_NOT_IN_INTERRUPT;

    RT_ASSERT((((rt_ubase_t)rmem) & (RT_ALIGN_SIZE - 1)) == 0);
    RT_ASSERT((rt_uint8_t *)rmem >= (rt_uint8_t *)heap_ptr &&
              (rt_uint8_t *)rmem < (rt_uint8_t *)heap_end);

    RT_OBJECT_HOOK_CALL(rt_free_hook, (rmem));

    if ((rt_uint8_t *)rmem < (rt_uint8_t *)heap_ptr ||
        (rt_uint8_t *)rmem >= (rt_uint8_t *)heap_end)
    {
        RT_DEBUG_LOG(RT_DEBUG_MEM, ("illegal memory\n"));

        return;
    }

    /* Get the corresponding struct heap_mem ... */
    mem = (struct heap_mem *)((rt_uint8_t *)rmem - SIZEOF_STRUCT_MEM);

    RT_DEBUG_LOG(RT_DEBUG_MEM,
                 ("release memory 0x%x, size: %d\n",
                  (rt_ubase_t)rmem,
                  (rt_ubase_t)(mem->next - ((rt_uint8_t *)mem - heap_ptr))));

    /* protect the heap from concurrent access */
    rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

    /* ... which has to be in a used state ... */
    if (!mem->used || mem->magic != HEAP_MAGIC)
    {
        rt_kprintf("to free a bad data block:\n");
        rt_kprintf("mem: 0x%08x, used flag: %d, magic code: 0x%04x\n", mem, mem->used, mem->magic);
    }
    RT_ASSERT(mem->used);
    RT_ASSERT(mem->magic == HEAP_MAGIC);
    /* ... and is now unused. */
    mem->used = 0;
    mem->magic = HEAP_MAGIC;
#ifdef RT_USING_MEMTRACE
    rt_mem_setname(mem, " ");
#endif /* RT_USING_MEMTRACE */

    if (mem < lfree)
    {
        /* the newly freed struct is now the lowest */
        lfree = mem;
    }

#ifdef RT_MEM_STATS
    used_mem -= (mem->next - ((rt_uint8_t *)mem - heap_ptr));
#endif /* RT_MEM_STATS */

    /* finally, see if prev or next are free also */
    plug_holes(mem);
    rt_sem_release(&heap_sem);
}
  516. RTM_EXPORT(rt_free);
  517. #ifdef RT_MEM_STATS
  518. void rt_memory_info(rt_uint32_t *total,
  519. rt_uint32_t *used,
  520. rt_uint32_t *max_used)
  521. {
  522. if (total != RT_NULL)
  523. *total = mem_size_aligned;
  524. if (used != RT_NULL)
  525. *used = used_mem;
  526. if (max_used != RT_NULL)
  527. *max_used = max_mem;
  528. }
  529. #ifdef RT_USING_FINSH
  530. #include <finsh.h>
  531. void list_mem(void)
  532. {
  533. rt_kprintf("total memory: %d\n", mem_size_aligned);
  534. rt_kprintf("used memory : %d\n", used_mem);
  535. rt_kprintf("maximum allocated memory: %d\n", max_mem);
  536. }
  537. FINSH_FUNCTION_EXPORT(list_mem, list memory usage information)
  538. #ifdef RT_USING_MEMTRACE
/* Shell command: walk every block header with interrupts disabled and
 * verify it is inside the heap, carries HEAP_MAGIC, and has a sane used
 * flag.  The first corrupt header is dumped; always returns 0 (shell
 * convention).  NOTE(review): 'position' is an int, so heaps larger
 * than INT_MAX bytes would confuse the range checks — confirm target
 * heap sizes stay below that. */
int memcheck(void)
{
    int position;
    rt_ubase_t level;
    struct heap_mem *mem;

    /* walk atomically so headers cannot change mid-scan */
    level = rt_hw_interrupt_disable();
    for (mem = (struct heap_mem *)heap_ptr; mem != heap_end; mem = (struct heap_mem *)&heap_ptr[mem->next])
    {
        position = (rt_ubase_t)mem - (rt_ubase_t)heap_ptr;
        if (position < 0) goto __exit;
        if (position > (int)mem_size_aligned) goto __exit;
        if (mem->magic != HEAP_MAGIC) goto __exit;
        if (mem->used != 0 && mem->used != 1) goto __exit;
    }
    rt_hw_interrupt_enable(level);

    return 0;
__exit:
    /* dump the offending header before re-enabling interrupts */
    rt_kprintf("Memory block wrong:\n");
    rt_kprintf("address: 0x%08x\n", mem);
    rt_kprintf(" magic: 0x%04x\n", mem->magic);
    rt_kprintf(" used: %d\n", mem->used);
    rt_kprintf(" size: %d\n", mem->next - position - SIZEOF_STRUCT_MEM);
    rt_hw_interrupt_enable(level);

    return 0;
}
  564. MSH_CMD_EXPORT(memcheck, check memory data);
/* Shell command: print heap statistics, the key heap pointers, then one
 * line per block (address, human-readable size, owner tag from the
 * header's thread field, and "***" when the magic is corrupt).
 * argc/argv are unused.  NOTE(review): iterates without taking
 * heap_sem — output may be inconsistent under concurrent allocation. */
int memtrace(int argc, char **argv)
{
    struct heap_mem *mem;

    list_mem();

    rt_kprintf("\nmemory heap address:\n");
    rt_kprintf("heap_ptr: 0x%08x\n", heap_ptr);
    rt_kprintf("lfree : 0x%08x\n", lfree);
    rt_kprintf("heap_end: 0x%08x\n", heap_end);

    rt_kprintf("\n--memory item information --\n");
    for (mem = (struct heap_mem *)heap_ptr; mem != heap_end; mem = (struct heap_mem *)&heap_ptr[mem->next])
    {
        int position = (rt_ubase_t)mem - (rt_ubase_t)heap_ptr;
        int size;

        rt_kprintf("[0x%08x - ", mem);

        /* user-data size = distance to next header minus this header */
        size = mem->next - position - SIZEOF_STRUCT_MEM;
        if (size < 1024)
            rt_kprintf("%5d", size);
        else if (size < 1024 * 1024)
            rt_kprintf("%4dK", size / 1024);
        else
            rt_kprintf("%4dM", size / (1024 * 1024));

        /* owner tag: first four bytes of the space-padded thread field */
        rt_kprintf("] %c%c%c%c", mem->thread[0], mem->thread[1], mem->thread[2], mem->thread[3]);
        if (mem->magic != HEAP_MAGIC)
            rt_kprintf(": ***\n");
        else
            rt_kprintf("\n");
    }

    return 0;
}
  594. MSH_CMD_EXPORT(memtrace, dump memory trace information);
  595. #endif /* RT_USING_MEMTRACE */
  596. #endif /* RT_USING_FINSH */
  597. #endif /* defined (RT_USING_HEAP) && defined (RT_USING_SMALL_MEM) */
  598. /**@}*/
  599. #endif /* RT_MEM_STATS */
  600. #endif /* RT_USING_MEMHEAP_AS_HEAP */