/* heap_4.c */
  1. /*
  2. * FreeRTOS Kernel <DEVELOPMENT BRANCH>
  3. * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
  4. *
  5. * SPDX-License-Identifier: MIT
  6. *
  7. * Permission is hereby granted, free of charge, to any person obtaining a copy of
  8. * this software and associated documentation files (the "Software"), to deal in
  9. * the Software without restriction, including without limitation the rights to
  10. * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
  11. * the Software, and to permit persons to whom the Software is furnished to do so,
  12. * subject to the following conditions:
  13. *
  14. * The above copyright notice and this permission notice shall be included in all
  15. * copies or substantial portions of the Software.
  16. *
  17. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  18. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
  19. * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
  20. * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
  21. * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  22. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  23. *
  24. * https://www.FreeRTOS.org
  25. * https://github.com/FreeRTOS
  26. *
  27. */
  28. /*
  29. * A sample implementation of pvPortMalloc() and vPortFree() that combines
  30. * (coalescences) adjacent memory blocks as they are freed, and in so doing
  31. * limits memory fragmentation.
  32. *
  33. * See heap_1.c, heap_2.c and heap_3.c for alternative implementations, and the
  34. * memory management pages of https://www.FreeRTOS.org for more information.
  35. */
  36. #include <stdlib.h>
  37. #include <string.h>
  38. /* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
  39. * all the API functions to use the MPU wrappers. That should only be done when
  40. * task.h is included from an application file. */
  41. #define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
  42. #include "FreeRTOS.h"
  43. #include "task.h"
  44. #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
  45. #if ( configSUPPORT_DYNAMIC_ALLOCATION == 0 )
  46. #error This file must not be used if configSUPPORT_DYNAMIC_ALLOCATION is 0
  47. #endif
  48. #ifndef configHEAP_CLEAR_MEMORY_ON_FREE
  49. #define configHEAP_CLEAR_MEMORY_ON_FREE 0
  50. #endif
  51. /* Block sizes must not get too small. */
  52. #define heapMINIMUM_BLOCK_SIZE ( ( size_t ) ( xHeapStructSize << 1 ) )
  53. /* Assumes 8bit bytes! */
  54. #define heapBITS_PER_BYTE ( ( size_t ) 8 )
  55. /* Max value that fits in a size_t type. */
  56. #define heapSIZE_MAX ( ~( ( size_t ) 0 ) )
  57. /* Check if multiplying a and b will result in overflow. */
  58. #define heapMULTIPLY_WILL_OVERFLOW( a, b ) ( ( ( a ) > 0 ) && ( ( b ) > ( heapSIZE_MAX / ( a ) ) ) )
  59. /* Check if adding a and b will result in overflow. */
  60. #define heapADD_WILL_OVERFLOW( a, b ) ( ( a ) > ( heapSIZE_MAX - ( b ) ) )
  61. /* Check if the subtraction operation ( a - b ) will result in underflow. */
  62. #define heapSUBTRACT_WILL_UNDERFLOW( a, b ) ( ( a ) < ( b ) )
  63. /* MSB of the xBlockSize member of an BlockLink_t structure is used to track
  64. * the allocation status of a block. When MSB of the xBlockSize member of
  65. * an BlockLink_t structure is set then the block belongs to the application.
  66. * When the bit is free the block is still part of the free heap space. */
  67. #define heapBLOCK_ALLOCATED_BITMASK ( ( ( size_t ) 1 ) << ( ( sizeof( size_t ) * heapBITS_PER_BYTE ) - 1 ) )
  68. #define heapBLOCK_SIZE_IS_VALID( xBlockSize ) ( ( ( xBlockSize ) & heapBLOCK_ALLOCATED_BITMASK ) == 0 )
  69. #define heapBLOCK_IS_ALLOCATED( pxBlock ) ( ( ( pxBlock->xBlockSize ) & heapBLOCK_ALLOCATED_BITMASK ) != 0 )
  70. #define heapALLOCATE_BLOCK( pxBlock ) ( ( pxBlock->xBlockSize ) |= heapBLOCK_ALLOCATED_BITMASK )
  71. #define heapFREE_BLOCK( pxBlock ) ( ( pxBlock->xBlockSize ) &= ~heapBLOCK_ALLOCATED_BITMASK )
  72. /*-----------------------------------------------------------*/
  73. /* Allocate the memory for the heap. */
  74. #if ( configAPPLICATION_ALLOCATED_HEAP == 1 )
  75. /* The application writer has already defined the array used for the RTOS
  76. * heap - probably so it can be placed in a special segment or address. */
  77. extern uint8_t ucHeap[ configTOTAL_HEAP_SIZE ];
  78. #else
  79. PRIVILEGED_DATA static uint8_t ucHeap[ configTOTAL_HEAP_SIZE ];
  80. #endif /* configAPPLICATION_ALLOCATED_HEAP */
  81. /* Define the linked list structure. This is used to link free blocks in order
  82. * of their memory address. */
  83. typedef struct A_BLOCK_LINK
  84. {
  85. struct A_BLOCK_LINK * pxNextFreeBlock; /**< The next free block in the list. */
  86. size_t xBlockSize; /**< The size of the free block. */
  87. } BlockLink_t;
  88. /* Setting configENABLE_HEAP_PROTECTOR to 1 enables heap block pointers
  89. * protection using an application supplied canary value to catch heap
  90. * corruption should a heap buffer overflow occur.
  91. */
  92. #if ( configENABLE_HEAP_PROTECTOR == 1 )
  93. /**
  94. * @brief Application provided function to get a random value to be used as canary.
  95. *
  96. * @param pxHeapCanary [out] Output parameter to return the canary value.
  97. */
  98. extern void vApplicationGetRandomHeapCanary( portPOINTER_SIZE_TYPE * pxHeapCanary );
  99. /* Canary value for protecting internal heap pointers. */
  100. PRIVILEGED_DATA static portPOINTER_SIZE_TYPE xHeapCanary;
  101. /* Macro to load/store BlockLink_t pointers to memory. By XORing the
  102. * pointers with a random canary value, heap overflows will result
  103. * in randomly unpredictable pointer values which will be caught by
  104. * heapVALIDATE_BLOCK_POINTER assert. */
  105. #define heapPROTECT_BLOCK_POINTER( pxBlock ) ( ( BlockLink_t * ) ( ( ( portPOINTER_SIZE_TYPE ) ( pxBlock ) ) ^ xHeapCanary ) )
  106. #else
  107. #define heapPROTECT_BLOCK_POINTER( pxBlock ) ( pxBlock )
  108. #endif /* configENABLE_HEAP_PROTECTOR */
  109. /* Assert that a heap block pointer is within the heap bounds. */
  110. #define heapVALIDATE_BLOCK_POINTER( pxBlock ) \
  111. configASSERT( ( ( uint8_t * ) ( pxBlock ) >= &( ucHeap[ 0 ] ) ) && \
  112. ( ( uint8_t * ) ( pxBlock ) <= &( ucHeap[ configTOTAL_HEAP_SIZE - 1 ] ) ) )
  113. /*-----------------------------------------------------------*/
  114. /*
  115. * Inserts a block of memory that is being freed into the correct position in
  116. * the list of free memory blocks. The block being freed will be merged with
  117. * the block in front it and/or the block behind it if the memory blocks are
  118. * adjacent to each other.
  119. */
  120. static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert ) PRIVILEGED_FUNCTION;
  121. /*
  122. * Called automatically to setup the required heap structures the first time
  123. * pvPortMalloc() is called.
  124. */
  125. static void prvHeapInit( void ) PRIVILEGED_FUNCTION;
  126. /*-----------------------------------------------------------*/
  127. /* The size of the structure placed at the beginning of each allocated memory
  128. * block must by correctly byte aligned. */
  129. static const size_t xHeapStructSize = ( sizeof( BlockLink_t ) + ( ( size_t ) ( portBYTE_ALIGNMENT - 1 ) ) ) & ~( ( size_t ) portBYTE_ALIGNMENT_MASK );
  130. /* Create a couple of list links to mark the start and end of the list. */
  131. PRIVILEGED_DATA static BlockLink_t xStart;
  132. PRIVILEGED_DATA static BlockLink_t * pxEnd = NULL;
  133. /* Keeps track of the number of calls to allocate and free memory as well as the
  134. * number of free bytes remaining, but says nothing about fragmentation. */
  135. PRIVILEGED_DATA static size_t xFreeBytesRemaining = ( size_t ) 0U;
  136. PRIVILEGED_DATA static size_t xMinimumEverFreeBytesRemaining = ( size_t ) 0U;
  137. PRIVILEGED_DATA static size_t xNumberOfSuccessfulAllocations = ( size_t ) 0U;
  138. PRIVILEGED_DATA static size_t xNumberOfSuccessfulFrees = ( size_t ) 0U;
  139. /*-----------------------------------------------------------*/
/*
 * Allocate xWantedSize bytes from the FreeRTOS heap using a first-fit search
 * of the address-ordered free list.  Returns a pointer to the usable payload
 * (which immediately follows the hidden BlockLink_t header), or NULL when the
 * request cannot be satisfied.  Thread safety is provided by suspending the
 * scheduler for the duration of the free-list manipulation.
 */
void * pvPortMalloc( size_t xWantedSize )
{
    BlockLink_t * pxBlock;
    BlockLink_t * pxPreviousBlock;
    BlockLink_t * pxNewBlockLink;
    void * pvReturn = NULL;
    size_t xAdditionalRequiredSize;
    size_t xAllocatedBlockSize = 0;

    if( xWantedSize > 0 )
    {
        /* The wanted size must be increased so it can contain a BlockLink_t
         * structure in addition to the requested amount of bytes.  Every
         * overflow check below collapses the request to 0, which later fails
         * the ( xWantedSize > 0 ) test and makes the call return NULL. */
        if( heapADD_WILL_OVERFLOW( xWantedSize, xHeapStructSize ) == 0 )
        {
            xWantedSize += xHeapStructSize;

            /* Ensure that blocks are always aligned to the required number
             * of bytes. */
            if( ( xWantedSize & portBYTE_ALIGNMENT_MASK ) != 0x00 )
            {
                /* Byte alignment required - round the size up. */
                xAdditionalRequiredSize = portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK );

                if( heapADD_WILL_OVERFLOW( xWantedSize, xAdditionalRequiredSize ) == 0 )
                {
                    xWantedSize += xAdditionalRequiredSize;
                }
                else
                {
                    xWantedSize = 0;
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            xWantedSize = 0;
        }
    }
    else
    {
        mtCOVERAGE_TEST_MARKER();
    }

    vTaskSuspendAll();
    {
        /* If this is the first call to malloc then the heap will require
         * initialisation to setup the list of free blocks. */
        if( pxEnd == NULL )
        {
            prvHeapInit();
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        /* Check the block size we are trying to allocate is not so large that the
         * top bit is set.  The top bit of the block size member of the BlockLink_t
         * structure is used to determine who owns the block - the application or
         * the kernel, so it must be free. */
        if( heapBLOCK_SIZE_IS_VALID( xWantedSize ) != 0 )
        {
            if( ( xWantedSize > 0 ) && ( xWantedSize <= xFreeBytesRemaining ) )
            {
                /* Traverse the list from the start (lowest address) block until
                 * one of adequate size is found.  Free-list pointers are stored
                 * XORed with the heap canary (when configENABLE_HEAP_PROTECTOR
                 * is 1), so every stored pointer is decoded via
                 * heapPROTECT_BLOCK_POINTER before being dereferenced. */
                pxPreviousBlock = &xStart;
                pxBlock = heapPROTECT_BLOCK_POINTER( xStart.pxNextFreeBlock );
                heapVALIDATE_BLOCK_POINTER( pxBlock );

                while( ( pxBlock->xBlockSize < xWantedSize ) && ( pxBlock->pxNextFreeBlock != heapPROTECT_BLOCK_POINTER( NULL ) ) )
                {
                    pxPreviousBlock = pxBlock;
                    pxBlock = heapPROTECT_BLOCK_POINTER( pxBlock->pxNextFreeBlock );
                    heapVALIDATE_BLOCK_POINTER( pxBlock );
                }

                /* If the end marker was reached then a block of adequate size
                 * was not found. */
                if( pxBlock != pxEnd )
                {
                    /* Return the memory space pointed to - jumping over the
                     * BlockLink_t structure at its start. */
                    pvReturn = ( void * ) ( ( ( uint8_t * ) heapPROTECT_BLOCK_POINTER( pxPreviousBlock->pxNextFreeBlock ) ) + xHeapStructSize );
                    heapVALIDATE_BLOCK_POINTER( pvReturn );

                    /* This block is being returned for use so must be taken out
                     * of the list of free blocks. */
                    pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock;

                    /* If the block is larger than required it can be split into
                     * two. */
                    configASSERT( heapSUBTRACT_WILL_UNDERFLOW( pxBlock->xBlockSize, xWantedSize ) == 0 );

                    if( ( pxBlock->xBlockSize - xWantedSize ) > heapMINIMUM_BLOCK_SIZE )
                    {
                        /* This block is to be split into two.  Create a new
                         * block following the number of bytes requested. The void
                         * cast is used to prevent byte alignment warnings from the
                         * compiler. */
                        pxNewBlockLink = ( void * ) ( ( ( uint8_t * ) pxBlock ) + xWantedSize );
                        configASSERT( ( ( ( size_t ) pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 );

                        /* Calculate the sizes of two blocks split from the
                         * single block. */
                        pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize;
                        pxBlock->xBlockSize = xWantedSize;

                        /* Insert the new block into the list of free blocks. */
                        pxNewBlockLink->pxNextFreeBlock = pxPreviousBlock->pxNextFreeBlock;
                        pxPreviousBlock->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( pxNewBlockLink );
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    xFreeBytesRemaining -= pxBlock->xBlockSize;

                    /* Track the historical low-water mark of free heap space. */
                    if( xFreeBytesRemaining < xMinimumEverFreeBytesRemaining )
                    {
                        xMinimumEverFreeBytesRemaining = xFreeBytesRemaining;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    xAllocatedBlockSize = pxBlock->xBlockSize;

                    /* The block is being returned - it is allocated and owned
                     * by the application and has no "next" block.  Setting the
                     * MSB of xBlockSize marks the block as allocated. */
                    heapALLOCATE_BLOCK( pxBlock );
                    pxBlock->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( NULL );
                    xNumberOfSuccessfulAllocations++;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        traceMALLOC( pvReturn, xAllocatedBlockSize );

        /* Prevent compiler warnings when trace macros are not used. */
        ( void ) xAllocatedBlockSize;
    }
    ( void ) xTaskResumeAll();

    #if ( configUSE_MALLOC_FAILED_HOOK == 1 )
    {
        if( pvReturn == NULL )
        {
            vApplicationMallocFailedHook();
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }
    #endif /* if ( configUSE_MALLOC_FAILED_HOOK == 1 ) */

    configASSERT( ( ( ( size_t ) pvReturn ) & ( size_t ) portBYTE_ALIGNMENT_MASK ) == 0 );
    return pvReturn;
}
  299. /*-----------------------------------------------------------*/
/*
 * Return a block previously obtained from pvPortMalloc() (or pvPortCalloc())
 * to the heap.  The block is coalesced with adjacent free blocks by
 * prvInsertBlockIntoFreeList().  Passing NULL is a harmless no-op.
 */
void vPortFree( void * pv )
{
    uint8_t * puc = ( uint8_t * ) pv;
    BlockLink_t * pxLink;

    if( pv != NULL )
    {
        /* The memory being freed will have an BlockLink_t structure immediately
         * before it. */
        puc -= xHeapStructSize;

        /* This casting is to keep the compiler from issuing warnings. */
        pxLink = ( void * ) puc;

        heapVALIDATE_BLOCK_POINTER( pxLink );

        /* A freed block must be marked allocated (MSB of xBlockSize set) and
         * must have a NULL next pointer - anything else indicates a double
         * free or heap corruption. */
        configASSERT( heapBLOCK_IS_ALLOCATED( pxLink ) != 0 );
        configASSERT( pxLink->pxNextFreeBlock == heapPROTECT_BLOCK_POINTER( NULL ) );

        if( heapBLOCK_IS_ALLOCATED( pxLink ) != 0 )
        {
            if( pxLink->pxNextFreeBlock == heapPROTECT_BLOCK_POINTER( NULL ) )
            {
                /* The block is being returned to the heap - it is no longer
                 * allocated. */
                heapFREE_BLOCK( pxLink );
                #if ( configHEAP_CLEAR_MEMORY_ON_FREE == 1 )
                {
                    /* Check for underflow as this can occur if xBlockSize is
                     * overwritten in a heap block. */
                    if( heapSUBTRACT_WILL_UNDERFLOW( pxLink->xBlockSize, xHeapStructSize ) == 0 )
                    {
                        /* Wipe only the payload, not the header. */
                        ( void ) memset( puc + xHeapStructSize, 0, pxLink->xBlockSize - xHeapStructSize );
                    }
                }
                #endif

                vTaskSuspendAll();
                {
                    /* Add this block to the list of free blocks. */
                    xFreeBytesRemaining += pxLink->xBlockSize;
                    traceFREE( pv, pxLink->xBlockSize );
                    prvInsertBlockIntoFreeList( ( ( BlockLink_t * ) pxLink ) );
                    xNumberOfSuccessfulFrees++;
                }
                ( void ) xTaskResumeAll();
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }
}
  352. /*-----------------------------------------------------------*/
  353. size_t xPortGetFreeHeapSize( void )
  354. {
  355. return xFreeBytesRemaining;
  356. }
  357. /*-----------------------------------------------------------*/
  358. size_t xPortGetMinimumEverFreeHeapSize( void )
  359. {
  360. return xMinimumEverFreeBytesRemaining;
  361. }
  362. /*-----------------------------------------------------------*/
  363. void xPortResetHeapMinimumEverFreeHeapSize( void )
  364. {
  365. xMinimumEverFreeBytesRemaining = xFreeBytesRemaining;
  366. }
  367. /*-----------------------------------------------------------*/
  368. void vPortInitialiseBlocks( void )
  369. {
  370. /* This just exists to keep the linker quiet. */
  371. }
  372. /*-----------------------------------------------------------*/
  373. void * pvPortCalloc( size_t xNum,
  374. size_t xSize )
  375. {
  376. void * pv = NULL;
  377. if( heapMULTIPLY_WILL_OVERFLOW( xNum, xSize ) == 0 )
  378. {
  379. pv = pvPortMalloc( xNum * xSize );
  380. if( pv != NULL )
  381. {
  382. ( void ) memset( pv, 0, xNum * xSize );
  383. }
  384. }
  385. return pv;
  386. }
  387. /*-----------------------------------------------------------*/
/*
 * One-time heap setup, called from pvPortMalloc() on its first invocation
 * (detected by pxEnd == NULL).  Aligns the start of ucHeap, places the pxEnd
 * marker at the (aligned) top of the heap, and creates a single free block
 * covering everything in between.
 */
static void prvHeapInit( void ) /* PRIVILEGED_FUNCTION */
{
    BlockLink_t * pxFirstFreeBlock;
    portPOINTER_SIZE_TYPE uxStartAddress, uxEndAddress;
    size_t xTotalHeapSize = configTOTAL_HEAP_SIZE;

    /* Ensure the heap starts on a correctly aligned boundary.  Any bytes
     * skipped for alignment are subtracted from the usable heap size. */
    uxStartAddress = ( portPOINTER_SIZE_TYPE ) ucHeap;

    if( ( uxStartAddress & portBYTE_ALIGNMENT_MASK ) != 0 )
    {
        uxStartAddress += ( portBYTE_ALIGNMENT - 1 );
        uxStartAddress &= ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK );
        xTotalHeapSize -= ( size_t ) ( uxStartAddress - ( portPOINTER_SIZE_TYPE ) ucHeap );
    }

    #if ( configENABLE_HEAP_PROTECTOR == 1 )
    {
        /* Obtain the canary used to XOR-protect stored free-list pointers. */
        vApplicationGetRandomHeapCanary( &( xHeapCanary ) );
    }
    #endif

    /* xStart is used to hold a pointer to the first item in the list of free
     * blocks.  The void cast is used to prevent compiler warnings. */
    xStart.pxNextFreeBlock = ( void * ) heapPROTECT_BLOCK_POINTER( uxStartAddress );
    xStart.xBlockSize = ( size_t ) 0;

    /* pxEnd is used to mark the end of the list of free blocks and is inserted
     * at the end of the heap space (rounded down to an aligned address). */
    uxEndAddress = uxStartAddress + ( portPOINTER_SIZE_TYPE ) xTotalHeapSize;
    uxEndAddress -= ( portPOINTER_SIZE_TYPE ) xHeapStructSize;
    uxEndAddress &= ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK );
    pxEnd = ( BlockLink_t * ) uxEndAddress;
    pxEnd->xBlockSize = 0;
    pxEnd->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( NULL );

    /* To start with there is a single free block that is sized to take up the
     * entire heap space, minus the space taken by pxEnd. */
    pxFirstFreeBlock = ( BlockLink_t * ) uxStartAddress;
    pxFirstFreeBlock->xBlockSize = ( size_t ) ( uxEndAddress - ( portPOINTER_SIZE_TYPE ) pxFirstFreeBlock );
    pxFirstFreeBlock->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( pxEnd );

    /* Only one block exists - and it covers the entire usable heap space. */
    xMinimumEverFreeBytesRemaining = pxFirstFreeBlock->xBlockSize;
    xFreeBytesRemaining = pxFirstFreeBlock->xBlockSize;
}
  427. /*-----------------------------------------------------------*/
/*
 * Insert pxBlockToInsert into the free list, which is kept sorted by block
 * address so that adjacent free blocks can be detected and merged.  The
 * inserted block is coalesced with the block before it and/or the block after
 * it whenever the blocks are contiguous in memory, limiting fragmentation.
 * Caller must hold the scheduler suspended (or otherwise own the heap).
 */
static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert ) /* PRIVILEGED_FUNCTION */
{
    BlockLink_t * pxIterator;
    uint8_t * puc;

    /* Iterate through the list until a block is found that has a higher address
     * than the block being inserted. */
    for( pxIterator = &xStart; heapPROTECT_BLOCK_POINTER( pxIterator->pxNextFreeBlock ) < pxBlockToInsert; pxIterator = heapPROTECT_BLOCK_POINTER( pxIterator->pxNextFreeBlock ) )
    {
        /* Nothing to do here, just iterate to the right position. */
    }

    /* xStart is a file-scope struct outside ucHeap, so only validate the
     * iterator once it points at a real heap block. */
    if( pxIterator != &xStart )
    {
        heapVALIDATE_BLOCK_POINTER( pxIterator );
    }

    /* Do the block being inserted, and the block it is being inserted after
     * make a contiguous block of memory? */
    puc = ( uint8_t * ) pxIterator;

    if( ( puc + pxIterator->xBlockSize ) == ( uint8_t * ) pxBlockToInsert )
    {
        /* Merge backwards: absorb the inserted block into its predecessor. */
        pxIterator->xBlockSize += pxBlockToInsert->xBlockSize;
        pxBlockToInsert = pxIterator;
    }
    else
    {
        mtCOVERAGE_TEST_MARKER();
    }

    /* Do the block being inserted, and the block it is being inserted before
     * make a contiguous block of memory? */
    puc = ( uint8_t * ) pxBlockToInsert;

    if( ( puc + pxBlockToInsert->xBlockSize ) == ( uint8_t * ) heapPROTECT_BLOCK_POINTER( pxIterator->pxNextFreeBlock ) )
    {
        if( heapPROTECT_BLOCK_POINTER( pxIterator->pxNextFreeBlock ) != pxEnd )
        {
            /* Form one big block from the two blocks.  pxEnd is never merged -
             * it must remain as the list terminator. */
            pxBlockToInsert->xBlockSize += heapPROTECT_BLOCK_POINTER( pxIterator->pxNextFreeBlock )->xBlockSize;
            pxBlockToInsert->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( pxIterator->pxNextFreeBlock )->pxNextFreeBlock;
        }
        else
        {
            pxBlockToInsert->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( pxEnd );
        }
    }
    else
    {
        pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock;
    }

    /* If the block being inserted plugged a gap, so was merged with the block
     * before and the block after, then it's pxNextFreeBlock pointer will have
     * already been set, and should not be set here as that would make it point
     * to itself. */
    if( pxIterator != pxBlockToInsert )
    {
        pxIterator->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( pxBlockToInsert );
    }
    else
    {
        mtCOVERAGE_TEST_MARKER();
    }
}
  487. /*-----------------------------------------------------------*/
  488. void vPortGetHeapStats( HeapStats_t * pxHeapStats )
  489. {
  490. BlockLink_t * pxBlock;
  491. size_t xBlocks = 0, xMaxSize = 0, xMinSize = SIZE_MAX;
  492. vTaskSuspendAll();
  493. {
  494. pxBlock = heapPROTECT_BLOCK_POINTER( xStart.pxNextFreeBlock );
  495. /* pxBlock will be NULL if the heap has not been initialised. The heap
  496. * is initialised automatically when the first allocation is made. */
  497. if( pxBlock != NULL )
  498. {
  499. while( pxBlock != pxEnd )
  500. {
  501. /* Increment the number of blocks and record the largest block seen
  502. * so far. */
  503. xBlocks++;
  504. if( pxBlock->xBlockSize > xMaxSize )
  505. {
  506. xMaxSize = pxBlock->xBlockSize;
  507. }
  508. if( pxBlock->xBlockSize < xMinSize )
  509. {
  510. xMinSize = pxBlock->xBlockSize;
  511. }
  512. /* Move to the next block in the chain until the last block is
  513. * reached. */
  514. pxBlock = heapPROTECT_BLOCK_POINTER( pxBlock->pxNextFreeBlock );
  515. }
  516. }
  517. }
  518. ( void ) xTaskResumeAll();
  519. pxHeapStats->xSizeOfLargestFreeBlockInBytes = xMaxSize;
  520. pxHeapStats->xSizeOfSmallestFreeBlockInBytes = xMinSize;
  521. pxHeapStats->xNumberOfFreeBlocks = xBlocks;
  522. taskENTER_CRITICAL();
  523. {
  524. pxHeapStats->xAvailableHeapSpaceInBytes = xFreeBytesRemaining;
  525. pxHeapStats->xNumberOfSuccessfulAllocations = xNumberOfSuccessfulAllocations;
  526. pxHeapStats->xNumberOfSuccessfulFrees = xNumberOfSuccessfulFrees;
  527. pxHeapStats->xMinimumEverFreeBytesRemaining = xMinimumEverFreeBytesRemaining;
  528. }
  529. taskEXIT_CRITICAL();
  530. }
  531. /*-----------------------------------------------------------*/
  532. /*
  533. * Reset the state in this file. This state is normally initialized at start up.
  534. * This function must be called by the application before restarting the
  535. * scheduler.
  536. */
  537. void vPortHeapResetState( void )
  538. {
  539. pxEnd = NULL;
  540. xFreeBytesRemaining = ( size_t ) 0U;
  541. xMinimumEverFreeBytesRemaining = ( size_t ) 0U;
  542. xNumberOfSuccessfulAllocations = ( size_t ) 0U;
  543. xNumberOfSuccessfulFrees = ( size_t ) 0U;
  544. }
  545. /*-----------------------------------------------------------*/