/* heap_5.c - FreeRTOS heap implementation that supports multiple non-contiguous memory regions. */
  1. /*
  2. * FreeRTOS Kernel <DEVELOPMENT BRANCH>
  3. * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
  4. *
  5. * SPDX-License-Identifier: MIT
  6. *
  7. * Permission is hereby granted, free of charge, to any person obtaining a copy of
  8. * this software and associated documentation files (the "Software"), to deal in
  9. * the Software without restriction, including without limitation the rights to
  10. * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
  11. * the Software, and to permit persons to whom the Software is furnished to do so,
  12. * subject to the following conditions:
  13. *
  14. * The above copyright notice and this permission notice shall be included in all
  15. * copies or substantial portions of the Software.
  16. *
  17. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  18. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
  19. * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
  20. * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
  21. * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  22. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  23. *
  24. * https://www.FreeRTOS.org
  25. * https://github.com/FreeRTOS
  26. *
  27. */
  28. /*
  29. * A sample implementation of pvPortMalloc() that allows the heap to be defined
  30. * across multiple non-contiguous blocks and combines (coalesces) adjacent
  31. * memory blocks as they are freed.
  32. *
  33. * See heap_1.c, heap_2.c, heap_3.c and heap_4.c for alternative
  34. * implementations, and the memory management pages of https://www.FreeRTOS.org
  35. * for more information.
  36. *
  37. * Usage notes:
  38. *
  39. * vPortDefineHeapRegions() ***must*** be called before pvPortMalloc().
  40. * pvPortMalloc() will be called if any task objects (tasks, queues, event
  41. * groups, etc.) are created, therefore vPortDefineHeapRegions() ***must*** be
  42. * called before any other objects are defined.
  43. *
  44. * vPortDefineHeapRegions() takes a single parameter. The parameter is an array
  45. * of HeapRegion_t structures. HeapRegion_t is defined in portable.h as
  46. *
  47. * typedef struct HeapRegion
  48. * {
  49. * uint8_t *pucStartAddress; << Start address of a block of memory that will be part of the heap.
  50. * size_t xSizeInBytes; << Size of the block of memory.
  51. * } HeapRegion_t;
  52. *
  53. * The array is terminated using a NULL zero sized region definition, and the
  54. * memory regions defined in the array ***must*** appear in address order from
  55. * low address to high address. So the following is a valid example of how
  56. * to use the function.
  57. *
  58. * HeapRegion_t xHeapRegions[] =
  59. * {
  60. * { ( uint8_t * ) 0x80000000UL, 0x10000 }, << Defines a block of 0x10000 bytes starting at address 0x80000000
  61. * { ( uint8_t * ) 0x90000000UL, 0xa0000 }, << Defines a block of 0xa0000 bytes starting at address of 0x90000000
  62. * { NULL, 0 } << Terminates the array.
  63. * };
  64. *
  65. * vPortDefineHeapRegions( xHeapRegions ); << Pass the array into vPortDefineHeapRegions().
  66. *
  67. * Note 0x80000000 is the lower address so appears in the array first.
  68. *
  69. */
  70. #include <stdlib.h>
  71. #include <string.h>
  72. /* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
  73. * all the API functions to use the MPU wrappers. That should only be done when
  74. * task.h is included from an application file. */
  75. #define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
  76. #include "FreeRTOS.h"
  77. #include "task.h"
  78. #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
#if ( configSUPPORT_DYNAMIC_ALLOCATION == 0 )
    #error This file must not be used if configSUPPORT_DYNAMIC_ALLOCATION is 0
#endif

/* When set to 1, freed memory is zeroed in vPortFree() before being returned
 * to the free list.  Defaults to 0 as clearing adds cost to every free. */
#ifndef configHEAP_CLEAR_MEMORY_ON_FREE
    #define configHEAP_CLEAR_MEMORY_ON_FREE    0
#endif

/* Block sizes must not get too small.  A free block is only split if the
 * remainder would be at least twice the size of the block header. */
#define heapMINIMUM_BLOCK_SIZE    ( ( size_t ) ( xHeapStructSize << 1 ) )

/* Assumes 8bit bytes! */
#define heapBITS_PER_BYTE         ( ( size_t ) 8 )

/* Max value that fits in a size_t type. */
#define heapSIZE_MAX              ( ~( ( size_t ) 0 ) )

/* Check if multiplying a and b will result in overflow. */
#define heapMULTIPLY_WILL_OVERFLOW( a, b )     ( ( ( a ) > 0 ) && ( ( b ) > ( heapSIZE_MAX / ( a ) ) ) )

/* Check if adding a and b will result in overflow. */
#define heapADD_WILL_OVERFLOW( a, b )          ( ( a ) > ( heapSIZE_MAX - ( b ) ) )

/* Check if the subtraction operation ( a - b ) will result in underflow. */
#define heapSUBTRACT_WILL_UNDERFLOW( a, b )    ( ( a ) < ( b ) )

/* The MSB of the xBlockSize member of a BlockLink_t structure is used to track
 * the allocation status of a block.  When the MSB of the xBlockSize member of
 * a BlockLink_t structure is set then the block belongs to the application.
 * When the bit is free the block is still part of the free heap space. */
#define heapBLOCK_ALLOCATED_BITMASK    ( ( ( size_t ) 1 ) << ( ( sizeof( size_t ) * heapBITS_PER_BYTE ) - 1 ) )
#define heapBLOCK_SIZE_IS_VALID( xBlockSize )    ( ( ( xBlockSize ) & heapBLOCK_ALLOCATED_BITMASK ) == 0 )
#define heapBLOCK_IS_ALLOCATED( pxBlock )        ( ( ( pxBlock->xBlockSize ) & heapBLOCK_ALLOCATED_BITMASK ) != 0 )
#define heapALLOCATE_BLOCK( pxBlock )            ( ( pxBlock->xBlockSize ) |= heapBLOCK_ALLOCATED_BITMASK )
#define heapFREE_BLOCK( pxBlock )                ( ( pxBlock->xBlockSize ) &= ~heapBLOCK_ALLOCATED_BITMASK )

/* Setting configENABLE_HEAP_PROTECTOR to 1 enables heap block pointers
 * protection using an application supplied canary value to catch heap
 * corruption should a heap buffer overflow occur.
 */
#if ( configENABLE_HEAP_PROTECTOR == 1 )

/* Macro to load/store BlockLink_t pointers to memory.  By XORing the
 * pointers with a random canary value, heap overflows will result
 * in randomly unpredictable pointer values which will be caught by
 * the heapVALIDATE_BLOCK_POINTER assert. */
    #define heapPROTECT_BLOCK_POINTER( pxBlock )    ( ( BlockLink_t * ) ( ( ( portPOINTER_SIZE_TYPE ) ( pxBlock ) ) ^ xHeapCanary ) )

/* Assert that a heap block pointer is within the heap bounds.
 * Setting configVALIDATE_HEAP_BLOCK_POINTER to 1 enables customized heap block
 * pointer validation on heap_5. */
    #ifndef configVALIDATE_HEAP_BLOCK_POINTER
        #define heapVALIDATE_BLOCK_POINTER( pxBlock )                   \
    configASSERT( ( pucHeapHighAddress != NULL ) &&                     \
                  ( pucHeapLowAddress != NULL ) &&                      \
                  ( ( uint8_t * ) ( pxBlock ) >= pucHeapLowAddress ) && \
                  ( ( uint8_t * ) ( pxBlock ) < pucHeapHighAddress ) )
    #else /* ifndef configVALIDATE_HEAP_BLOCK_POINTER */
        #define heapVALIDATE_BLOCK_POINTER( pxBlock ) \
    configVALIDATE_HEAP_BLOCK_POINTER( pxBlock )
    #endif /* configVALIDATE_HEAP_BLOCK_POINTER */

#else /* if ( configENABLE_HEAP_PROTECTOR == 1 ) */

    /* Protector disabled: pointers are stored as-is and never validated. */
    #define heapPROTECT_BLOCK_POINTER( pxBlock )    ( pxBlock )
    #define heapVALIDATE_BLOCK_POINTER( pxBlock )

#endif /* configENABLE_HEAP_PROTECTOR */
  133. /*-----------------------------------------------------------*/
/* Define the linked list structure.  This is used to link free blocks in order
 * of their memory address.  The same header is placed at the start of every
 * allocated block, immediately before the bytes returned to the caller. */
typedef struct A_BLOCK_LINK
{
    struct A_BLOCK_LINK * pxNextFreeBlock; /**< The next free block in the list. */
    size_t xBlockSize;                     /**< The size of the free block. */
} BlockLink_t;
  141. /*-----------------------------------------------------------*/
/*
 * Inserts a block of memory that is being freed into the correct position in
 * the list of free memory blocks.  The block being freed will be merged with
 * the block in front of it and/or the block behind it if the memory blocks are
 * adjacent to each other.
 */
static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert ) PRIVILEGED_FUNCTION;

void vPortDefineHeapRegions( const HeapRegion_t * const pxHeapRegions ) PRIVILEGED_FUNCTION;

#if ( configENABLE_HEAP_PROTECTOR == 1 )

/**
 * @brief Application provided function to get a random value to be used as canary.
 *
 * @param pxHeapCanary [out] Output parameter to return the canary value.
 */
    extern void vApplicationGetRandomHeapCanary( portPOINTER_SIZE_TYPE * pxHeapCanary );
#endif /* configENABLE_HEAP_PROTECTOR */
  158. /*-----------------------------------------------------------*/
/* The size of the structure placed at the beginning of each allocated memory
 * block must be correctly byte aligned. */
static const size_t xHeapStructSize = ( sizeof( BlockLink_t ) + ( ( size_t ) ( portBYTE_ALIGNMENT - 1 ) ) ) & ~( ( size_t ) portBYTE_ALIGNMENT_MASK );

/* Create a couple of list links to mark the start and end of the list. */
PRIVILEGED_DATA static BlockLink_t xStart;
PRIVILEGED_DATA static BlockLink_t * pxEnd = NULL;

/* Keeps track of the number of calls to allocate and free memory as well as the
 * number of free bytes remaining, but says nothing about fragmentation. */
PRIVILEGED_DATA static size_t xFreeBytesRemaining = ( size_t ) 0U;
PRIVILEGED_DATA static size_t xMinimumEverFreeBytesRemaining = ( size_t ) 0U;
PRIVILEGED_DATA static size_t xNumberOfSuccessfulAllocations = ( size_t ) 0U;
PRIVILEGED_DATA static size_t xNumberOfSuccessfulFrees = ( size_t ) 0U;

#if ( configENABLE_HEAP_PROTECTOR == 1 )

/* Canary value for protecting internal heap pointers. */
    PRIVILEGED_DATA static portPOINTER_SIZE_TYPE xHeapCanary;

/* Highest and lowest heap addresses used for heap block bounds checking. */
    PRIVILEGED_DATA static uint8_t * pucHeapHighAddress = NULL;
    PRIVILEGED_DATA static uint8_t * pucHeapLowAddress = NULL;

#endif /* configENABLE_HEAP_PROTECTOR */
  178. /*-----------------------------------------------------------*/
/*
 * Allocate xWantedSize bytes from the heap using a first-fit search of the
 * address-ordered free list.  Returns NULL if no sufficiently large free
 * block exists, if the adjusted request overflows, or if xWantedSize is 0.
 * The returned pointer is aligned to portBYTE_ALIGNMENT.
 */
void * pvPortMalloc( size_t xWantedSize )
{
    BlockLink_t * pxBlock;
    BlockLink_t * pxPreviousBlock;
    BlockLink_t * pxNewBlockLink;
    void * pvReturn = NULL;
    size_t xAdditionalRequiredSize;
    size_t xAllocatedBlockSize = 0;

    /* The heap must be initialised (vPortDefineHeapRegions() called) before
     * the first call to pvPortMalloc(). */
    configASSERT( pxEnd );

    if( xWantedSize > 0 )
    {
        /* The wanted size must be increased so it can contain a BlockLink_t
         * structure in addition to the requested amount of bytes.  A request
         * that would overflow is forced to 0 so it fails the size checks
         * below. */
        if( heapADD_WILL_OVERFLOW( xWantedSize, xHeapStructSize ) == 0 )
        {
            xWantedSize += xHeapStructSize;

            /* Ensure that blocks are always aligned to the required number
             * of bytes. */
            if( ( xWantedSize & portBYTE_ALIGNMENT_MASK ) != 0x00 )
            {
                /* Byte alignment required - round up to the next aligned
                 * size, again guarding against overflow. */
                xAdditionalRequiredSize = portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK );

                if( heapADD_WILL_OVERFLOW( xWantedSize, xAdditionalRequiredSize ) == 0 )
                {
                    xWantedSize += xAdditionalRequiredSize;
                }
                else
                {
                    xWantedSize = 0;
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            xWantedSize = 0;
        }
    }
    else
    {
        mtCOVERAGE_TEST_MARKER();
    }

    /* Suspend the scheduler rather than use a critical section so interrupts
     * stay enabled while the free list is walked and modified. */
    vTaskSuspendAll();
    {
        /* Check the block size we are trying to allocate is not so large that the
         * top bit is set.  The top bit of the block size member of the BlockLink_t
         * structure is used to determine who owns the block - the application or
         * the kernel, so it must be free. */
        if( heapBLOCK_SIZE_IS_VALID( xWantedSize ) != 0 )
        {
            if( ( xWantedSize > 0 ) && ( xWantedSize <= xFreeBytesRemaining ) )
            {
                /* Traverse the list from the start (lowest address) block until
                 * one of adequate size is found. */
                pxPreviousBlock = &xStart;
                pxBlock = heapPROTECT_BLOCK_POINTER( xStart.pxNextFreeBlock );
                heapVALIDATE_BLOCK_POINTER( pxBlock );

                while( ( pxBlock->xBlockSize < xWantedSize ) && ( pxBlock->pxNextFreeBlock != heapPROTECT_BLOCK_POINTER( NULL ) ) )
                {
                    pxPreviousBlock = pxBlock;
                    pxBlock = heapPROTECT_BLOCK_POINTER( pxBlock->pxNextFreeBlock );
                    heapVALIDATE_BLOCK_POINTER( pxBlock );
                }

                /* If the end marker was reached then a block of adequate size
                 * was not found. */
                if( pxBlock != pxEnd )
                {
                    /* Return the memory space pointed to - jumping over the
                     * BlockLink_t structure at its start. */
                    pvReturn = ( void * ) ( ( ( uint8_t * ) heapPROTECT_BLOCK_POINTER( pxPreviousBlock->pxNextFreeBlock ) ) + xHeapStructSize );
                    heapVALIDATE_BLOCK_POINTER( pvReturn );

                    /* This block is being returned for use so must be taken out
                     * of the list of free blocks. */
                    pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock;

                    /* If the block is larger than required it can be split into
                     * two. */
                    configASSERT( heapSUBTRACT_WILL_UNDERFLOW( pxBlock->xBlockSize, xWantedSize ) == 0 );

                    if( ( pxBlock->xBlockSize - xWantedSize ) > heapMINIMUM_BLOCK_SIZE )
                    {
                        /* This block is to be split into two.  Create a new
                         * block following the number of bytes requested.  The void
                         * cast is used to prevent byte alignment warnings from the
                         * compiler. */
                        pxNewBlockLink = ( void * ) ( ( ( uint8_t * ) pxBlock ) + xWantedSize );
                        configASSERT( ( ( ( size_t ) pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 );

                        /* Calculate the sizes of two blocks split from the
                         * single block. */
                        pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize;
                        pxBlock->xBlockSize = xWantedSize;

                        /* Insert the new block into the list of free blocks. */
                        pxNewBlockLink->pxNextFreeBlock = pxPreviousBlock->pxNextFreeBlock;
                        pxPreviousBlock->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( pxNewBlockLink );
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    xFreeBytesRemaining -= pxBlock->xBlockSize;

                    /* Track the low-water mark of free heap space. */
                    if( xFreeBytesRemaining < xMinimumEverFreeBytesRemaining )
                    {
                        xMinimumEverFreeBytesRemaining = xFreeBytesRemaining;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    xAllocatedBlockSize = pxBlock->xBlockSize;

                    /* The block is being returned - it is allocated and owned
                     * by the application and has no "next" block. */
                    heapALLOCATE_BLOCK( pxBlock );
                    pxBlock->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( NULL );
                    xNumberOfSuccessfulAllocations++;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        traceMALLOC( pvReturn, xAllocatedBlockSize );

        /* Prevent compiler warnings when trace macros are not used. */
        ( void ) xAllocatedBlockSize;
    }
    ( void ) xTaskResumeAll();

    #if ( configUSE_MALLOC_FAILED_HOOK == 1 )
    {
        if( pvReturn == NULL )
        {
            vApplicationMallocFailedHook();
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }
    #endif /* if ( configUSE_MALLOC_FAILED_HOOK == 1 ) */

    configASSERT( ( ( ( size_t ) pvReturn ) & ( size_t ) portBYTE_ALIGNMENT_MASK ) == 0 );
    return pvReturn;
}
  331. /*-----------------------------------------------------------*/
/*
 * Return a block previously allocated by pvPortMalloc()/pvPortCalloc() to the
 * heap.  Passing NULL is a harmless no-op.  The block header stored
 * immediately before pv is validated before the block is re-inserted (and
 * possibly coalesced) into the free list.
 */
void vPortFree( void * pv )
{
    uint8_t * puc = ( uint8_t * ) pv;
    BlockLink_t * pxLink;

    if( pv != NULL )
    {
        /* The memory being freed will have a BlockLink_t structure immediately
         * before it. */
        puc -= xHeapStructSize;

        /* This casting is to keep the compiler from issuing warnings. */
        pxLink = ( void * ) puc;

        heapVALIDATE_BLOCK_POINTER( pxLink );

        /* The block must be marked as allocated and must not already be on
         * the free list - otherwise this is a double free or corruption. */
        configASSERT( heapBLOCK_IS_ALLOCATED( pxLink ) != 0 );
        configASSERT( pxLink->pxNextFreeBlock == heapPROTECT_BLOCK_POINTER( NULL ) );

        if( heapBLOCK_IS_ALLOCATED( pxLink ) != 0 )
        {
            if( pxLink->pxNextFreeBlock == heapPROTECT_BLOCK_POINTER( NULL ) )
            {
                /* The block is being returned to the heap - it is no longer
                 * allocated. */
                heapFREE_BLOCK( pxLink );
                #if ( configHEAP_CLEAR_MEMORY_ON_FREE == 1 )
                {
                    /* Check for underflow as this can occur if xBlockSize is
                     * overwritten in a heap block. */
                    if( heapSUBTRACT_WILL_UNDERFLOW( pxLink->xBlockSize, xHeapStructSize ) == 0 )
                    {
                        ( void ) memset( puc + xHeapStructSize, 0, pxLink->xBlockSize - xHeapStructSize );
                    }
                }
                #endif

                vTaskSuspendAll();
                {
                    /* Add this block to the list of free blocks. */
                    xFreeBytesRemaining += pxLink->xBlockSize;
                    traceFREE( pv, pxLink->xBlockSize );
                    prvInsertBlockIntoFreeList( ( ( BlockLink_t * ) pxLink ) );
                    xNumberOfSuccessfulFrees++;
                }
                ( void ) xTaskResumeAll();
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }
}
  384. /*-----------------------------------------------------------*/
  385. size_t xPortGetFreeHeapSize( void )
  386. {
  387. return xFreeBytesRemaining;
  388. }
  389. /*-----------------------------------------------------------*/
  390. size_t xPortGetMinimumEverFreeHeapSize( void )
  391. {
  392. return xMinimumEverFreeBytesRemaining;
  393. }
  394. /*-----------------------------------------------------------*/
  395. void xPortResetHeapMinimumEverFreeHeapSize( void )
  396. {
  397. xMinimumEverFreeBytesRemaining = xFreeBytesRemaining;
  398. }
  399. /*-----------------------------------------------------------*/
  400. void * pvPortCalloc( size_t xNum,
  401. size_t xSize )
  402. {
  403. void * pv = NULL;
  404. if( heapMULTIPLY_WILL_OVERFLOW( xNum, xSize ) == 0 )
  405. {
  406. pv = pvPortMalloc( xNum * xSize );
  407. if( pv != NULL )
  408. {
  409. ( void ) memset( pv, 0, xNum * xSize );
  410. }
  411. }
  412. return pv;
  413. }
  414. /*-----------------------------------------------------------*/
/*
 * Insert pxBlockToInsert into the address-ordered free list, merging it with
 * the free block immediately before and/or after it when they are contiguous
 * in memory.  Must be called with the scheduler suspended.
 */
static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert ) /* PRIVILEGED_FUNCTION */
{
    BlockLink_t * pxIterator;
    uint8_t * puc;

    /* Iterate through the list until a block is found that has a higher address
     * than the block being inserted. */
    for( pxIterator = &xStart; heapPROTECT_BLOCK_POINTER( pxIterator->pxNextFreeBlock ) < pxBlockToInsert; pxIterator = heapPROTECT_BLOCK_POINTER( pxIterator->pxNextFreeBlock ) )
    {
        /* Nothing to do here, just iterate to the right position. */
    }

    /* xStart is a local list anchor, not a real heap block, so only validate
     * the iterator once it points into the heap. */
    if( pxIterator != &xStart )
    {
        heapVALIDATE_BLOCK_POINTER( pxIterator );
    }

    /* Do the block being inserted, and the block it is being inserted after
     * make a contiguous block of memory? */
    puc = ( uint8_t * ) pxIterator;

    if( ( puc + pxIterator->xBlockSize ) == ( uint8_t * ) pxBlockToInsert )
    {
        /* Merge backwards: absorb the inserted block into its predecessor. */
        pxIterator->xBlockSize += pxBlockToInsert->xBlockSize;
        pxBlockToInsert = pxIterator;
    }
    else
    {
        mtCOVERAGE_TEST_MARKER();
    }

    /* Do the block being inserted, and the block it is being inserted before
     * make a contiguous block of memory? */
    puc = ( uint8_t * ) pxBlockToInsert;

    if( ( puc + pxBlockToInsert->xBlockSize ) == ( uint8_t * ) heapPROTECT_BLOCK_POINTER( pxIterator->pxNextFreeBlock ) )
    {
        if( heapPROTECT_BLOCK_POINTER( pxIterator->pxNextFreeBlock ) != pxEnd )
        {
            /* Form one big block from the two blocks. */
            pxBlockToInsert->xBlockSize += heapPROTECT_BLOCK_POINTER( pxIterator->pxNextFreeBlock )->xBlockSize;
            pxBlockToInsert->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( pxIterator->pxNextFreeBlock )->pxNextFreeBlock;
        }
        else
        {
            /* The end marker is never merged - link directly to it. */
            pxBlockToInsert->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( pxEnd );
        }
    }
    else
    {
        pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock;
    }

    /* If the block being inserted plugged a gap, so was merged with the block
     * before and the block after, then its pxNextFreeBlock pointer will have
     * already been set, and should not be set here as that would make it point
     * to itself. */
    if( pxIterator != pxBlockToInsert )
    {
        pxIterator->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( pxBlockToInsert );
    }
    else
    {
        mtCOVERAGE_TEST_MARKER();
    }
}
  474. /*-----------------------------------------------------------*/
/*
 * Initialise the heap from an array of HeapRegion_t descriptors.  The array
 * must be terminated by a zero-sized region, regions must be supplied in
 * ascending address order, and this function must be called exactly once,
 * before any call to pvPortMalloc().
 */
void vPortDefineHeapRegions( const HeapRegion_t * const pxHeapRegions ) /* PRIVILEGED_FUNCTION */
{
    BlockLink_t * pxFirstFreeBlockInRegion = NULL;
    BlockLink_t * pxPreviousFreeBlock;
    portPOINTER_SIZE_TYPE xAlignedHeap;
    size_t xTotalRegionSize, xTotalHeapSize = 0;
    BaseType_t xDefinedRegions = 0;
    portPOINTER_SIZE_TYPE xAddress;
    const HeapRegion_t * pxHeapRegion;

    /* Can only call once! */
    configASSERT( pxEnd == NULL );

    #if ( configENABLE_HEAP_PROTECTOR == 1 )
    {
        /* Obtain the application-supplied random value used to obfuscate
         * free-list pointers. */
        vApplicationGetRandomHeapCanary( &( xHeapCanary ) );
    }
    #endif

    pxHeapRegion = &( pxHeapRegions[ xDefinedRegions ] );

    /* A zero-sized region terminates the array. */
    while( pxHeapRegion->xSizeInBytes > 0 )
    {
        xTotalRegionSize = pxHeapRegion->xSizeInBytes;

        /* Ensure the heap region starts on a correctly aligned boundary. */
        xAddress = ( portPOINTER_SIZE_TYPE ) pxHeapRegion->pucStartAddress;

        if( ( xAddress & portBYTE_ALIGNMENT_MASK ) != 0 )
        {
            xAddress += ( portBYTE_ALIGNMENT - 1 );
            xAddress &= ~( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK;

            /* Adjust the size for the bytes lost to alignment. */
            xTotalRegionSize -= ( size_t ) ( xAddress - ( portPOINTER_SIZE_TYPE ) pxHeapRegion->pucStartAddress );
        }

        xAlignedHeap = xAddress;

        /* Set xStart if it has not already been set. */
        if( xDefinedRegions == 0 )
        {
            /* xStart is used to hold a pointer to the first item in the list of
             * free blocks.  The void cast is used to prevent compiler warnings. */
            xStart.pxNextFreeBlock = ( BlockLink_t * ) heapPROTECT_BLOCK_POINTER( xAlignedHeap );
            xStart.xBlockSize = ( size_t ) 0;
        }
        else
        {
            /* Should only get here if one region has already been added to the
             * heap. */
            configASSERT( pxEnd != heapPROTECT_BLOCK_POINTER( NULL ) );

            /* Check blocks are passed in with increasing start addresses. */
            configASSERT( ( size_t ) xAddress > ( size_t ) pxEnd );
        }

        #if ( configENABLE_HEAP_PROTECTOR == 1 )
        {
            /* Track the lowest heap address for block bounds validation. */
            if( ( pucHeapLowAddress == NULL ) ||
                ( ( uint8_t * ) xAlignedHeap < pucHeapLowAddress ) )
            {
                pucHeapLowAddress = ( uint8_t * ) xAlignedHeap;
            }
        }
        #endif /* configENABLE_HEAP_PROTECTOR */

        /* Remember the location of the end marker in the previous region, if
         * any. */
        pxPreviousFreeBlock = pxEnd;

        /* pxEnd is used to mark the end of the list of free blocks and is
         * inserted at the end of the region space. */
        xAddress = xAlignedHeap + ( portPOINTER_SIZE_TYPE ) xTotalRegionSize;
        xAddress -= ( portPOINTER_SIZE_TYPE ) xHeapStructSize;
        xAddress &= ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK );
        pxEnd = ( BlockLink_t * ) xAddress;
        pxEnd->xBlockSize = 0;
        pxEnd->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( NULL );

        /* To start with there is a single free block in this region that is
         * sized to take up the entire heap region minus the space taken by the
         * free block structure. */
        pxFirstFreeBlockInRegion = ( BlockLink_t * ) xAlignedHeap;
        pxFirstFreeBlockInRegion->xBlockSize = ( size_t ) ( xAddress - ( portPOINTER_SIZE_TYPE ) pxFirstFreeBlockInRegion );
        pxFirstFreeBlockInRegion->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( pxEnd );

        /* If this is not the first region that makes up the entire heap space
         * then link the previous region to this region. */
        if( pxPreviousFreeBlock != NULL )
        {
            pxPreviousFreeBlock->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( pxFirstFreeBlockInRegion );
        }

        xTotalHeapSize += pxFirstFreeBlockInRegion->xBlockSize;

        #if ( configENABLE_HEAP_PROTECTOR == 1 )
        {
            /* Track the highest heap address for block bounds validation. */
            if( ( pucHeapHighAddress == NULL ) ||
                ( ( ( ( uint8_t * ) pxFirstFreeBlockInRegion ) + pxFirstFreeBlockInRegion->xBlockSize ) > pucHeapHighAddress ) )
            {
                pucHeapHighAddress = ( ( uint8_t * ) pxFirstFreeBlockInRegion ) + pxFirstFreeBlockInRegion->xBlockSize;
            }
        }
        #endif

        /* Move onto the next HeapRegion_t structure. */
        xDefinedRegions++;
        pxHeapRegion = &( pxHeapRegions[ xDefinedRegions ] );
    }

    xMinimumEverFreeBytesRemaining = xTotalHeapSize;
    xFreeBytesRemaining = xTotalHeapSize;

    /* Check something was actually defined before it is accessed. */
    configASSERT( xTotalHeapSize );
}
  572. /*-----------------------------------------------------------*/
/*
 * Fill *pxHeapStats with a snapshot of the heap's state: free-block count and
 * sizes (gathered with the scheduler suspended) plus the allocation counters
 * (read inside a critical section).
 */
void vPortGetHeapStats( HeapStats_t * pxHeapStats )
{
    BlockLink_t * pxBlock;
    size_t xBlocks = 0, xMaxSize = 0, xMinSize = SIZE_MAX;

    vTaskSuspendAll();
    {
        pxBlock = heapPROTECT_BLOCK_POINTER( xStart.pxNextFreeBlock );

        /* pxBlock will be NULL if the heap has not been initialised.  The heap
         * is initialised automatically when the first allocation is made. */
        if( pxBlock != NULL )
        {
            while( pxBlock != pxEnd )
            {
                /* Increment the number of blocks and record the largest block seen
                 * so far. */
                xBlocks++;

                if( pxBlock->xBlockSize > xMaxSize )
                {
                    xMaxSize = pxBlock->xBlockSize;
                }

                /* Heap five will have a zero sized block at the end of each
                 * region - the block is only used to link to the next
                 * heap region so it is not a real block. */
                if( pxBlock->xBlockSize != 0 )
                {
                    if( pxBlock->xBlockSize < xMinSize )
                    {
                        xMinSize = pxBlock->xBlockSize;
                    }
                }

                /* Move to the next block in the chain until the last block is
                 * reached. */
                pxBlock = heapPROTECT_BLOCK_POINTER( pxBlock->pxNextFreeBlock );
            }
        }
    }
    ( void ) xTaskResumeAll();

    pxHeapStats->xSizeOfLargestFreeBlockInBytes = xMaxSize;
    pxHeapStats->xSizeOfSmallestFreeBlockInBytes = xMinSize;
    pxHeapStats->xNumberOfFreeBlocks = xBlocks;

    taskENTER_CRITICAL();
    {
        pxHeapStats->xAvailableHeapSpaceInBytes = xFreeBytesRemaining;
        pxHeapStats->xNumberOfSuccessfulAllocations = xNumberOfSuccessfulAllocations;
        pxHeapStats->xNumberOfSuccessfulFrees = xNumberOfSuccessfulFrees;
        pxHeapStats->xMinimumEverFreeBytesRemaining = xMinimumEverFreeBytesRemaining;
    }
    taskEXIT_CRITICAL();
}
  622. /*-----------------------------------------------------------*/
  623. /*
  624. * Reset the state in this file. This state is normally initialized at start up.
  625. * This function must be called by the application before restarting the
  626. * scheduler.
  627. */
  628. void vPortHeapResetState( void )
  629. {
  630. pxEnd = NULL;
  631. xFreeBytesRemaining = ( size_t ) 0U;
  632. xMinimumEverFreeBytesRemaining = ( size_t ) 0U;
  633. xNumberOfSuccessfulAllocations = ( size_t ) 0U;
  634. xNumberOfSuccessfulFrees = ( size_t ) 0U;
  635. #if ( configENABLE_HEAP_PROTECTOR == 1 )
  636. pucHeapHighAddress = NULL;
  637. pucHeapLowAddress = NULL;
  638. #endif /* #if ( configENABLE_HEAP_PROTECTOR == 1 ) */
  639. }
  640. /*-----------------------------------------------------------*/