/*
 * FreeRTOS Kernel V10.4.6
 * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of
 * this software and associated documentation files (the "Software"), to deal in
 * the Software without restriction, including without limitation the rights to
 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
 * the Software, and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * https://www.FreeRTOS.org
 * https://github.com/FreeRTOS
 *
 */
#include <stdlib.h>
#include <string.h>

#include "FreeRTOS.h"
#include "queue.h"

/* Semaphores do not actually store or copy data, so have an item size of
 * zero. */
#define queueSEMAPHORE_QUEUE_ITEM_LENGTH    ( ( UBaseType_t ) 0 )
#define queueMUTEX_GIVE_BLOCK_TIME          ( ( TickType_t ) 0U )
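
/* This file adapts the FreeRTOS queue API to RT-Thread. A FreeRTOS queue
 * handle wraps exactly one RT-Thread IPC object: a message queue for plain
 * queues, a mutex for mutexes (recursive or not), and a semaphore for binary
 * and counting semaphores. Every API call reads the wrapped object's type at
 * runtime and dispatches to the matching rt_mq_*, rt_mutex_* or rt_sem_*
 * primitive. */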
typedef struct QueueDefinition
{
    struct rt_ipc_object * rt_ipc;
} xQUEUE;

typedef xQUEUE Queue_t;

/* Monotonically increasing counters used to give each underlying RT-Thread
 * object a unique name ( "mutex00", "sem01", "queue02", ... ). */
static volatile rt_uint8_t mutex_index = 0;
static volatile rt_uint8_t sem_index = 0;
static volatile rt_uint8_t queue_index = 0;
/*-----------------------------------------------------------*/
BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
                               BaseType_t xNewQueue )
{
    Queue_t * const pxQueue = xQueue;
    struct rt_ipc_object * pipc;
    rt_uint8_t type;

    /* The underlying RT-Thread object is reset in place, so a first-time
     * reset is no different from any other. */
    ( void ) xNewQueue;

    configASSERT( pxQueue );

    pipc = pxQueue->rt_ipc;
    RT_ASSERT( pipc != RT_NULL );
    type = rt_object_get_type( &pipc->parent );

    if ( type == RT_Object_Class_Semaphore )
    {
        rt_sem_control( ( rt_sem_t ) pipc, RT_IPC_CMD_RESET, ( void * ) 0 );
    }
    else if ( type == RT_Object_Class_MessageQueue )
    {
        rt_mq_control( ( rt_mq_t ) pipc, RT_IPC_CMD_RESET, RT_NULL );
    }

    return pdPASS;
}
/*-----------------------------------------------------------*/
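
/* Static creation initialises an RT-Thread object inside the caller-supplied
 * StaticQueue_t / StaticSemaphore_t. For message queues, QUEUE_BUFFER_SIZE()
 * is assumed to size the storage area for uxQueueLength messages including
 * RT-Thread's per-message bookkeeping header. */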
#if ( configSUPPORT_STATIC_ALLOCATION == 1 )

QueueHandle_t xQueueGenericCreateStatic( const UBaseType_t uxQueueLength,
                                         const UBaseType_t uxItemSize,
                                         uint8_t * pucQueueStorage,
                                         StaticQueue_t * pxStaticQueue,
                                         const uint8_t ucQueueType )
{
    Queue_t * pxNewQueue = NULL;
    char name[ RT_NAME_MAX ] = { 0 };

    /* The StaticQueue_t structure and the queue storage area must be
     * supplied. */
    configASSERT( pxStaticQueue );

    if( ( uxQueueLength > ( UBaseType_t ) 0 ) &&
        ( pxStaticQueue != NULL ) &&

        /* A queue storage area should be provided if the item size is not 0, and
         * should not be provided if the item size is 0. */
        ( !( ( pucQueueStorage != NULL ) && ( uxItemSize == 0 ) ) ) &&
        ( !( ( pucQueueStorage == NULL ) && ( uxItemSize != 0 ) ) ) )
    {
        if ( ucQueueType == queueQUEUE_TYPE_RECURSIVE_MUTEX || ucQueueType == queueQUEUE_TYPE_MUTEX )
        {
            rt_snprintf( name, RT_NAME_MAX, "mutex%02d", mutex_index++ );
            rt_mutex_init( ( rt_mutex_t ) &( ( StaticSemaphore_t * ) pxStaticQueue )->ipc_obj.mutex, name, RT_IPC_FLAG_PRIO );
        }
        else if ( ucQueueType == queueQUEUE_TYPE_BINARY_SEMAPHORE || ucQueueType == queueQUEUE_TYPE_COUNTING_SEMAPHORE )
        {
            rt_snprintf( name, RT_NAME_MAX, "sem%02d", sem_index++ );
            rt_sem_init( ( rt_sem_t ) &( ( StaticSemaphore_t * ) pxStaticQueue )->ipc_obj.semaphore, name, 0, RT_IPC_FLAG_PRIO );
            ( ( StaticSemaphore_t * ) pxStaticQueue )->ipc_obj.semaphore.max_value = uxQueueLength;
        }
        else if ( ucQueueType == queueQUEUE_TYPE_BASE )
        {
            rt_snprintf( name, RT_NAME_MAX, "queue%02d", queue_index++ );
            rt_mq_init( &( pxStaticQueue->ipc_obj ), name, pucQueueStorage, uxItemSize, QUEUE_BUFFER_SIZE( uxQueueLength, uxItemSize ), RT_IPC_FLAG_PRIO );
        }
        else
        {
            return pxNewQueue;
        }

        pxStaticQueue->rt_ipc = ( struct rt_ipc_object * ) &pxStaticQueue->ipc_obj;
        pxNewQueue = ( QueueHandle_t ) pxStaticQueue;
    }

    return pxNewQueue;
}

#endif /* configSUPPORT_STATIC_ALLOCATION */
/*-----------------------------------------------------------*/
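
/* Dynamic creation guards against arithmetic overflow before allocating, then
 * creates the matching RT-Thread object on the kernel heap. Semaphores are a
 * special case: they are carved out of a malloc'd rt_semaphore_wrapper (which
 * appends a max_value field to the plain rt_semaphore), initialised with
 * rt_sem_init(), and then have their RT_Object_Class_Static flag cleared so
 * vQueueDelete() can tell them apart from genuinely static semaphores. */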
#if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )

QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength,
                                   const UBaseType_t uxItemSize,
                                   const uint8_t ucQueueType )
{
    Queue_t * pxNewQueue = NULL;
    char name[ RT_NAME_MAX ] = { 0 };
    struct rt_ipc_object * pipc = RT_NULL;

    if( ( uxQueueLength > ( UBaseType_t ) 0 ) &&
        /* Check for multiplication overflow. */
        ( ( SIZE_MAX / uxQueueLength ) >= uxItemSize ) &&
        /* Check for addition overflow. */
        ( ( SIZE_MAX - sizeof( Queue_t ) ) >= ( uxQueueLength * uxItemSize ) ) )
    {
        pxNewQueue = ( Queue_t * ) RT_KERNEL_MALLOC( sizeof( Queue_t ) );

        if ( pxNewQueue == NULL )
        {
            return ( QueueHandle_t ) pxNewQueue;
        }

        if ( ucQueueType == queueQUEUE_TYPE_RECURSIVE_MUTEX || ucQueueType == queueQUEUE_TYPE_MUTEX )
        {
            rt_snprintf( name, RT_NAME_MAX, "mutex%02d", mutex_index++ );
            pipc = ( struct rt_ipc_object * ) rt_mutex_create( name, RT_IPC_FLAG_PRIO );
        }
        else if ( ucQueueType == queueQUEUE_TYPE_BINARY_SEMAPHORE || ucQueueType == queueQUEUE_TYPE_COUNTING_SEMAPHORE )
        {
            rt_snprintf( name, RT_NAME_MAX, "sem%02d", sem_index++ );
            pipc = ( struct rt_ipc_object * ) RT_KERNEL_MALLOC( sizeof( struct rt_semaphore_wrapper ) );

            if ( pipc != RT_NULL )
            {
                rt_sem_init( ( rt_sem_t ) pipc, name, 0, RT_IPC_FLAG_PRIO );
                ( ( struct rt_semaphore_wrapper * ) pipc )->max_value = uxQueueLength;
                /* Mark as dynamic so we can distinguish in vQueueDelete. */
                pipc->parent.type &= ~RT_Object_Class_Static;
            }
        }
        else if ( ucQueueType == queueQUEUE_TYPE_BASE )
        {
            rt_snprintf( name, RT_NAME_MAX, "queue%02d", queue_index++ );
            pipc = ( struct rt_ipc_object * ) rt_mq_create( name, uxItemSize, uxQueueLength, RT_IPC_FLAG_PRIO );
        }

        if ( pipc == RT_NULL )
        {
            RT_KERNEL_FREE( pxNewQueue );
            return NULL;
        }

        pxNewQueue->rt_ipc = pipc;
    }

    return ( QueueHandle_t ) pxNewQueue;
}

#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
/*-----------------------------------------------------------*/
#if ( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )

QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType )
{
    QueueHandle_t xNewQueue;
    const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0;

    xNewQueue = xQueueGenericCreate( uxMutexLength, uxMutexSize, ucQueueType );

    return xNewQueue;
}

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/

#if ( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )

QueueHandle_t xQueueCreateMutexStatic( const uint8_t ucQueueType,
                                       StaticQueue_t * pxStaticQueue )
{
    QueueHandle_t xNewQueue;
    const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0;

    xNewQueue = xQueueGenericCreateStatic( uxMutexLength, uxMutexSize, NULL, pxStaticQueue, ucQueueType );

    return xNewQueue;
}

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/
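
/* The mutex holder is read straight from the owner field of the underlying
 * rt_mutex. Interrupts are masked around the read so the owner cannot change
 * mid-access. */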
#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )

TaskHandle_t xQueueGetMutexHolder( QueueHandle_t xSemaphore )
{
    TaskHandle_t pxReturn;
    struct rt_ipc_object * pipc;
    rt_uint8_t type;
    rt_base_t level;

    configASSERT( xSemaphore );

    pipc = xSemaphore->rt_ipc;
    RT_ASSERT( pipc != RT_NULL );
    type = rt_object_get_type( &pipc->parent );

    if ( type == RT_Object_Class_Mutex )
    {
        level = rt_hw_interrupt_disable();
        pxReturn = ( TaskHandle_t ) ( ( rt_mutex_t ) pipc )->owner;
        rt_hw_interrupt_enable( level );
    }
    else
    {
        pxReturn = NULL;
    }

    return pxReturn;
}

#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
/*-----------------------------------------------------------*/

#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )

TaskHandle_t xQueueGetMutexHolderFromISR( QueueHandle_t xSemaphore )
{
    return xQueueGetMutexHolder( xSemaphore );
}

#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
/*-----------------------------------------------------------*/
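
/* RT-Thread mutexes support recursive locking natively (the owner keeps a
 * hold count), so the recursive give/take calls can map directly onto the
 * plain send/take paths below. */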
#if ( configUSE_RECURSIVE_MUTEXES == 1 )

BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex )
{
    Queue_t * const pxMutex = ( Queue_t * ) xMutex;

    configASSERT( pxMutex );

    return xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK );
}

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/

#if ( configUSE_RECURSIVE_MUTEXES == 1 )

BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex,
                                     TickType_t xTicksToWait )
{
    Queue_t * const pxMutex = ( Queue_t * ) xMutex;

    configASSERT( pxMutex );

    return xQueueSemaphoreTake( pxMutex, xTicksToWait );
}

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/
#if ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )

QueueHandle_t xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount,
                                                   const UBaseType_t uxInitialCount,
                                                   StaticQueue_t * pxStaticQueue )
{
    QueueHandle_t xHandle = NULL;

    if( ( uxMaxCount != 0 ) &&
        ( uxInitialCount <= uxMaxCount ) )
    {
        xHandle = xQueueGenericCreateStatic( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, NULL, pxStaticQueue, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

        if( xHandle != NULL )
        {
            /* Set the initial count directly on the underlying RT-Thread
             * semaphore; xQueueGenericCreateStatic() initialised it to 0. */
            ( ( rt_sem_t ) ( ( Queue_t * ) xHandle )->rt_ipc )->value = uxInitialCount;
        }
    }
    else
    {
        configASSERT( xHandle );
    }

    return xHandle;
}

#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) */
/*-----------------------------------------------------------*/

#if ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )

QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount,
                                             const UBaseType_t uxInitialCount )
{
    QueueHandle_t xHandle = NULL;

    if( ( uxMaxCount != 0 ) &&
        ( uxInitialCount <= uxMaxCount ) )
    {
        xHandle = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

        if( xHandle != NULL )
        {
            /* Set the initial count directly on the underlying RT-Thread
             * semaphore; xQueueGenericCreate() initialised it to 0. */
            ( ( rt_sem_t ) ( ( Queue_t * ) xHandle )->rt_ipc )->value = uxInitialCount;
        }
    }
    else
    {
        configASSERT( xHandle );
    }

    return xHandle;
}

#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
/*-----------------------------------------------------------*/
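
/* Sending dispatches on the wrapped object's type: a "send" to a mutex is a
 * release, a send to a semaphore is a release capped at max_value (mirroring
 * the FreeRTOS rule that giving an already-full semaphore fails rather than
 * overflowing the count), and a send to a message queue copies the item into
 * the underlying rt_mq. */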
BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
                              const void * const pvItemToQueue,
                              TickType_t xTicksToWait,
                              const BaseType_t xCopyPosition )
{
    Queue_t * const pxQueue = xQueue;
    struct rt_ipc_object * pipc;
    rt_uint8_t type;
    rt_base_t level;
    rt_err_t err = -RT_ERROR;

    configASSERT( pxQueue );

    #if ( INCLUDE_xTaskGetSchedulerState == 1 )
    {
        configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
    }
    #endif

    pipc = pxQueue->rt_ipc;
    RT_ASSERT( pipc != RT_NULL );
    type = rt_object_get_type( &pipc->parent );

    if ( type == RT_Object_Class_Mutex )
    {
        err = rt_mutex_release( ( rt_mutex_t ) pipc );
    }
    else if ( type == RT_Object_Class_Semaphore )
    {
        /* Only release if the count has not yet reached its maximum. */
        level = rt_hw_interrupt_disable();

        if ( ( ( rt_sem_t ) pipc )->value < ( ( struct rt_semaphore_wrapper * ) pipc )->max_value )
        {
            err = rt_sem_release( ( rt_sem_t ) pipc );
        }

        rt_hw_interrupt_enable( level );
    }
    else if ( type == RT_Object_Class_MessageQueue )
    {
        if ( xCopyPosition == queueSEND_TO_BACK )
        {
            err = rt_mq_send_wait( ( rt_mq_t ) pipc, pvItemToQueue, ( ( rt_mq_t ) pipc )->msg_size, ( rt_int32_t ) xTicksToWait );
        }
        else if ( xCopyPosition == queueSEND_TO_FRONT )
        {
            /* TODO: need to implement the timeout for LIFO sends;
             * rt_mq_urgent does not block, so xTicksToWait is currently
             * ignored here. */
            err = rt_mq_urgent( ( rt_mq_t ) pipc, pvItemToQueue, ( ( rt_mq_t ) pipc )->msg_size );
        }
    }

    return rt_err_to_freertos( err );
}
/*-----------------------------------------------------------*/
BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
                                     const void * const pvItemToQueue,
                                     BaseType_t * const pxHigherPriorityTaskWoken,
                                     const BaseType_t xCopyPosition )
{
    Queue_t * const pxQueue = xQueue;
    struct rt_ipc_object * pipc;
    rt_uint8_t type;
    rt_err_t err = -RT_ERROR;

    configASSERT( pxQueue );

    pipc = pxQueue->rt_ipc;
    RT_ASSERT( pipc != RT_NULL );
    type = rt_object_get_type( &pipc->parent );

    if ( type == RT_Object_Class_MessageQueue )
    {
        if ( xCopyPosition == queueSEND_TO_BACK )
        {
            err = rt_mq_send( ( rt_mq_t ) pipc, pvItemToQueue, ( ( rt_mq_t ) pipc )->msg_size );
        }
        else if ( xCopyPosition == queueSEND_TO_FRONT )
        {
            err = rt_mq_urgent( ( rt_mq_t ) pipc, pvItemToQueue, ( ( rt_mq_t ) pipc )->msg_size );
        }
    }

    /* RT-Thread reschedules inside its IPC calls, so the caller never needs
     * to request a context switch. Write pdFALSE rather than leaving the
     * caller's variable uninitialised, as the other FromISR wrappers in this
     * file do. */
    if ( pxHigherPriorityTaskWoken != NULL )
    {
        *pxHigherPriorityTaskWoken = pdFALSE;
    }

    return rt_err_to_freertos( err );
}
/*-----------------------------------------------------------*/
BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
                              BaseType_t * const pxHigherPriorityTaskWoken )
{
    Queue_t * const pxQueue = xQueue;
    struct rt_ipc_object * pipc;
    rt_uint8_t type;
    rt_base_t level;
    rt_err_t err = -RT_ERROR;

    configASSERT( pxQueue );

    pipc = pxQueue->rt_ipc;
    RT_ASSERT( pipc != RT_NULL );
    type = rt_object_get_type( &pipc->parent );

    /* Mutexes cannot be given from an ISR. */
    RT_ASSERT( type != RT_Object_Class_Mutex );

    if ( type == RT_Object_Class_Semaphore )
    {
        level = rt_hw_interrupt_disable();

        if ( ( ( rt_sem_t ) pipc )->value < ( ( struct rt_semaphore_wrapper * ) pipc )->max_value )
        {
            err = rt_sem_release( ( rt_sem_t ) pipc );
        }

        rt_hw_interrupt_enable( level );
    }

    if ( pxHigherPriorityTaskWoken != NULL )
    {
        *pxHigherPriorityTaskWoken = pdFALSE;
    }

    return rt_err_to_freertos( err );
}
/*-----------------------------------------------------------*/
BaseType_t xQueueReceive( QueueHandle_t xQueue,
                          void * const pvBuffer,
                          TickType_t xTicksToWait )
{
    Queue_t * const pxQueue = xQueue;
    struct rt_ipc_object * pipc;
    rt_uint8_t type;
    rt_err_t err = -RT_ERROR;

    /* Check the queue pointer is not NULL. */
    configASSERT( ( pxQueue ) );

    /* Cannot block if the scheduler is suspended. */
    #if ( INCLUDE_xTaskGetSchedulerState == 1 )
    {
        configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
    }
    #endif

    pipc = pxQueue->rt_ipc;
    RT_ASSERT( pipc != RT_NULL );
    type = rt_object_get_type( &pipc->parent );

    if ( type == RT_Object_Class_MessageQueue )
    {
        err = ( rt_err_t ) rt_mq_recv( ( rt_mq_t ) pipc, pvBuffer, ( ( rt_mq_t ) pipc )->msg_size, ( rt_int32_t ) xTicksToWait );

        /* From RT-Thread 5.0.1, rt_mq_recv returns the number of bytes
         * received rather than RT_EOK, so any non-negative value is a
         * success. */
        #if RT_VER_NUM >= 0x50001
        if ( ( rt_ssize_t ) err >= 0 )
        {
            err = RT_EOK;
        }
        #endif
    }

    return rt_err_to_freertos( err );
}
/*-----------------------------------------------------------*/
BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
                                TickType_t xTicksToWait )
{
    Queue_t * const pxQueue = xQueue;
    struct rt_ipc_object * pipc;
    rt_uint8_t type;
    rt_err_t err = -RT_ERROR;

    /* Check the queue pointer is not NULL. */
    configASSERT( ( pxQueue ) );

    /* Cannot block if the scheduler is suspended. */
    #if ( INCLUDE_xTaskGetSchedulerState == 1 )
    {
        configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
    }
    #endif

    pipc = pxQueue->rt_ipc;
    RT_ASSERT( pipc != RT_NULL );
    type = rt_object_get_type( &pipc->parent );

    /* Note that with a 32-bit TickType_t, a block time of portMAX_DELAY
     * (all bits set) casts to RT_WAITING_FOREVER ( -1 ), giving the expected
     * indefinite block. */
    if ( type == RT_Object_Class_Mutex )
    {
        err = rt_mutex_take( ( rt_mutex_t ) pipc, ( rt_int32_t ) xTicksToWait );
    }
    else if ( type == RT_Object_Class_Semaphore )
    {
        err = rt_sem_take( ( rt_sem_t ) pipc, ( rt_int32_t ) xTicksToWait );
    }

    return rt_err_to_freertos( err );
}
/*-----------------------------------------------------------*/
BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
                                 void * const pvBuffer,
                                 BaseType_t * const pxHigherPriorityTaskWoken )
{
    Queue_t * const pxQueue = xQueue;
    struct rt_ipc_object * pipc;
    rt_uint8_t type;
    rt_err_t err = -RT_ERROR;

    configASSERT( pxQueue );

    pipc = pxQueue->rt_ipc;
    RT_ASSERT( pipc != RT_NULL );
    type = rt_object_get_type( &pipc->parent );
    RT_ASSERT( type != RT_Object_Class_Mutex );

    if ( type == RT_Object_Class_Semaphore )
    {
        err = rt_sem_take( ( rt_sem_t ) pipc, RT_WAITING_NO );
    }
    else if ( type == RT_Object_Class_MessageQueue )
    {
        err = ( rt_err_t ) rt_mq_recv( ( rt_mq_t ) pipc, pvBuffer, ( ( rt_mq_t ) pipc )->msg_size, RT_WAITING_NO );

        #if RT_VER_NUM >= 0x50001
        if ( ( rt_ssize_t ) err >= 0 )
        {
            err = RT_EOK;
        }
        #endif
    }

    if ( pxHigherPriorityTaskWoken != NULL )
    {
        *pxHigherPriorityTaskWoken = pdFALSE;
    }

    return rt_err_to_freertos( err );
}
/*-----------------------------------------------------------*/
UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )
{
    UBaseType_t uxReturn = 0;
    struct rt_ipc_object * pipc;
    rt_uint8_t type;
    rt_base_t level;

    configASSERT( xQueue );

    pipc = xQueue->rt_ipc;
    RT_ASSERT( pipc != RT_NULL );
    type = rt_object_get_type( &pipc->parent );

    level = rt_hw_interrupt_disable();

    if ( type == RT_Object_Class_Mutex )
    {
        /* An unowned mutex counts as one available "message". */
        if ( ( ( rt_mutex_t ) pipc )->owner == RT_NULL )
        {
            uxReturn = 1;
        }
        else
        {
            uxReturn = 0;
        }
    }
    else if ( type == RT_Object_Class_Semaphore )
    {
        uxReturn = ( ( rt_sem_t ) pipc )->value;
    }
    else if ( type == RT_Object_Class_MessageQueue )
    {
        uxReturn = ( ( rt_mq_t ) pipc )->entry;
    }

    rt_hw_interrupt_enable( level );

    return uxReturn;
}
/*-----------------------------------------------------------*/
UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )
{
    UBaseType_t uxReturn = 0;
    struct rt_ipc_object * pipc;
    rt_uint8_t type;
    rt_base_t level;

    configASSERT( xQueue );

    pipc = xQueue->rt_ipc;
    RT_ASSERT( pipc != RT_NULL );
    type = rt_object_get_type( &pipc->parent );

    level = rt_hw_interrupt_disable();

    if ( type == RT_Object_Class_Mutex )
    {
        if ( ( ( rt_mutex_t ) pipc )->owner == RT_NULL )
        {
            uxReturn = 0;
        }
        else
        {
            uxReturn = 1;
        }
    }
    else if ( type == RT_Object_Class_Semaphore )
    {
        uxReturn = ( ( struct rt_semaphore_wrapper * ) pipc )->max_value - ( ( rt_sem_t ) pipc )->value;
    }
    else if ( type == RT_Object_Class_MessageQueue )
    {
        uxReturn = ( ( rt_mq_t ) pipc )->max_msgs - ( ( rt_mq_t ) pipc )->entry;
    }

    rt_hw_interrupt_enable( level );

    return uxReturn;
}
/*-----------------------------------------------------------*/
UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue )
{
    return uxQueueMessagesWaiting( xQueue );
}
/*-----------------------------------------------------------*/
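
/* Deletion must match the way the object was created. Statically initialised
 * objects (rt_*_init) are detached; dynamically created ones (rt_*_create, or
 * the malloc'd semaphore wrapper) are deleted and their memory returned to
 * the kernel heap. rt_object_is_systemobject() can distinguish the two
 * because xQueueGenericCreate() cleared the static flag on its semaphores. */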
void vQueueDelete( QueueHandle_t xQueue )
{
    Queue_t * const pxQueue = xQueue;
    struct rt_ipc_object * pipc;
    rt_uint8_t type;

    configASSERT( pxQueue );

    pipc = pxQueue->rt_ipc;
    RT_ASSERT( pipc != RT_NULL );
    type = rt_object_get_type( &pipc->parent );

    #if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
    if ( rt_object_is_systemobject( ( rt_object_t ) pipc ) )
    #endif
    {
        #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
        if ( type == RT_Object_Class_Mutex )
        {
            rt_mutex_detach( ( rt_mutex_t ) pipc );
        }
        else if ( type == RT_Object_Class_Semaphore )
        {
            rt_sem_detach( ( rt_sem_t ) pipc );
        }
        else if ( type == RT_Object_Class_MessageQueue )
        {
            rt_mq_detach( ( rt_mq_t ) pipc );
        }
        #endif
    #if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
    }
    else
    {
    #endif
        #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
        if ( type == RT_Object_Class_Mutex )
        {
            rt_mutex_delete( ( rt_mutex_t ) pipc );
        }
        else if ( type == RT_Object_Class_Semaphore )
        {
            /* The semaphore was initialised with rt_sem_init() into memory
             * obtained from RT_KERNEL_MALLOC() by xQueueGenericCreate(), so
             * restore the static flag, detach it, then free the memory. */
            pipc->parent.type |= RT_Object_Class_Static;
            rt_sem_detach( ( rt_sem_t ) pipc );
            RT_KERNEL_FREE( pipc );
        }
        else if ( type == RT_Object_Class_MessageQueue )
        {
            rt_mq_delete( ( rt_mq_t ) pipc );
        }
        else
        {
            return;
        }

        /* The Queue_t wrapper itself was also allocated dynamically. */
        RT_KERNEL_FREE( pxQueue );
        #endif
    }
}
/*-----------------------------------------------------------*/
BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )
{
    BaseType_t xReturn;

    configASSERT( xQueue );

    if( uxQueueMessagesWaiting( xQueue ) == ( UBaseType_t ) 0 )
    {
        xReturn = pdTRUE;
    }
    else
    {
        xReturn = pdFALSE;
    }

    return xReturn;
}
/*-----------------------------------------------------------*/

BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
{
    BaseType_t xReturn;

    configASSERT( xQueue );

    if ( uxQueueSpacesAvailable( xQueue ) == ( UBaseType_t ) 0 )
    {
        xReturn = pdTRUE;
    }
    else
    {
        xReturn = pdFALSE;
    }

    return xReturn;
}
/*-----------------------------------------------------------*/
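
/* Queue sets, peek and overwrite have no direct RT-Thread equivalent, so on
 * ESP_PLATFORM these entry points are stubbed out: they log an error and
 * assert, making any accidental use fail loudly at runtime. */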
#ifdef ESP_PLATFORM

/* Unimplemented */
#include "esp_log.h"

static const char * TAG = "freertos";

QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength )
{
    ESP_LOGE( TAG, "xQueueCreateSet unimplemented" );
    configASSERT( 0 );
    return NULL;
}

BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
                           QueueSetHandle_t xQueueSet )
{
    ESP_LOGE( TAG, "xQueueAddToSet unimplemented" );
    configASSERT( 0 );
    return pdFAIL;
}

BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore,
                                QueueSetHandle_t xQueueSet )
{
    ESP_LOGE( TAG, "xQueueRemoveFromSet unimplemented" );
    configASSERT( 0 );
    return pdFAIL;
}

QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
                                            const TickType_t xTicksToWait )
{
    ESP_LOGE( TAG, "xQueueSelectFromSet unimplemented" );
    configASSERT( 0 );
    return NULL;
}

QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet )
{
    ESP_LOGE( TAG, "xQueueSelectFromSetFromISR unimplemented" );
    configASSERT( 0 );
    return NULL;
}

BaseType_t xQueuePeek( QueueHandle_t xQueue,
                       void * const pvBuffer,
                       TickType_t xTicksToWait )
{
    ESP_LOGE( TAG, "xQueuePeek unimplemented" );
    configASSERT( 0 );
    return pdFAIL;
}

BaseType_t xQueueOverwrite( QueueHandle_t xQueue,
                            const void * pvItemToQueue )
{
    ESP_LOGE( TAG, "xQueueOverwrite unimplemented" );
    configASSERT( 0 );
    return pdFAIL;
}

BaseType_t xQueueOverwriteFromISR( QueueHandle_t xQueue,
                                   const void * pvItemToQueue,
                                   BaseType_t * pxHigherPriorityTaskWoken )
{
    ESP_LOGE( TAG, "xQueueOverwriteFromISR unimplemented" );
    configASSERT( 0 );
    return pdFAIL;
}

#endif /* ESP_PLATFORM */