/* queue.c — FreeRTOS queue/semaphore/mutex API implemented on top of RT-Thread IPC objects. */
  1. /*
  2. * FreeRTOS Kernel V10.4.6
  3. * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
  4. *
  5. * SPDX-License-Identifier: MIT
  6. *
  7. * Permission is hereby granted, free of charge, to any person obtaining a copy of
  8. * this software and associated documentation files (the "Software"), to deal in
  9. * the Software without restriction, including without limitation the rights to
  10. * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
  11. * the Software, and to permit persons to whom the Software is furnished to do so,
  12. * subject to the following conditions:
  13. *
  14. * The above copyright notice and this permission notice shall be included in all
  15. * copies or substantial portions of the Software.
  16. *
  17. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  18. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
  19. * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
  20. * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
  21. * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  22. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  23. *
  24. * https://www.FreeRTOS.org
  25. * https://github.com/FreeRTOS
  26. *
  27. */
  28. #include <stdlib.h>
  29. #include <string.h>
  30. #include "FreeRTOS.h"
  31. #include "queue.h"
  32. /* Semaphores do not actually store or copy data, so have an item size of
  33. * zero. */
  34. #define queueSEMAPHORE_QUEUE_ITEM_LENGTH ( ( UBaseType_t ) 0 )
  35. #define queueMUTEX_GIVE_BLOCK_TIME ( ( TickType_t ) 0U )
/* In this port a FreeRTOS queue handle is just a wrapper around a pointer to
 * the backing RT-Thread IPC object (mutex, semaphore or message queue); no
 * FreeRTOS queue state is stored here. */
typedef struct QueueDefinition
{
    struct rt_ipc_object *rt_ipc; /* underlying RT-Thread IPC primitive */
} xQUEUE;
typedef xQUEUE Queue_t; /* name used throughout the rest of this file */
/* Monotonic counters used only to generate distinct RT-Thread object names
 * ("mutex00", "sem01", ...). They wrap at 256 (rt_uint8_t); name collisions
 * after wrap-around are harmless for correctness. */
static volatile rt_uint8_t mutex_index = 0;
static volatile rt_uint8_t sem_index = 0;
static volatile rt_uint8_t queue_index = 0;
  44. /*-----------------------------------------------------------*/
  45. #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
  46. QueueHandle_t xQueueGenericCreateStatic( const UBaseType_t uxQueueLength,
  47. const UBaseType_t uxItemSize,
  48. uint8_t * pucQueueStorage,
  49. StaticQueue_t * pxStaticQueue,
  50. const uint8_t ucQueueType )
  51. {
  52. Queue_t * pxNewQueue = NULL;
  53. char name[RT_NAME_MAX] = {0};
  54. /* The StaticQueue_t structure and the queue storage area must be
  55. * supplied. */
  56. configASSERT( pxStaticQueue );
  57. if( ( uxQueueLength > ( UBaseType_t ) 0 ) &&
  58. ( pxStaticQueue != NULL ) &&
  59. /* A queue storage area should be provided if the item size is not 0, and
  60. * should not be provided if the item size is 0. */
  61. ( !( ( pucQueueStorage != NULL ) && ( uxItemSize == 0 ) ) ) &&
  62. ( !( ( pucQueueStorage == NULL ) && ( uxItemSize != 0 ) ) ) )
  63. {
  64. if ( ucQueueType == queueQUEUE_TYPE_RECURSIVE_MUTEX || ucQueueType == queueQUEUE_TYPE_MUTEX )
  65. {
  66. rt_snprintf( name, RT_NAME_MAX, "mutex%02d", mutex_index++ );
  67. rt_mutex_init( ( rt_mutex_t ) &( ( StaticSemaphore_t * ) pxStaticQueue )->ipc_obj.mutex, name, RT_IPC_FLAG_PRIO );
  68. }
  69. else if ( ucQueueType == queueQUEUE_TYPE_BINARY_SEMAPHORE || ucQueueType == queueQUEUE_TYPE_COUNTING_SEMAPHORE )
  70. {
  71. rt_snprintf( name, RT_NAME_MAX, "sem%02d", sem_index++ );
  72. rt_sem_init( ( rt_sem_t ) &( ( StaticSemaphore_t * ) pxStaticQueue )->ipc_obj.semaphore, name, 0, RT_IPC_FLAG_PRIO );
  73. ( ( StaticSemaphore_t * ) pxStaticQueue )->ipc_obj.semaphore.max_value = uxQueueLength;
  74. }
  75. else if ( ucQueueType == queueQUEUE_TYPE_BASE )
  76. {
  77. rt_snprintf( name, RT_NAME_MAX, "queue%02d", queue_index++ );
  78. rt_mq_init( &( pxStaticQueue->ipc_obj ), name, pucQueueStorage, uxItemSize, QUEUE_BUFFER_SIZE( uxQueueLength, uxItemSize ), RT_IPC_FLAG_PRIO );
  79. }
  80. else
  81. {
  82. return pxNewQueue;
  83. }
  84. pxStaticQueue->rt_ipc = ( struct rt_ipc_object * ) &pxStaticQueue->ipc_obj;
  85. pxNewQueue = ( QueueHandle_t ) pxStaticQueue;
  86. }
  87. return pxNewQueue;
  88. }
  89. #endif /* configSUPPORT_STATIC_ALLOCATION */
  90. /*-----------------------------------------------------------*/
  91. #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
  92. QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength,
  93. const UBaseType_t uxItemSize,
  94. const uint8_t ucQueueType )
  95. {
  96. Queue_t * pxNewQueue = NULL;
  97. char name[RT_NAME_MAX] = {0};
  98. struct rt_ipc_object * pipc = RT_NULL;
  99. if( ( uxQueueLength > ( UBaseType_t ) 0 ) &&
  100. /* Check for multiplication overflow. */
  101. ( ( SIZE_MAX / uxQueueLength ) >= uxItemSize ) &&
  102. /* Check for addition overflow. */
  103. ( ( SIZE_MAX - sizeof( Queue_t ) ) >= ( uxQueueLength * uxItemSize ) ) )
  104. {
  105. pxNewQueue = ( Queue_t * ) RT_KERNEL_MALLOC( sizeof( Queue_t ) );
  106. if ( pxNewQueue == NULL )
  107. {
  108. return ( QueueHandle_t ) pxNewQueue;
  109. }
  110. if ( ucQueueType == queueQUEUE_TYPE_RECURSIVE_MUTEX || ucQueueType == queueQUEUE_TYPE_MUTEX )
  111. {
  112. rt_snprintf( name, RT_NAME_MAX, "mutex%02d", mutex_index++ );
  113. pipc = ( struct rt_ipc_object * ) rt_mutex_create( name, RT_IPC_FLAG_PRIO );
  114. }
  115. else if ( ucQueueType == queueQUEUE_TYPE_BINARY_SEMAPHORE || ucQueueType == queueQUEUE_TYPE_COUNTING_SEMAPHORE )
  116. {
  117. rt_snprintf( name, RT_NAME_MAX, "sem%02d", sem_index++ );
  118. pipc = ( struct rt_ipc_object * ) RT_KERNEL_MALLOC( sizeof( struct rt_semaphore_wrapper ) );
  119. if ( pipc != RT_NULL )
  120. {
  121. rt_sem_init( ( rt_sem_t ) pipc, name, 0, RT_IPC_FLAG_PRIO );
  122. ( ( struct rt_semaphore_wrapper * ) pipc )->max_value = uxQueueLength;
  123. /* Mark as dynamic so we can distinguish in vQueueDelete */
  124. pipc->parent.type &= ~RT_Object_Class_Static;
  125. }
  126. }
  127. else if ( ucQueueType == queueQUEUE_TYPE_BASE )
  128. {
  129. rt_snprintf( name, RT_NAME_MAX, "queue%02d", queue_index++ );
  130. pipc = ( struct rt_ipc_object * ) rt_mq_create( name, uxItemSize, uxQueueLength, RT_IPC_FLAG_PRIO);
  131. }
  132. if ( pipc == RT_NULL )
  133. {
  134. RT_KERNEL_FREE( pxNewQueue );
  135. return NULL;
  136. }
  137. pxNewQueue->rt_ipc = pipc;
  138. }
  139. return ( QueueHandle_t ) pxNewQueue;
  140. }
  141. #endif /* configSUPPORT_STATIC_ALLOCATION */
  142. /*-----------------------------------------------------------*/
  143. #if ( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
  144. QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType )
  145. {
  146. QueueHandle_t xNewQueue;
  147. const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0;
  148. xNewQueue = xQueueGenericCreate( uxMutexLength, uxMutexSize, ucQueueType );
  149. return xNewQueue;
  150. }
  151. #endif /* configUSE_MUTEXES */
  152. /*-----------------------------------------------------------*/
  153. #if ( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
  154. QueueHandle_t xQueueCreateMutexStatic( const uint8_t ucQueueType,
  155. StaticQueue_t * pxStaticQueue )
  156. {
  157. QueueHandle_t xNewQueue;
  158. const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0;
  159. xNewQueue = xQueueGenericCreateStatic( uxMutexLength, uxMutexSize, NULL, pxStaticQueue, ucQueueType );
  160. return xNewQueue;
  161. }
  162. #endif /* configUSE_MUTEXES */
  163. /*-----------------------------------------------------------*/
  164. #if ( configUSE_RECURSIVE_MUTEXES == 1 )
  165. BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex )
  166. {
  167. Queue_t * const pxMutex = ( Queue_t * ) xMutex;
  168. configASSERT( pxMutex );
  169. return xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK );
  170. }
  171. #endif /* configUSE_RECURSIVE_MUTEXES */
  172. /*-----------------------------------------------------------*/
  173. #if ( configUSE_RECURSIVE_MUTEXES == 1 )
  174. BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex,
  175. TickType_t xTicksToWait )
  176. {
  177. Queue_t * const pxMutex = ( Queue_t * ) xMutex;
  178. configASSERT( pxMutex );
  179. return xQueueSemaphoreTake( pxMutex, xTicksToWait );
  180. }
  181. #endif /* configUSE_RECURSIVE_MUTEXES */
  182. /*-----------------------------------------------------------*/
  183. #if ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
  184. QueueHandle_t xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount,
  185. const UBaseType_t uxInitialCount,
  186. StaticQueue_t * pxStaticQueue )
  187. {
  188. QueueHandle_t xHandle = NULL;
  189. if( ( uxMaxCount != 0 ) &&
  190. ( uxInitialCount <= uxMaxCount ) )
  191. {
  192. xHandle = xQueueGenericCreateStatic( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, NULL, pxStaticQueue, queueQUEUE_TYPE_COUNTING_SEMAPHORE );
  193. if( xHandle != NULL )
  194. {
  195. ( ( rt_sem_t ) ( ( Queue_t * ) xHandle )->rt_ipc )->value = uxInitialCount;
  196. }
  197. }
  198. else
  199. {
  200. configASSERT( xHandle );
  201. }
  202. return xHandle;
  203. }
  204. #endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
  205. /*-----------------------------------------------------------*/
  206. #if ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
  207. QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount,
  208. const UBaseType_t uxInitialCount )
  209. {
  210. QueueHandle_t xHandle = NULL;
  211. if( ( uxMaxCount != 0 ) &&
  212. ( uxInitialCount <= uxMaxCount ) )
  213. {
  214. xHandle = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_COUNTING_SEMAPHORE );
  215. if( xHandle != NULL )
  216. {
  217. ( ( rt_sem_t ) ( ( Queue_t * ) xHandle )->rt_ipc )->value = uxInitialCount;
  218. }
  219. }
  220. else
  221. {
  222. configASSERT( xHandle );
  223. }
  224. return xHandle;
  225. }
  226. #endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
  227. /*-----------------------------------------------------------*/
  228. BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
  229. const void * const pvItemToQueue,
  230. TickType_t xTicksToWait,
  231. const BaseType_t xCopyPosition )
  232. {
  233. Queue_t * const pxQueue = xQueue;
  234. struct rt_ipc_object *pipc;
  235. rt_uint8_t type;
  236. rt_base_t level;
  237. rt_err_t err = -RT_ERROR;
  238. configASSERT( pxQueue );
  239. configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
  240. configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
  241. #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
  242. {
  243. configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
  244. }
  245. #endif
  246. pipc = pxQueue->rt_ipc;
  247. RT_ASSERT( pipc != RT_NULL );
  248. type = rt_object_get_type( &pipc->parent );
  249. if ( type == RT_Object_Class_Mutex )
  250. {
  251. err = rt_mutex_release( ( rt_mutex_t ) pipc );
  252. }
  253. else if ( type == RT_Object_Class_Semaphore )
  254. {
  255. level = rt_hw_interrupt_disable();
  256. if ( ( ( rt_sem_t ) pipc )->value < ( ( struct rt_semaphore_wrapper * ) pipc )->max_value )
  257. {
  258. err = rt_sem_release( ( rt_sem_t ) pipc );
  259. }
  260. rt_hw_interrupt_enable( level );
  261. }
  262. else if ( type == RT_Object_Class_MessageQueue )
  263. {
  264. if ( xCopyPosition == queueSEND_TO_BACK )
  265. {
  266. err = rt_mq_send_wait( ( rt_mq_t ) pipc, pvItemToQueue, ( ( rt_mq_t ) pipc )->msg_size, ( rt_int32_t ) xTicksToWait );
  267. }
  268. else if ( xCopyPosition == queueSEND_TO_FRONT )
  269. {
  270. // TODO: need to implement the timeout for LIFO
  271. err = rt_mq_urgent( ( rt_mq_t ) pipc, pvItemToQueue, ( ( rt_mq_t ) pipc )->msg_size );
  272. }
  273. }
  274. return rt_err_to_freertos( err );
  275. }
  276. /*-----------------------------------------------------------*/
  277. BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
  278. BaseType_t * const pxHigherPriorityTaskWoken )
  279. {
  280. Queue_t * const pxQueue = xQueue;
  281. struct rt_ipc_object *pipc;
  282. rt_uint8_t type;
  283. rt_base_t level;
  284. rt_err_t err = -RT_ERROR
  285. configASSERT( pxQueue );
  286. pipc = pxQueue->rt_ipc;
  287. RT_ASSERT( pipc != RT_NULL );
  288. type = rt_object_get_type( &pipc->parent );
  289. RT_ASSERT( type != RT_Object_Class_Mutex );
  290. if ( type == RT_Object_Class_Semaphore )
  291. {
  292. level = rt_hw_interrupt_disable();
  293. if ( ( ( rt_sem_t ) pipc )->value < ( ( struct rt_semaphore_wrapper * ) pipc )->max_value )
  294. {
  295. err = rt_sem_release( ( rt_sem_t ) pipc );
  296. }
  297. rt_hw_interrupt_enable( level );
  298. }
  299. if ( pxHigherPriorityTaskWoken != NULL )
  300. {
  301. *pxHigherPriorityTaskWoken = pdFALSE;
  302. }
  303. return rt_err_to_freertos( err );
  304. }
  305. /*-----------------------------------------------------------*/
  306. BaseType_t xQueueReceive( QueueHandle_t xQueue,
  307. void * const pvBuffer,
  308. TickType_t xTicksToWait )
  309. {
  310. Queue_t * const pxQueue = xQueue;
  311. struct rt_ipc_object *pipc;
  312. rt_uint8_t type;
  313. rt_err_t err = -RT_ERROR;
  314. /* Check the queue pointer is not NULL. */
  315. configASSERT( ( pxQueue ) );
  316. /* Cannot block if the scheduler is suspended. */
  317. #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
  318. {
  319. configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
  320. }
  321. #endif
  322. pipc = pxQueue->rt_ipc;
  323. RT_ASSERT( pipc != RT_NULL );
  324. type = rt_object_get_type( &pipc->parent );
  325. if ( type == RT_Object_Class_MessageQueue )
  326. {
  327. err = rt_mq_recv( ( rt_mq_t ) pipc, pvBuffer, ( ( rt_mq_t ) pipc )->msg_size, ( rt_int32_t ) xTicksToWait );
  328. }
  329. return rt_err_to_freertos( err );
  330. }
  331. /*-----------------------------------------------------------*/
  332. BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
  333. TickType_t xTicksToWait )
  334. {
  335. Queue_t * const pxQueue = xQueue;
  336. struct rt_ipc_object *pipc;
  337. rt_uint8_t type;
  338. rt_err_t err = -RT_ERROR;
  339. /* Check the queue pointer is not NULL. */
  340. configASSERT( ( pxQueue ) );
  341. /* Cannot block if the scheduler is suspended. */
  342. #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
  343. {
  344. configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
  345. }
  346. #endif
  347. pipc = pxQueue->rt_ipc;
  348. RT_ASSERT( pipc != RT_NULL );
  349. type = rt_object_get_type( &pipc->parent );
  350. if ( type == RT_Object_Class_Mutex )
  351. {
  352. err = rt_mutex_take( ( rt_mutex_t ) pipc, ( rt_int32_t ) xTicksToWait );
  353. }
  354. else if ( type == RT_Object_Class_Semaphore )
  355. {
  356. err = rt_sem_take( ( rt_sem_t ) pipc, ( rt_int32_t ) xTicksToWait );
  357. }
  358. return rt_err_to_freertos( err );
  359. }
  360. /*-----------------------------------------------------------*/
  361. BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
  362. void * const pvBuffer,
  363. BaseType_t * const pxHigherPriorityTaskWoken )
  364. {
  365. Queue_t * const pxQueue = xQueue;
  366. struct rt_ipc_object *pipc;
  367. rt_uint8_t type;
  368. rt_err_t err = -RT_ERROR
  369. configASSERT( pxQueue );
  370. pipc = pxQueue->rt_ipc;
  371. RT_ASSERT( pipc != RT_NULL );
  372. type = rt_object_get_type( &pipc->parent );
  373. RT_ASSERT( type != RT_Object_Class_Mutex );
  374. if ( type == RT_Object_Class_Semaphore )
  375. {
  376. err = rt_sem_take( ( rt_sem_t ) pipc, RT_WAITING_NO );
  377. }
  378. if ( pxHigherPriorityTaskWoken != NULL )
  379. {
  380. *pxHigherPriorityTaskWoken = pdFALSE;
  381. }
  382. return rt_err_to_freertos( err );
  383. }
  384. /*-----------------------------------------------------------*/
/* Destroy a queue/semaphore/mutex handle.
 *
 * Statically created objects (rt_*_init on user memory, flagged as "system
 * objects") are detached only; dynamically created ones are deleted and
 * their memory freed. The preprocessor interleaving below selects the right
 * path for each combination of configSUPPORT_STATIC/DYNAMIC_ALLOCATION. */
void vQueueDelete( QueueHandle_t xQueue )
{
    Queue_t * const pxQueue = xQueue;
    struct rt_ipc_object *pipc;
    rt_uint8_t type;
    configASSERT( pxQueue );
    pipc = pxQueue->rt_ipc;
    RT_ASSERT( pipc != RT_NULL );
    type = rt_object_get_type( &pipc->parent );
#if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
    /* System object => created statically => detach, don't free. */
    if ( rt_object_is_systemobject( ( rt_object_t ) pipc ) )
#endif
    {
#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
        if ( type == RT_Object_Class_Mutex )
        {
            rt_mutex_detach( ( rt_mutex_t ) pipc );
        }
        else if ( type == RT_Object_Class_Semaphore )
        {
            rt_sem_detach( ( rt_sem_t ) pipc );
        }
        else if ( type == RT_Object_Class_MessageQueue )
        {
            rt_mq_detach( ( rt_mq_t ) pipc );
        }
#endif
#if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
    }
    else
    {
#endif
#if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
        if ( type == RT_Object_Class_Mutex )
        {
            rt_mutex_delete( ( rt_mutex_t ) pipc );
        }
        else if ( type == RT_Object_Class_Semaphore )
        {
            /* Dynamic semaphores were rt_sem_init'ed on heap memory in
             * xQueueGenericCreate (with the static bit cleared), so restore
             * the static bit, detach, then free the wrapper ourselves. */
            pipc->parent.type |= RT_Object_Class_Static;
            rt_sem_detach( ( rt_sem_t ) pipc );
            RT_KERNEL_FREE( pipc );
        }
        else if ( type == RT_Object_Class_MessageQueue )
        {
            rt_mq_delete( ( rt_mq_t ) pipc );
        }
        else
        {
            /* Unknown type: leave the wrapper allocated rather than free a
             * handle whose backing object we failed to destroy. */
            return;
        }
        /* Free the Queue_t wrapper allocated in xQueueGenericCreate. */
        RT_KERNEL_FREE( pxQueue );
#endif
    }
}
  441. /*-----------------------------------------------------------*/