/* ringbuf.c — FreeRTOS ring buffer implementation (ESP-IDF additions) */
  1. /*
  2. * SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. */
  6. #include <stdlib.h>
  7. #include <string.h>
  8. #include "freertos/FreeRTOS.h"
  9. #include "freertos/list.h"
  10. #include "freertos/task.h"
  11. #include "freertos/queue.h"
  12. #include "freertos/ringbuf.h"
  13. #include "esp_heap_caps.h"
// ------------------------------------------------- Macros and Types --------------------------------------------------
//32-bit alignment macros
#define rbALIGN_MASK (0x03)
#define rbALIGN_SIZE( xSize ) ( ( xSize + rbALIGN_MASK ) & ~rbALIGN_MASK )
#define rbCHECK_ALIGNED( pvPtr ) ( ( ( UBaseType_t ) ( pvPtr ) & rbALIGN_MASK ) == 0 )

//Ring buffer flags
#define rbALLOW_SPLIT_FLAG ( ( UBaseType_t ) 1 )        //The ring buffer allows items to be split
#define rbBYTE_BUFFER_FLAG ( ( UBaseType_t ) 2 )        //The ring buffer is a byte buffer
#define rbBUFFER_FULL_FLAG ( ( UBaseType_t ) 4 )        //The ring buffer is currently full (write pointer == free pointer)
#define rbBUFFER_STATIC_FLAG ( ( UBaseType_t ) 8 )      //The ring buffer is statically allocated
#define rbUSING_QUEUE_SET ( ( UBaseType_t ) 16 )        //The ring buffer has been added to a queue set

//Item flags
#define rbITEM_FREE_FLAG ( ( UBaseType_t ) 1 )          //Item has been retrieved and returned by application, free to overwrite
#define rbITEM_DUMMY_DATA_FLAG ( ( UBaseType_t ) 2 )    //Data from here to end of the ring buffer is dummy data. Restart reading at start of head of the buffer
#define rbITEM_SPLIT_FLAG ( ( UBaseType_t ) 4 )         //Valid for RINGBUF_TYPE_ALLOWSPLIT, indicating that rest of the data is wrapped around
#define rbITEM_WRITTEN_FLAG ( ( UBaseType_t ) 8 )       //Item has been written to by the application, thus can be read

//Header prepended to every item stored in a no-split/allow-split ring buffer
typedef struct {
    //This size of this structure must be 32-bit aligned
    size_t xItemLen;            //Length of the item's data, excluding header and alignment padding
    UBaseType_t uxItemFlags;    //Combination of rbITEM_* flags above
} ItemHeader_t;

#define rbHEADER_SIZE sizeof(ItemHeader_t)
typedef struct RingbufferDefinition Ringbuffer_t;
//Function pointer types; each buffer type installs its own set in prvInitializeNewRingbuffer()
typedef BaseType_t (*CheckItemFitsFunction_t)(Ringbuffer_t *pxRingbuffer, size_t xItemSize);
typedef void (*CopyItemFunction_t)(Ringbuffer_t *pxRingbuffer, const uint8_t *pcItem, size_t xItemSize);
typedef BaseType_t (*CheckItemAvailFunction_t) (Ringbuffer_t *pxRingbuffer);
typedef void *(*GetItemFunction_t)(Ringbuffer_t *pxRingbuffer, BaseType_t *pxIsSplit, size_t xMaxSize, size_t *pxItemSize);
typedef void (*ReturnItemFunction_t)(Ringbuffer_t *pxRingbuffer, uint8_t *pvItem);
typedef size_t (*GetCurMaxSizeFunction_t)(Ringbuffer_t *pxRingbuffer);

typedef struct RingbufferDefinition {
    size_t xSize;                               //Size of the data storage
    size_t xMaxItemSize;                        //Maximum item size
    UBaseType_t uxRingbufferFlags;              //Flags to indicate the type and status of ring buffer
    CheckItemFitsFunction_t xCheckItemFits;     //Function to check if item can currently fit in ring buffer
    CopyItemFunction_t vCopyItem;               //Function to copy item to ring buffer
    GetItemFunction_t pvGetItem;                //Function to get item from ring buffer
    ReturnItemFunction_t vReturnItem;           //Function to return item to ring buffer
    GetCurMaxSizeFunction_t xGetCurMaxSize;     //Function to get current free size
    uint8_t *pucAcquire;                        //Acquire Pointer. Points to where the next item should be acquired.
    uint8_t *pucWrite;                          //Write Pointer. Points to where the next item should be written
    uint8_t *pucRead;                           //Read Pointer. Points to where the next item should be read from
    uint8_t *pucFree;                           //Free Pointer. Points to the last item that has yet to be returned to the ring buffer
    uint8_t *pucHead;                           //Pointer to the start of the ring buffer storage area
    uint8_t *pucTail;                           //Pointer to the end of the ring buffer storage area
    BaseType_t xItemsWaiting;                   //Number of items/bytes(for byte buffers) currently in ring buffer that have not yet been read
    List_t xTasksWaitingToSend;                 //List of tasks that are blocked waiting to send/acquire onto this ring buffer. Stored in priority order.
    List_t xTasksWaitingToReceive;              //List of tasks that are blocked waiting to receive from this ring buffer. Stored in priority order.
    QueueSetHandle_t xQueueSet;                 //Ring buffer's read queue set handle.
    portMUX_TYPE mux;                           //Spinlock required for SMP
} Ringbuffer_t;
//The private control block must exactly overlay the opaque StaticRingbuffer_t from the public header
_Static_assert(sizeof(StaticRingbuffer_t) == sizeof(Ringbuffer_t), "StaticRingbuffer_t != Ringbuffer_t");
// ------------------------------------------------ Forward Declares ---------------------------------------------------
/*
 * WARNING: All of the following static functions (except generic functions)
 * ARE NOT THREAD SAFE. Therefore they should only be called within a critical
 * section (using spin locks)
 */

//Initialize a ring buffer after space has been allocated for it
static void prvInitializeNewRingbuffer(size_t xBufferSize,
                                       RingbufferType_t xBufferType,
                                       Ringbuffer_t *pxNewRingbuffer,
                                       uint8_t *pucRingbufferStorage);

//Calculate current amount of free space (in bytes) in the ring buffer
static size_t prvGetFreeSize(Ringbuffer_t *pxRingbuffer);

//Checks if an item/data is currently available for retrieval
static BaseType_t prvCheckItemAvail(Ringbuffer_t *pxRingbuffer);

//Checks if an item will currently fit in a no-split/allow-split ring buffer
static BaseType_t prvCheckItemFitsDefault( Ringbuffer_t *pxRingbuffer, size_t xItemSize);

//Checks if an item will currently fit in a byte buffer
static BaseType_t prvCheckItemFitsByteBuffer( Ringbuffer_t *pxRingbuffer, size_t xItemSize);

/*
Copies an item to a no-split ring buffer
Entry:
  - Must have already guaranteed there is sufficient space for item by calling prvCheckItemFitsDefault()
Exit:
  - New item copied into ring buffer
  - pucAcquire and pucWrite updated.
  - Dummy item added if necessary
*/
static void prvCopyItemNoSplit(Ringbuffer_t *pxRingbuffer, const uint8_t *pucItem, size_t xItemSize);

/*
Copies an item to a allow-split ring buffer
Entry:
  - Must have already guaranteed there is sufficient space for item by calling prvCheckItemFitsDefault()
Exit:
  - New item copied into ring buffer
  - pucAcquire and pucWrite updated
  - Item may be split
*/
static void prvCopyItemAllowSplit(Ringbuffer_t *pxRingbuffer, const uint8_t *pucItem, size_t xItemSize);

//Copies an item to a byte buffer. Only call this function after calling prvCheckItemFitsByteBuffer()
static void prvCopyItemByteBuf(Ringbuffer_t *pxRingbuffer, const uint8_t *pucItem, size_t xItemSize);

//Retrieve item from no-split/allow-split ring buffer. *pxIsSplit is set to pdTRUE if the retrieved item is split
/*
Entry:
  - Must have already guaranteed that there is an item available for retrieval by calling prvCheckItemAvail()
  - Guaranteed that pucREAD points to a valid item (i.e., not a dummy item)
Exit:
  - Item is returned. Only first half returned if split
  - pucREAD updated to point to next valid item to read, or equals to pucWrite if there are no more valid items to read
  - pucREAD update must skip over dummy items
*/
static void *prvGetItemDefault(Ringbuffer_t *pxRingbuffer,
                               BaseType_t *pxIsSplit,
                               size_t xUnusedParam,
                               size_t *pxItemSize);

//Retrieve data from byte buffer. If xMaxSize is 0, all continuous data is retrieved
static void *prvGetItemByteBuf(Ringbuffer_t *pxRingbuffer,
                               BaseType_t *pxUnusedParam,
                               size_t xMaxSize,
                               size_t *pxItemSize);

/*
Return an item to a split/no-split ring buffer
Exit:
  - Item is marked free rbITEM_FREE_FLAG
  - pucFree is progressed as far as possible, skipping over already freed items or dummy items
*/
static void prvReturnItemDefault(Ringbuffer_t *pxRingbuffer, uint8_t *pucItem);

//Return data to a byte buffer
static void prvReturnItemByteBuf(Ringbuffer_t *pxRingbuffer, uint8_t *pucItem);

//Get the maximum size an item that can currently have if sent to a no-split ring buffer
static size_t prvGetCurMaxSizeNoSplit(Ringbuffer_t *pxRingbuffer);

//Get the maximum size an item that can currently have if sent to a allow-split ring buffer
static size_t prvGetCurMaxSizeAllowSplit(Ringbuffer_t *pxRingbuffer);

//Get the maximum size an item that can currently have if sent to a byte buffer
static size_t prvGetCurMaxSizeByteBuf(Ringbuffer_t *pxRingbuffer);

/*
Generic function used to send or acquire an item/buffer.
- If sending, set ppvItem to NULL. pvItem remains unchanged on failure.
- If acquiring, set pvItem to NULL. ppvItem remains unchanged on failure.
*/
static BaseType_t prvSendAcquireGeneric(Ringbuffer_t *pxRingbuffer,
                                        const void *pvItem,
                                        void **ppvItem,
                                        size_t xItemSize,
                                        TickType_t xTicksToWait);

/*
Generic function used to retrieve an item/data from ring buffers. If called on
an allow-split buffer, and pvItem2 and xItemSize2 are not NULL, both parts of
a split item will be retrieved. xMaxSize will only take effect if called on
byte buffers. xItemSize must remain unchanged if no item is retrieved.
*/
static BaseType_t prvReceiveGeneric(Ringbuffer_t *pxRingbuffer,
                                    void **pvItem1,
                                    void **pvItem2,
                                    size_t *xItemSize1,
                                    size_t *xItemSize2,
                                    size_t xMaxSize,
                                    TickType_t xTicksToWait);

//From ISR version of prvReceiveGeneric()
static BaseType_t prvReceiveGenericFromISR(Ringbuffer_t *pxRingbuffer,
                                           void **pvItem1,
                                           void **pvItem2,
                                           size_t *xItemSize1,
                                           size_t *xItemSize2,
                                           size_t xMaxSize);

// ------------------------------------------------ Static Functions ---------------------------------------------------
  171. static void prvInitializeNewRingbuffer(size_t xBufferSize,
  172. RingbufferType_t xBufferType,
  173. Ringbuffer_t *pxNewRingbuffer,
  174. uint8_t *pucRingbufferStorage)
  175. {
  176. //Initialize values
  177. pxNewRingbuffer->xSize = xBufferSize;
  178. pxNewRingbuffer->pucHead = pucRingbufferStorage;
  179. pxNewRingbuffer->pucTail = pucRingbufferStorage + xBufferSize;
  180. pxNewRingbuffer->pucFree = pucRingbufferStorage;
  181. pxNewRingbuffer->pucRead = pucRingbufferStorage;
  182. pxNewRingbuffer->pucWrite = pucRingbufferStorage;
  183. pxNewRingbuffer->pucAcquire = pucRingbufferStorage;
  184. pxNewRingbuffer->xItemsWaiting = 0;
  185. pxNewRingbuffer->uxRingbufferFlags = 0;
  186. //Initialize type dependent values and function pointers
  187. if (xBufferType == RINGBUF_TYPE_NOSPLIT) {
  188. pxNewRingbuffer->xCheckItemFits = prvCheckItemFitsDefault;
  189. pxNewRingbuffer->vCopyItem = prvCopyItemNoSplit;
  190. pxNewRingbuffer->pvGetItem = prvGetItemDefault;
  191. pxNewRingbuffer->vReturnItem = prvReturnItemDefault;
  192. /*
  193. * Worst case scenario is when the read/write/acquire/free pointers are all
  194. * pointing to the halfway point of the buffer.
  195. */
  196. pxNewRingbuffer->xMaxItemSize = rbALIGN_SIZE(pxNewRingbuffer->xSize / 2) - rbHEADER_SIZE;
  197. pxNewRingbuffer->xGetCurMaxSize = prvGetCurMaxSizeNoSplit;
  198. } else if (xBufferType == RINGBUF_TYPE_ALLOWSPLIT) {
  199. pxNewRingbuffer->uxRingbufferFlags |= rbALLOW_SPLIT_FLAG;
  200. pxNewRingbuffer->xCheckItemFits = prvCheckItemFitsDefault;
  201. pxNewRingbuffer->vCopyItem = prvCopyItemAllowSplit;
  202. pxNewRingbuffer->pvGetItem = prvGetItemDefault;
  203. pxNewRingbuffer->vReturnItem = prvReturnItemDefault;
  204. //Worst case an item is split into two, incurring two headers of overhead
  205. pxNewRingbuffer->xMaxItemSize = pxNewRingbuffer->xSize - (sizeof(ItemHeader_t) * 2);
  206. pxNewRingbuffer->xGetCurMaxSize = prvGetCurMaxSizeAllowSplit;
  207. } else { //Byte Buffer
  208. pxNewRingbuffer->uxRingbufferFlags |= rbBYTE_BUFFER_FLAG;
  209. pxNewRingbuffer->xCheckItemFits = prvCheckItemFitsByteBuffer;
  210. pxNewRingbuffer->vCopyItem = prvCopyItemByteBuf;
  211. pxNewRingbuffer->pvGetItem = prvGetItemByteBuf;
  212. pxNewRingbuffer->vReturnItem = prvReturnItemByteBuf;
  213. //Byte buffers do not incur any overhead
  214. pxNewRingbuffer->xMaxItemSize = pxNewRingbuffer->xSize;
  215. pxNewRingbuffer->xGetCurMaxSize = prvGetCurMaxSizeByteBuf;
  216. }
  217. vListInitialise(&pxNewRingbuffer->xTasksWaitingToSend);
  218. vListInitialise(&pxNewRingbuffer->xTasksWaitingToReceive);
  219. pxNewRingbuffer->xQueueSet = NULL;
  220. portMUX_INITIALIZE(&pxNewRingbuffer->mux);
  221. }
  222. static size_t prvGetFreeSize(Ringbuffer_t *pxRingbuffer)
  223. {
  224. size_t xReturn;
  225. if (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) {
  226. xReturn = 0;
  227. } else {
  228. BaseType_t xFreeSize = pxRingbuffer->pucFree - pxRingbuffer->pucAcquire;
  229. //Check if xFreeSize has underflowed
  230. if (xFreeSize <= 0) {
  231. xFreeSize += pxRingbuffer->xSize;
  232. }
  233. xReturn = xFreeSize;
  234. }
  235. configASSERT(xReturn <= pxRingbuffer->xSize);
  236. return xReturn;
  237. }
/*
 * Check whether an item of xItemSize bytes (plus header and alignment padding)
 * can currently fit in a no-split/allow-split ring buffer.
 * Returns pdTRUE if it fits, pdFALSE otherwise.
 * NOT THREAD SAFE — must be called inside a critical section.
 */
static BaseType_t prvCheckItemFitsDefault( Ringbuffer_t *pxRingbuffer, size_t xItemSize)
{
    //Check arguments and buffer state
    configASSERT(rbCHECK_ALIGNED(pxRingbuffer->pucAcquire));    //pucAcquire is always aligned in no-split/allow-split ring buffers
    configASSERT(pxRingbuffer->pucAcquire >= pxRingbuffer->pucHead && pxRingbuffer->pucAcquire < pxRingbuffer->pucTail);    //Check write pointer is within bounds

    size_t xTotalItemSize = rbALIGN_SIZE(xItemSize) + rbHEADER_SIZE;    //Rounded up aligned item size with header
    if (pxRingbuffer->pucAcquire == pxRingbuffer->pucFree) {
        //Buffer is either complete empty or completely full; the full flag disambiguates
        return (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) ? pdFALSE : pdTRUE;
    }
    if (pxRingbuffer->pucFree > pxRingbuffer->pucAcquire) {
        //Free space does not wrap around
        return (xTotalItemSize <= pxRingbuffer->pucFree - pxRingbuffer->pucAcquire) ? pdTRUE : pdFALSE;
    }
    //Free space wraps around
    if (xTotalItemSize <= pxRingbuffer->pucTail - pxRingbuffer->pucAcquire) {
        return pdTRUE;  //Item fits without wrapping around
    }
    //Check if item fits by wrapping
    if (pxRingbuffer->uxRingbufferFlags & rbALLOW_SPLIT_FLAG) {
        //Allow split wrapping incurs an extra header (each half gets its own header)
        return (xTotalItemSize + rbHEADER_SIZE <= pxRingbuffer->xSize - (pxRingbuffer->pucAcquire - pxRingbuffer->pucFree)) ? pdTRUE : pdFALSE;
    } else {
        //No-split: the whole item must fit in the region before pucFree after wrapping to pucHead
        return (xTotalItemSize <= pxRingbuffer->pucFree - pxRingbuffer->pucHead) ? pdTRUE : pdFALSE;
    }
}
  264. static BaseType_t prvCheckItemFitsByteBuffer( Ringbuffer_t *pxRingbuffer, size_t xItemSize)
  265. {
  266. //Check arguments and buffer state
  267. configASSERT(pxRingbuffer->pucAcquire >= pxRingbuffer->pucHead && pxRingbuffer->pucAcquire < pxRingbuffer->pucTail); //Check acquire pointer is within bounds
  268. if (pxRingbuffer->pucAcquire == pxRingbuffer->pucFree) {
  269. //Buffer is either complete empty or completely full
  270. return (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) ? pdFALSE : pdTRUE;
  271. }
  272. if (pxRingbuffer->pucFree > pxRingbuffer->pucAcquire) {
  273. //Free space does not wrap around
  274. return (xItemSize <= pxRingbuffer->pucFree - pxRingbuffer->pucAcquire) ? pdTRUE : pdFALSE;
  275. }
  276. //Free space wraps around
  277. return (xItemSize <= pxRingbuffer->xSize - (pxRingbuffer->pucAcquire - pxRingbuffer->pucFree)) ? pdTRUE : pdFALSE;
  278. }
/*
 * Reserve space for an item of xItemSize bytes in a no-split ring buffer and
 * return a pointer to the item's data area (just past its header). The item
 * is not readable until prvSendItemDoneNoSplit() marks it written. Caller
 * must already have verified fit via prvCheckItemFitsDefault().
 * NOT THREAD SAFE — must be called inside a critical section.
 */
static uint8_t* prvAcquireItemNoSplit(Ringbuffer_t *pxRingbuffer, size_t xItemSize)
{
    //Check arguments and buffer state
    size_t xAlignedItemSize = rbALIGN_SIZE(xItemSize);  //Rounded up aligned item size
    size_t xRemLen = pxRingbuffer->pucTail - pxRingbuffer->pucAcquire;  //Length from pucAcquire until end of buffer
    configASSERT(rbCHECK_ALIGNED(pxRingbuffer->pucAcquire));    //pucAcquire is always aligned in no-split ring buffers
    configASSERT(pxRingbuffer->pucAcquire >= pxRingbuffer->pucHead && pxRingbuffer->pucAcquire < pxRingbuffer->pucTail);    //Check write pointer is within bounds
    configASSERT(xRemLen >= rbHEADER_SIZE); //Remaining length must be able to at least fit an item header

    //If remaining length can't fit item, set as dummy data and wrap around
    if (xRemLen < xAlignedItemSize + rbHEADER_SIZE) {
        ItemHeader_t *pxDummy = (ItemHeader_t *)pxRingbuffer->pucAcquire;
        pxDummy->uxItemFlags = rbITEM_DUMMY_DATA_FLAG;  //Set remaining length as dummy data
        pxDummy->xItemLen = 0;  //Dummy data should have no length
        pxRingbuffer->pucAcquire = pxRingbuffer->pucHead;   //Reset acquire pointer to wrap around
    }

    //Item should be guaranteed to fit at this point. Set item header and copy data
    ItemHeader_t *pxHeader = (ItemHeader_t *)pxRingbuffer->pucAcquire;
    pxHeader->xItemLen = xItemSize;
    pxHeader->uxItemFlags = 0;
    //hold the buffer address without touching pucWrite (pucWrite advances only on send-done)
    uint8_t* item_address = pxRingbuffer->pucAcquire + rbHEADER_SIZE;
    pxRingbuffer->pucAcquire += rbHEADER_SIZE + xAlignedItemSize;   //Advance pucAcquire past header and the item to next aligned address

    //After the allocation, add some padding after the buffer and correct the flags
    //If current remaining length can't fit a header, wrap around write pointer
    if (pxRingbuffer->pucTail - pxRingbuffer->pucAcquire < rbHEADER_SIZE) {
        pxRingbuffer->pucAcquire = pxRingbuffer->pucHead;   //Wrap around pucAcquire
    }
    //Check if buffer is full
    if (pxRingbuffer->pucAcquire == pxRingbuffer->pucFree) {
        //Mark the buffer as full to distinguish with an empty buffer
        pxRingbuffer->uxRingbufferFlags |= rbBUFFER_FULL_FLAG;
    }
    return item_address;
}
/*
 * Mark a previously acquired no-split item as written (readable), then advance
 * pucWrite over all contiguous written/dummy items up to pucAcquire. Items may
 * complete out of acquisition order, so pucWrite only moves past a run of
 * finished items. NOT THREAD SAFE — must be called inside a critical section.
 */
static void prvSendItemDoneNoSplit(Ringbuffer_t *pxRingbuffer, uint8_t* pucItem)
{
    //Check arguments and buffer state
    configASSERT(rbCHECK_ALIGNED(pucItem));
    configASSERT(pucItem >= pxRingbuffer->pucHead);
    configASSERT(pucItem <= pxRingbuffer->pucTail); //Inclusive of pucTail in the case of zero length item at the very end

    //Get and check header of the item
    ItemHeader_t *pxCurHeader = (ItemHeader_t *)(pucItem - rbHEADER_SIZE);
    configASSERT(pxCurHeader->xItemLen <= pxRingbuffer->xMaxItemSize);
    configASSERT((pxCurHeader->uxItemFlags & rbITEM_DUMMY_DATA_FLAG) == 0); //Dummy items should never have been written
    configASSERT((pxCurHeader->uxItemFlags & rbITEM_WRITTEN_FLAG) == 0);    //Indicates item has already been written before
    pxCurHeader->uxItemFlags &= ~rbITEM_SPLIT_FLAG; //Clear wrap flag if set (not strictly necessary)
    pxCurHeader->uxItemFlags |= rbITEM_WRITTEN_FLAG;    //Mark as written
    pxRingbuffer->xItemsWaiting++;

    /*
     * Items might not be written in the order they were acquired. Move the
     * write pointer up to the next item that has not been marked as written (by
     * written flag) or up till the acquire pointer. When advancing the write
     * pointer, items that have already been written or items with dummy data
     * should be skipped over
     */
    pxCurHeader = (ItemHeader_t *)pxRingbuffer->pucWrite;
    //Skip over Items that have already been written or are dummy items
    while (((pxCurHeader->uxItemFlags & rbITEM_WRITTEN_FLAG) || (pxCurHeader->uxItemFlags & rbITEM_DUMMY_DATA_FLAG)) && pxRingbuffer->pucWrite != pxRingbuffer->pucAcquire) {
        if (pxCurHeader->uxItemFlags & rbITEM_DUMMY_DATA_FLAG) {
            pxCurHeader->uxItemFlags |= rbITEM_WRITTEN_FLAG;    //Mark as freed (not strictly necessary but adds redundancy)
            pxRingbuffer->pucWrite = pxRingbuffer->pucHead; //Wrap around due to dummy data
        } else {
            //Item with data that has already been written, advance write pointer past this item
            size_t xAlignedItemSize = rbALIGN_SIZE(pxCurHeader->xItemLen);
            pxRingbuffer->pucWrite += xAlignedItemSize + rbHEADER_SIZE;
            //Redundancy check to ensure write pointer has not overshot buffer bounds
            configASSERT(pxRingbuffer->pucWrite <= pxRingbuffer->pucHead + pxRingbuffer->xSize);
        }
        //Check if pucWrite requires wrap around (not enough room left for another header)
        if ((pxRingbuffer->pucTail - pxRingbuffer->pucWrite) < rbHEADER_SIZE) {
            pxRingbuffer->pucWrite = pxRingbuffer->pucHead;
        }
        pxCurHeader = (ItemHeader_t *)pxRingbuffer->pucWrite;   //Update header to point to item
    }
}
  354. static void prvCopyItemNoSplit(Ringbuffer_t *pxRingbuffer, const uint8_t *pucItem, size_t xItemSize)
  355. {
  356. uint8_t* item_addr = prvAcquireItemNoSplit(pxRingbuffer, xItemSize);
  357. memcpy(item_addr, pucItem, xItemSize);
  358. prvSendItemDoneNoSplit(pxRingbuffer, item_addr);
  359. }
/*
 * Copy an item into an allow-split ring buffer. If the item does not fit in
 * the space before the tail, it is split into two parts (each with its own
 * header) with the second part at the head of the buffer. Caller must have
 * verified fit via prvCheckItemFitsDefault().
 * NOT THREAD SAFE — must be called inside a critical section.
 */
static void prvCopyItemAllowSplit(Ringbuffer_t *pxRingbuffer, const uint8_t *pucItem, size_t xItemSize)
{
    //Check arguments and buffer state
    size_t xAlignedItemSize = rbALIGN_SIZE(xItemSize);  //Rounded up aligned item size
    size_t xRemLen = pxRingbuffer->pucTail - pxRingbuffer->pucAcquire;  //Length from pucAcquire until end of buffer
    configASSERT(rbCHECK_ALIGNED(pxRingbuffer->pucAcquire));    //pucAcquire is always aligned in split ring buffers
    configASSERT(pxRingbuffer->pucAcquire >= pxRingbuffer->pucHead && pxRingbuffer->pucAcquire < pxRingbuffer->pucTail);    //Check write pointer is within bounds
    configASSERT(xRemLen >= rbHEADER_SIZE); //Remaining length must be able to at least fit an item header

    //Split item if necessary
    if (xRemLen < xAlignedItemSize + rbHEADER_SIZE) {
        //Write first part of the item
        ItemHeader_t *pxFirstHeader = (ItemHeader_t *)pxRingbuffer->pucAcquire;
        pxFirstHeader->uxItemFlags = 0;
        pxFirstHeader->xItemLen = xRemLen - rbHEADER_SIZE;  //Fill remaining length with first part
        pxRingbuffer->pucAcquire += rbHEADER_SIZE;  //Advance pucAcquire past header
        xRemLen -= rbHEADER_SIZE;
        if (xRemLen > 0) {
            memcpy(pxRingbuffer->pucAcquire, pucItem, xRemLen);
            pxRingbuffer->xItemsWaiting++;  //Each part counts as a separate waiting item
            //Update item arguments to account for data already copied
            pucItem += xRemLen;
            xItemSize -= xRemLen;
            xAlignedItemSize -= xRemLen;
            pxFirstHeader->uxItemFlags |= rbITEM_SPLIT_FLAG;    //There must be more data
        } else {
            //Remaining length was only large enough to fit header
            pxFirstHeader->uxItemFlags |= rbITEM_DUMMY_DATA_FLAG;   //Item will completely be stored in 2nd part
        }
        pxRingbuffer->pucAcquire = pxRingbuffer->pucHead;   //Reset acquire pointer to start of buffer
    }

    //Item (whole or second part) should be guaranteed to fit at this point
    ItemHeader_t *pxSecondHeader = (ItemHeader_t *)pxRingbuffer->pucAcquire;
    pxSecondHeader->xItemLen = xItemSize;
    pxSecondHeader->uxItemFlags = 0;
    pxRingbuffer->pucAcquire += rbHEADER_SIZE;  //Advance acquire pointer past header
    memcpy(pxRingbuffer->pucAcquire, pucItem, xItemSize);
    pxRingbuffer->xItemsWaiting++;
    pxRingbuffer->pucAcquire += xAlignedItemSize;   //Advance pucAcquire past item to next aligned address

    //If current remaining length can't fit a header, wrap around write pointer
    if (pxRingbuffer->pucTail - pxRingbuffer->pucAcquire < rbHEADER_SIZE) {
        pxRingbuffer->pucAcquire = pxRingbuffer->pucHead;   //Wrap around pucAcquire
    }
    //Check if buffer is full
    if (pxRingbuffer->pucAcquire == pxRingbuffer->pucFree) {
        //Mark the buffer as full to distinguish with an empty buffer
        pxRingbuffer->uxRingbufferFlags |= rbBUFFER_FULL_FLAG;
    }
    //currently the Split mode is not supported (for acquire), pucWrite tracks the pucAcquire
    pxRingbuffer->pucWrite = pxRingbuffer->pucAcquire;
}
  410. static void prvCopyItemByteBuf(Ringbuffer_t *pxRingbuffer, const uint8_t *pucItem, size_t xItemSize)
  411. {
  412. //Check arguments and buffer state
  413. configASSERT(pxRingbuffer->pucAcquire >= pxRingbuffer->pucHead && pxRingbuffer->pucAcquire < pxRingbuffer->pucTail); //Check acquire pointer is within bounds
  414. size_t xRemLen = pxRingbuffer->pucTail - pxRingbuffer->pucAcquire; //Length from pucAcquire until end of buffer
  415. if (xRemLen < xItemSize) {
  416. //Copy as much as possible into remaining length
  417. memcpy(pxRingbuffer->pucAcquire, pucItem, xRemLen);
  418. pxRingbuffer->xItemsWaiting += xRemLen;
  419. //Update item arguments to account for data already written
  420. pucItem += xRemLen;
  421. xItemSize -= xRemLen;
  422. pxRingbuffer->pucAcquire = pxRingbuffer->pucHead; //Reset acquire pointer to start of buffer
  423. }
  424. //Copy all or remaining portion of the item
  425. memcpy(pxRingbuffer->pucAcquire, pucItem, xItemSize);
  426. pxRingbuffer->xItemsWaiting += xItemSize;
  427. pxRingbuffer->pucAcquire += xItemSize;
  428. //Wrap around pucAcquire if it reaches the end
  429. if (pxRingbuffer->pucAcquire == pxRingbuffer->pucTail) {
  430. pxRingbuffer->pucAcquire = pxRingbuffer->pucHead;
  431. }
  432. //Check if buffer is full
  433. if (pxRingbuffer->pucAcquire == pxRingbuffer->pucFree) {
  434. pxRingbuffer->uxRingbufferFlags |= rbBUFFER_FULL_FLAG; //Mark the buffer as full to avoid confusion with an empty buffer
  435. }
  436. //Currently, acquiring memory is not supported in byte mode. pucWrite tracks the pucAcquire.
  437. pxRingbuffer->pucWrite = pxRingbuffer->pucAcquire;
  438. }
  439. static BaseType_t prvCheckItemAvail(Ringbuffer_t *pxRingbuffer)
  440. {
  441. if ((pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG) && pxRingbuffer->pucRead != pxRingbuffer->pucFree) {
  442. return pdFALSE; //Byte buffers do not allow multiple retrievals before return
  443. }
  444. if ((pxRingbuffer->xItemsWaiting > 0) && ((pxRingbuffer->pucRead != pxRingbuffer->pucWrite) || (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG))) {
  445. return pdTRUE; //Items/data available for retrieval
  446. } else {
  447. return pdFALSE; //No items/data available for retrieval
  448. }
  449. }
/*
 * Retrieve the item at pucRead from a no-split/allow-split ring buffer,
 * returning a pointer to its data and writing its length to *pxItemSize.
 * *pxIsSplit is set to pdTRUE if only the first half of a split item was
 * returned. Caller must have verified availability via prvCheckItemAvail().
 * xUnusedParam exists only to match the GetItemFunction_t signature.
 * NOT THREAD SAFE — must be called inside a critical section.
 */
static void *prvGetItemDefault(Ringbuffer_t *pxRingbuffer,
                               BaseType_t *pxIsSplit,
                               size_t xUnusedParam,
                               size_t *pxItemSize)
{
    //Check arguments and buffer state
    ItemHeader_t *pxHeader = (ItemHeader_t *)pxRingbuffer->pucRead;
    configASSERT(pxIsSplit != NULL);
    configASSERT((pxRingbuffer->xItemsWaiting > 0) && ((pxRingbuffer->pucRead != pxRingbuffer->pucWrite) || (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG)));   //Check there are items to be read
    configASSERT(rbCHECK_ALIGNED(pxRingbuffer->pucRead));   //pucRead is always aligned in split ring buffers
    configASSERT(pxRingbuffer->pucRead >= pxRingbuffer->pucHead && pxRingbuffer->pucRead < pxRingbuffer->pucTail);  //Check read pointer is within bounds
    configASSERT((pxHeader->xItemLen <= pxRingbuffer->xMaxItemSize) || (pxHeader->uxItemFlags & rbITEM_DUMMY_DATA_FLAG));

    uint8_t *pcReturn;
    //Wrap around if dummy data (dummy data indicates wrap around in no-split buffers)
    if (pxHeader->uxItemFlags & rbITEM_DUMMY_DATA_FLAG) {
        pxRingbuffer->pucRead = pxRingbuffer->pucHead;
        //Check for errors with the next item
        pxHeader = (ItemHeader_t *)pxRingbuffer->pucRead;
        configASSERT(pxHeader->xItemLen <= pxRingbuffer->xMaxItemSize);
    }
    pcReturn = pxRingbuffer->pucRead + rbHEADER_SIZE;   //Get pointer to part of item containing data (point past the header)
    if (pxHeader->xItemLen == 0) {
        //Inclusive of pucTail for special case where item of zero length just fits at the end of the buffer
        configASSERT(pcReturn >= pxRingbuffer->pucHead && pcReturn <= pxRingbuffer->pucTail);
    } else {
        //Exclusive of pucTail if length is larger than zero, pcReturn should never point to pucTail
        configASSERT(pcReturn >= pxRingbuffer->pucHead && pcReturn < pxRingbuffer->pucTail);
    }
    *pxItemSize = pxHeader->xItemLen;   //Get length of item
    pxRingbuffer->xItemsWaiting --;     //Update item count
    *pxIsSplit = (pxHeader->uxItemFlags & rbITEM_SPLIT_FLAG) ? pdTRUE : pdFALSE;

    pxRingbuffer->pucRead += rbHEADER_SIZE + rbALIGN_SIZE(pxHeader->xItemLen);  //Update pucRead
    //Check if pucRead requires wrap around (not enough room left for another header)
    if ((pxRingbuffer->pucTail - pxRingbuffer->pucRead) < rbHEADER_SIZE) {
        pxRingbuffer->pucRead = pxRingbuffer->pucHead;
    }
    return (void *)pcReturn;
}
/*
 * Retrieve a contiguous run of bytes from a byte buffer.
 *
 * pxRingbuffer   Buffer to retrieve from; caller must hold the buffer's lock
 *                and must have verified data is available.
 * pxUnusedParam  Unused; keeps the signature uniform with the pvGetItem
 *                function-pointer type.
 * xMaxSize       Upper bound on bytes returned; 0 means "as much contiguous
 *                data as possible".
 * pxItemSize     Output: number of bytes actually returned.
 *
 * Returns a pointer to the start of the retrieved data. Only one retrieval
 * may be outstanding at a time (enforced by the pucRead == pucFree assert).
 */
static void *prvGetItemByteBuf(Ringbuffer_t *pxRingbuffer,
                               BaseType_t *pxUnusedParam,
                               size_t xMaxSize,
                               size_t *pxItemSize)
{
    //Check arguments and buffer state
    configASSERT((pxRingbuffer->xItemsWaiting > 0) && ((pxRingbuffer->pucRead != pxRingbuffer->pucWrite) || (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG))); //Check there are items to be read
    configASSERT(pxRingbuffer->pucRead >= pxRingbuffer->pucHead && pxRingbuffer->pucRead < pxRingbuffer->pucTail); //Check read pointer is within bounds
    configASSERT(pxRingbuffer->pucRead == pxRingbuffer->pucFree); //No other retrieval may be outstanding
    uint8_t *ret = pxRingbuffer->pucRead;
    if ((pxRingbuffer->pucRead > pxRingbuffer->pucWrite) || (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG)) { //Available data wraps around
        //Return contiguous piece from read pointer until buffer tail, or xMaxSize
        if (xMaxSize == 0 || pxRingbuffer->pucTail - pxRingbuffer->pucRead <= xMaxSize) {
            //All contiguous data from read pointer to tail
            *pxItemSize = pxRingbuffer->pucTail - pxRingbuffer->pucRead;
            pxRingbuffer->xItemsWaiting -= pxRingbuffer->pucTail - pxRingbuffer->pucRead;
            pxRingbuffer->pucRead = pxRingbuffer->pucHead; //Wrap around read pointer
        } else {
            //Return xMaxSize amount of data
            *pxItemSize = xMaxSize;
            pxRingbuffer->xItemsWaiting -= xMaxSize;
            pxRingbuffer->pucRead += xMaxSize; //Advance read pointer past retrieved data
        }
    } else { //Available data is contiguous between read and write pointer
        if (xMaxSize == 0 || pxRingbuffer->pucWrite - pxRingbuffer->pucRead <= xMaxSize) {
            //Return all contiguous data from read to write pointer
            *pxItemSize = pxRingbuffer->pucWrite - pxRingbuffer->pucRead;
            pxRingbuffer->xItemsWaiting -= pxRingbuffer->pucWrite - pxRingbuffer->pucRead;
            pxRingbuffer->pucRead = pxRingbuffer->pucWrite;
        } else {
            //Return xMaxSize data from read pointer
            *pxItemSize = xMaxSize;
            pxRingbuffer->xItemsWaiting -= xMaxSize;
            pxRingbuffer->pucRead += xMaxSize; //Advance read pointer past retrieved data
        }
    }
    return (void *)ret;
}
/*
 * Return (free) a previously retrieved item to a no-split/allow-split buffer.
 *
 * pxRingbuffer  Buffer the item belongs to; caller must hold the buffer's lock.
 * pucItem       Pointer to the item's data, exactly as returned by
 *               prvGetItemDefault (i.e. just past the item's header).
 *
 * Marks the item free, then advances pucFree over any contiguous run of
 * already-freed or dummy items. Items may be returned out of order, so
 * pucFree only moves when the oldest outstanding item has been returned.
 */
static void prvReturnItemDefault(Ringbuffer_t *pxRingbuffer, uint8_t *pucItem)
{
    //Check arguments and buffer state
    configASSERT(rbCHECK_ALIGNED(pucItem));
    configASSERT(pucItem >= pxRingbuffer->pucHead);
    configASSERT(pucItem <= pxRingbuffer->pucTail); //Inclusive of pucTail in the case of zero length item at the very end
    //Get and check header of the item (the header sits immediately before the data)
    ItemHeader_t *pxCurHeader = (ItemHeader_t *)(pucItem - rbHEADER_SIZE);
    configASSERT(pxCurHeader->xItemLen <= pxRingbuffer->xMaxItemSize);
    configASSERT((pxCurHeader->uxItemFlags & rbITEM_DUMMY_DATA_FLAG) == 0); //Dummy items should never have been read
    configASSERT((pxCurHeader->uxItemFlags & rbITEM_FREE_FLAG) == 0); //Indicates item has already been returned before
    pxCurHeader->uxItemFlags &= ~rbITEM_SPLIT_FLAG; //Clear wrap flag if set (not strictly necessary)
    pxCurHeader->uxItemFlags |= rbITEM_FREE_FLAG; //Mark as free
    /*
     * Items might not be returned in the order they were retrieved. Move the free pointer
     * up to the next item that has not been marked as free (by free flag) or up
     * till the read pointer. When advancing the free pointer, items that have already been
     * freed or items with dummy data should be skipped over
     */
    pxCurHeader = (ItemHeader_t *)pxRingbuffer->pucFree;
    //Skip over Items that have already been freed or are dummy items
    while (((pxCurHeader->uxItemFlags & rbITEM_FREE_FLAG) || (pxCurHeader->uxItemFlags & rbITEM_DUMMY_DATA_FLAG)) && pxRingbuffer->pucFree != pxRingbuffer->pucRead) {
        if (pxCurHeader->uxItemFlags & rbITEM_DUMMY_DATA_FLAG) {
            pxCurHeader->uxItemFlags |= rbITEM_FREE_FLAG; //Mark as freed (not strictly necessary but adds redundancy)
            pxRingbuffer->pucFree = pxRingbuffer->pucHead; //Wrap around due to dummy data
        } else {
            //Item with data that has already been freed, advance free pointer past this item
            size_t xAlignedItemSize = rbALIGN_SIZE(pxCurHeader->xItemLen);
            pxRingbuffer->pucFree += xAlignedItemSize + rbHEADER_SIZE;
            //Redundancy check to ensure free pointer has not overshot buffer bounds
            configASSERT(pxRingbuffer->pucFree <= pxRingbuffer->pucHead + pxRingbuffer->xSize);
        }
        //Check if pucFree requires wrap around (not enough room left for another header)
        if ((pxRingbuffer->pucTail - pxRingbuffer->pucFree) < rbHEADER_SIZE) {
            pxRingbuffer->pucFree = pxRingbuffer->pucHead;
        }
        pxCurHeader = (ItemHeader_t *)pxRingbuffer->pucFree; //Update header to point to item
    }
    //Check if the buffer full flag should be reset
    if (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) {
        if (pxRingbuffer->pucFree != pxRingbuffer->pucAcquire) {
            //Free pointer moved away from acquire pointer, so space was reclaimed
            pxRingbuffer->uxRingbufferFlags &= ~rbBUFFER_FULL_FLAG;
        } else if (pxRingbuffer->pucFree == pxRingbuffer->pucAcquire && pxRingbuffer->pucFree == pxRingbuffer->pucRead) {
            //Special case where a full buffer is completely freed in one go
            pxRingbuffer->uxRingbufferFlags &= ~rbBUFFER_FULL_FLAG;
        }
    }
}
  574. static void prvReturnItemByteBuf(Ringbuffer_t *pxRingbuffer, uint8_t *pucItem)
  575. {
  576. //Check pointer points to address inside buffer
  577. configASSERT((uint8_t *)pucItem >= pxRingbuffer->pucHead);
  578. configASSERT((uint8_t *)pucItem < pxRingbuffer->pucTail);
  579. //Free the read memory. Simply moves free pointer to read pointer as byte buffers do not allow multiple outstanding reads
  580. pxRingbuffer->pucFree = pxRingbuffer->pucRead;
  581. //If buffer was full before, reset full flag as free pointer has moved
  582. if (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) {
  583. pxRingbuffer->uxRingbufferFlags &= ~rbBUFFER_FULL_FLAG;
  584. }
  585. }
  586. static size_t prvGetCurMaxSizeNoSplit(Ringbuffer_t *pxRingbuffer)
  587. {
  588. BaseType_t xFreeSize;
  589. //Check if buffer is full
  590. if (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) {
  591. return 0;
  592. }
  593. if (pxRingbuffer->pucAcquire < pxRingbuffer->pucFree) {
  594. //Free space is contiguous between pucAcquire and pucFree
  595. xFreeSize = pxRingbuffer->pucFree - pxRingbuffer->pucAcquire;
  596. } else {
  597. //Free space wraps around (or overlapped at pucHead), select largest
  598. //contiguous free space as no-split items require contiguous space
  599. size_t xSize1 = pxRingbuffer->pucTail - pxRingbuffer->pucAcquire;
  600. size_t xSize2 = pxRingbuffer->pucFree - pxRingbuffer->pucHead;
  601. xFreeSize = (xSize1 > xSize2) ? xSize1 : xSize2;
  602. }
  603. //No-split ring buffer items need space for a header
  604. xFreeSize -= rbHEADER_SIZE;
  605. //Check for xFreeSize < 0 before checking xFreeSize > pxRingbuffer->xMaxItemSize
  606. //to avoid incorrect comparison operation when xFreeSize is negative
  607. if (xFreeSize < 0) {
  608. //Occurs when free space is less than header size
  609. xFreeSize = 0;
  610. } else if (xFreeSize > pxRingbuffer->xMaxItemSize) {
  611. //Limit free size to be within bounds
  612. xFreeSize = pxRingbuffer->xMaxItemSize;
  613. }
  614. return xFreeSize;
  615. }
/*
 * Return the largest item that could currently be sent to an allow-split
 * buffer. Split items may wrap around the buffer end, but each part needs
 * its own header; the result is clamped to xMaxItemSize.
 * Caller must hold the buffer's spinlock.
 */
static size_t prvGetCurMaxSizeAllowSplit(Ringbuffer_t *pxRingbuffer)
{
    BaseType_t xFreeSize; //Signed: header subtraction below may go negative
    //Check if buffer is full
    if (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) {
        return 0;
    }
    if (pxRingbuffer->pucAcquire == pxRingbuffer->pucHead && pxRingbuffer->pucFree == pxRingbuffer->pucHead) {
        //Check for special case where pucAcquire and pucFree are both at pucHead:
        //the whole buffer is free and only one header is needed
        xFreeSize = pxRingbuffer->xSize - rbHEADER_SIZE;
    } else if (pxRingbuffer->pucAcquire < pxRingbuffer->pucFree) {
        //Free space is contiguous between pucAcquire and pucFree, requires single header
        xFreeSize = (pxRingbuffer->pucFree - pxRingbuffer->pucAcquire) - rbHEADER_SIZE;
    } else {
        //Free space wraps around, requires two headers (one per part of a split item)
        xFreeSize = (pxRingbuffer->pucFree - pxRingbuffer->pucHead) +
                    (pxRingbuffer->pucTail - pxRingbuffer->pucAcquire) -
                    (rbHEADER_SIZE * 2);
    }
    //Check for xFreeSize < 0 before checking xFreeSize > pxRingbuffer->xMaxItemSize
    //to avoid incorrect comparison operation when xFreeSize is negative
    if (xFreeSize < 0) {
        xFreeSize = 0;
    } else if (xFreeSize > pxRingbuffer->xMaxItemSize) {
        //Limit free size to be within bounds
        xFreeSize = pxRingbuffer->xMaxItemSize;
    }
    return xFreeSize;
}
  645. static size_t prvGetCurMaxSizeByteBuf(Ringbuffer_t *pxRingbuffer)
  646. {
  647. BaseType_t xFreeSize;
  648. //Check if buffer is full
  649. if (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) {
  650. return 0;
  651. }
  652. /*
  653. * Return whatever space is available depending on relative positions of the free
  654. * pointer and Acquire pointer. There is no overhead of headers in this mode
  655. */
  656. xFreeSize = pxRingbuffer->pucFree - pxRingbuffer->pucAcquire;
  657. if (xFreeSize <= 0) {
  658. xFreeSize += pxRingbuffer->xSize;
  659. }
  660. return xFreeSize;
  661. }
/*
 * Common implementation behind send and send-acquire.
 *
 * pxRingbuffer  Target buffer.
 * pvItem        Data to copy in (copy path); ignored when ppvItem is set.
 * ppvItem       When non-NULL, selects the acquire path: the buffer space is
 *               reserved and returned through this pointer instead of copied
 *               into (no-split buffers only; see prvAcquireItemNoSplit).
 * xItemSize     Size of the item in bytes.
 * xTicksToWait  Maximum ticks to block waiting for space.
 *
 * Returns pdTRUE on success, pdFALSE on timeout. Blocks (outside the
 * critical section) until space is available or the timeout expires.
 */
static BaseType_t prvSendAcquireGeneric(Ringbuffer_t *pxRingbuffer,
                                        const void *pvItem,
                                        void **ppvItem,
                                        size_t xItemSize,
                                        TickType_t xTicksToWait)
{
    BaseType_t xReturn = pdFALSE;
    BaseType_t xExitLoop = pdFALSE;
    BaseType_t xEntryTimeSet = pdFALSE;
    BaseType_t xNotifyQueueSet = pdFALSE;
    TimeOut_t xTimeOut;
    while (xExitLoop == pdFALSE) {
        portENTER_CRITICAL(&pxRingbuffer->mux);
        if (pxRingbuffer->xCheckItemFits(pxRingbuffer, xItemSize) == pdTRUE) {
            //xItemSize will fit. Copy or acquire the buffer immediately
            if (ppvItem) {
                //Acquire the buffer (no copy; caller fills it and completes later)
                *ppvItem = prvAcquireItemNoSplit(pxRingbuffer, xItemSize);
            } else {
                //Copy item into buffer
                pxRingbuffer->vCopyItem(pxRingbuffer, pvItem, xItemSize);
                if (pxRingbuffer->xQueueSet) {
                    //If ring buffer was added to a queue set, notify the queue set
                    //(deferred until outside the critical section; see below)
                    xNotifyQueueSet = pdTRUE;
                } else {
                    //If a task was waiting for data to arrive on the ring buffer, unblock it immediately.
                    if (listLIST_IS_EMPTY(&pxRingbuffer->xTasksWaitingToReceive) == pdFALSE) {
                        if (xTaskRemoveFromEventList(&pxRingbuffer->xTasksWaitingToReceive) == pdTRUE) {
                            //The unblocked task will preempt us. Trigger a yield here.
                            portYIELD_WITHIN_API();
                        }
                    }
                }
            }
            xReturn = pdTRUE;
            xExitLoop = pdTRUE;
            goto loop_end;
        } else if (xTicksToWait == (TickType_t) 0) {
            //No block time. Return immediately.
            xExitLoop = pdTRUE;
            goto loop_end;
        } else if (xEntryTimeSet == pdFALSE) {
            //This is our first block. Set entry time
            vTaskInternalSetTimeOutState(&xTimeOut);
            xEntryTimeSet = pdTRUE;
        }
        if (xTaskCheckForTimeOut(&xTimeOut, &xTicksToWait) == pdFALSE) {
            //Not timed out yet. Block the current task
            vTaskPlaceOnEventList(&pxRingbuffer->xTasksWaitingToSend, xTicksToWait);
            portYIELD_WITHIN_API();
        } else {
            //We have timed out
            xExitLoop = pdTRUE;
        }
loop_end:
        portEXIT_CRITICAL(&pxRingbuffer->mux);
    }
    //Defer notifying the queue set until we are outside the loop and critical section.
    if (xNotifyQueueSet == pdTRUE) {
        xQueueSend((QueueHandle_t)pxRingbuffer->xQueueSet, (QueueSetMemberHandle_t *)&pxRingbuffer, 0);
    }
    return xReturn;
}
/*
 * Common implementation behind all blocking receive variants.
 *
 * pxRingbuffer  Buffer to receive from.
 * pvItem1       Output: first (or only) retrieved item/data pointer.
 * pvItem2       Output: second part of a split item, or NULL if not split
 *               (only written for allow-split buffers).
 * xItemSize1    Output: size of the first part.
 * xItemSize2    Output: size of the second part (allow-split only).
 * xMaxSize      Byte buffers only: upper bound on bytes retrieved (0 = all).
 * xTicksToWait  Maximum ticks to block waiting for data.
 *
 * Returns pdTRUE when data was retrieved, pdFALSE on timeout.
 */
static BaseType_t prvReceiveGeneric(Ringbuffer_t *pxRingbuffer,
                                    void **pvItem1,
                                    void **pvItem2,
                                    size_t *xItemSize1,
                                    size_t *xItemSize2,
                                    size_t xMaxSize,
                                    TickType_t xTicksToWait)
{
    BaseType_t xReturn = pdFALSE;
    BaseType_t xExitLoop = pdFALSE;
    BaseType_t xEntryTimeSet = pdFALSE;
    TimeOut_t xTimeOut;
#ifdef __clang_analyzer__
    // Teach clang-tidy that if NULL pointers are provided, this function will never dereference them
    if (!pvItem1 || !pvItem2 || !xItemSize1 || !xItemSize2) {
        return pdFALSE;
    }
#endif /*__clang_analyzer__ */
    while (xExitLoop == pdFALSE) {
        portENTER_CRITICAL(&pxRingbuffer->mux);
        if (prvCheckItemAvail(pxRingbuffer) == pdTRUE) {
            //Item/data is available for retrieval
            BaseType_t xIsSplit = pdFALSE;
            if (pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG) {
                //Read up to xMaxSize bytes from byte buffer
                *pvItem1 = pxRingbuffer->pvGetItem(pxRingbuffer, NULL, xMaxSize, xItemSize1);
            } else {
                //Get (first) item from no-split/allow-split buffers
                *pvItem1 = pxRingbuffer->pvGetItem(pxRingbuffer, &xIsSplit, 0, xItemSize1);
            }
            //If split buffer, check for split items
            if (pxRingbuffer->uxRingbufferFlags & rbALLOW_SPLIT_FLAG) {
                if (xIsSplit == pdTRUE) {
                    //Second part is fetched immediately so the caller gets both halves atomically
                    *pvItem2 = pxRingbuffer->pvGetItem(pxRingbuffer, &xIsSplit, 0, xItemSize2);
                    configASSERT(*pvItem2 < *pvItem1); //Check wrap around has occurred
                    configASSERT(xIsSplit == pdFALSE); //Second part should not have wrapped flag
                } else {
                    *pvItem2 = NULL;
                }
            }
            xReturn = pdTRUE;
            xExitLoop = pdTRUE;
            goto loop_end;
        } else if (xTicksToWait == (TickType_t) 0) {
            //No block time. Return immediately.
            xExitLoop = pdTRUE;
            goto loop_end;
        } else if (xEntryTimeSet == pdFALSE) {
            //This is our first block. Set entry time
            vTaskInternalSetTimeOutState(&xTimeOut);
            xEntryTimeSet = pdTRUE;
        }
        if (xTaskCheckForTimeOut(&xTimeOut, &xTicksToWait) == pdFALSE) {
            //Not timed out yet. Block the current task
            vTaskPlaceOnEventList(&pxRingbuffer->xTasksWaitingToReceive, xTicksToWait);
            portYIELD_WITHIN_API();
        } else {
            //We have timed out.
            xExitLoop = pdTRUE;
        }
loop_end:
        portEXIT_CRITICAL(&pxRingbuffer->mux);
    }
    return xReturn;
}
/*
 * ISR-safe variant of prvReceiveGeneric: never blocks, returns pdFALSE
 * immediately when no item/data is available. Parameters are as in
 * prvReceiveGeneric, minus the tick timeout.
 */
static BaseType_t prvReceiveGenericFromISR(Ringbuffer_t *pxRingbuffer,
                                           void **pvItem1,
                                           void **pvItem2,
                                           size_t *xItemSize1,
                                           size_t *xItemSize2,
                                           size_t xMaxSize)
{
    BaseType_t xReturn = pdFALSE;
#ifdef __clang_analyzer__
    // Teach clang-tidy that if NULL pointers are provided, this function will never dereference them
    if (!pvItem1 || !pvItem2 || !xItemSize1 || !xItemSize2) {
        return pdFALSE;
    }
#endif /*__clang_analyzer__ */
    portENTER_CRITICAL_ISR(&pxRingbuffer->mux);
    if (prvCheckItemAvail(pxRingbuffer) == pdTRUE) {
        BaseType_t xIsSplit = pdFALSE;
        if (pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG) {
            //Read up to xMaxSize bytes from byte buffer
            *pvItem1 = pxRingbuffer->pvGetItem(pxRingbuffer, NULL, xMaxSize, xItemSize1);
        } else {
            //Get (first) item from no-split/allow-split buffers
            *pvItem1 = pxRingbuffer->pvGetItem(pxRingbuffer, &xIsSplit, 0, xItemSize1);
        }
        //If split buffer, check for split items
        if (pxRingbuffer->uxRingbufferFlags & rbALLOW_SPLIT_FLAG) {
            if (xIsSplit == pdTRUE) {
                //Fetch the wrapped second part immediately
                *pvItem2 = pxRingbuffer->pvGetItem(pxRingbuffer, &xIsSplit, 0, xItemSize2);
                configASSERT(*pvItem2 < *pvItem1); //Check wrap around has occurred
                configASSERT(xIsSplit == pdFALSE); //Second part should not have wrapped flag
            } else {
                *pvItem2 = NULL;
            }
        }
        xReturn = pdTRUE;
    } else {
        xReturn = pdFALSE;
    }
    portEXIT_CRITICAL_ISR(&pxRingbuffer->mux);
    return xReturn;
}
  831. // ------------------------------------------------ Public Functions ---------------------------------------------------
  832. RingbufHandle_t xRingbufferCreate(size_t xBufferSize, RingbufferType_t xBufferType)
  833. {
  834. configASSERT(xBufferSize > 0);
  835. configASSERT(xBufferType < RINGBUF_TYPE_MAX);
  836. //Allocate memory
  837. if (xBufferType != RINGBUF_TYPE_BYTEBUF) {
  838. xBufferSize = rbALIGN_SIZE(xBufferSize); //xBufferSize is rounded up for no-split/allow-split buffers
  839. }
  840. Ringbuffer_t *pxNewRingbuffer = calloc(1, sizeof(Ringbuffer_t));
  841. uint8_t *pucRingbufferStorage = malloc(xBufferSize);
  842. if (pxNewRingbuffer == NULL || pucRingbufferStorage == NULL) {
  843. goto err;
  844. }
  845. prvInitializeNewRingbuffer(xBufferSize, xBufferType, pxNewRingbuffer, pucRingbufferStorage);
  846. return (RingbufHandle_t)pxNewRingbuffer;
  847. err:
  848. //An error has occurred, Free memory and return NULL
  849. free(pxNewRingbuffer);
  850. free(pucRingbufferStorage);
  851. return NULL;
  852. }
  853. RingbufHandle_t xRingbufferCreateNoSplit(size_t xItemSize, size_t xItemNum)
  854. {
  855. return xRingbufferCreate((rbALIGN_SIZE(xItemSize) + rbHEADER_SIZE) * xItemNum, RINGBUF_TYPE_NOSPLIT);
  856. }
  857. RingbufHandle_t xRingbufferCreateStatic(size_t xBufferSize,
  858. RingbufferType_t xBufferType,
  859. uint8_t *pucRingbufferStorage,
  860. StaticRingbuffer_t *pxStaticRingbuffer)
  861. {
  862. //Check arguments
  863. configASSERT(xBufferSize > 0);
  864. configASSERT(xBufferType < RINGBUF_TYPE_MAX);
  865. configASSERT(pucRingbufferStorage != NULL && pxStaticRingbuffer != NULL);
  866. if (xBufferType != RINGBUF_TYPE_BYTEBUF) {
  867. //No-split/allow-split buffer sizes must be 32-bit aligned
  868. configASSERT(rbCHECK_ALIGNED(xBufferSize));
  869. }
  870. Ringbuffer_t *pxNewRingbuffer = (Ringbuffer_t *)pxStaticRingbuffer;
  871. prvInitializeNewRingbuffer(xBufferSize, xBufferType, pxNewRingbuffer, pucRingbufferStorage);
  872. pxNewRingbuffer->uxRingbufferFlags |= rbBUFFER_STATIC_FLAG;
  873. return (RingbufHandle_t)pxNewRingbuffer;
  874. }
/*
 * Reserve space for an item in a no-split ring buffer without copying data.
 * The caller fills the returned pointer and must later call
 * xRingbufferSendComplete() to make the item visible to receivers.
 * Returns pdTRUE on success, pdFALSE if the item can never fit or the
 * timeout expired.
 */
BaseType_t xRingbufferSendAcquire(RingbufHandle_t xRingbuffer, void **ppvItem, size_t xItemSize, TickType_t xTicksToWait)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    //Check arguments
    configASSERT(pxRingbuffer);
    configASSERT(ppvItem != NULL);
    configASSERT((pxRingbuffer->uxRingbufferFlags & (rbBYTE_BUFFER_FLAG | rbALLOW_SPLIT_FLAG)) == 0); //Send acquire currently only supported in NoSplit buffers
    *ppvItem = NULL;
    if (xItemSize > pxRingbuffer->xMaxItemSize) {
        return pdFALSE; //Data will never ever fit in the queue.
    }
    //NOTE(review): given the assert above, the byte-buffer flag can only be set
    //here when configASSERT is compiled out; this check is defensive/dead code.
    if ((pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG) && xItemSize == 0) {
        return pdTRUE; //Sending 0 bytes to byte buffer has no effect
    }
    //ppvItem non-NULL selects the acquire path in prvSendAcquireGeneric
    return prvSendAcquireGeneric(pxRingbuffer, NULL, ppvItem, xItemSize, xTicksToWait);
}
/*
 * Complete a send started with xRingbufferSendAcquire(): marks the acquired
 * item as ready, then notifies the queue set or wakes a waiting receiver.
 * pvItem must be a pointer previously returned by xRingbufferSendAcquire().
 * Always returns pdTRUE.
 */
BaseType_t xRingbufferSendComplete(RingbufHandle_t xRingbuffer, void *pvItem)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    BaseType_t xNotifyQueueSet = pdFALSE;
    //Check arguments
    configASSERT(pxRingbuffer);
    configASSERT(pvItem != NULL);
    //Only no-split buffers support acquire/complete
    configASSERT((pxRingbuffer->uxRingbufferFlags & (rbBYTE_BUFFER_FLAG | rbALLOW_SPLIT_FLAG)) == 0);
    portENTER_CRITICAL(&pxRingbuffer->mux);
    prvSendItemDoneNoSplit(pxRingbuffer, pvItem);
    if (pxRingbuffer->xQueueSet) {
        //If ring buffer was added to a queue set, notify the queue set
        //(deferred until outside the critical section; see below)
        xNotifyQueueSet = pdTRUE;
    } else {
        //If a task was waiting for data to arrive on the ring buffer, unblock it immediately.
        if (listLIST_IS_EMPTY(&pxRingbuffer->xTasksWaitingToReceive) == pdFALSE) {
            if (xTaskRemoveFromEventList(&pxRingbuffer->xTasksWaitingToReceive) == pdTRUE) {
                //The unblocked task will preempt us. Trigger a yield here.
                portYIELD_WITHIN_API();
            }
        }
    }
    portEXIT_CRITICAL(&pxRingbuffer->mux);
    if (xNotifyQueueSet == pdTRUE) {
        xQueueSend((QueueHandle_t)pxRingbuffer->xQueueSet, (QueueSetMemberHandle_t *)&pxRingbuffer, 0);
    }
    return pdTRUE;
}
  919. BaseType_t xRingbufferSend(RingbufHandle_t xRingbuffer,
  920. const void *pvItem,
  921. size_t xItemSize,
  922. TickType_t xTicksToWait)
  923. {
  924. Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
  925. //Check arguments
  926. configASSERT(pxRingbuffer);
  927. configASSERT(pvItem != NULL || xItemSize == 0);
  928. if (xItemSize > pxRingbuffer->xMaxItemSize) {
  929. return pdFALSE; //Data will never ever fit in the queue.
  930. }
  931. if ((pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG) && xItemSize == 0) {
  932. return pdTRUE; //Sending 0 bytes to byte buffer has no effect
  933. }
  934. return prvSendAcquireGeneric(pxRingbuffer, pvItem, NULL, xItemSize, xTicksToWait);
  935. }
/*
 * ISR-safe send: copies the item if it fits right now, never blocks.
 * Sets *pxHigherPriorityTaskWoken to pdTRUE if unblocking a receiver
 * requires a context switch on ISR exit. Returns pdTRUE on success,
 * pdFALSE if the item does not (or can never) fit.
 */
BaseType_t xRingbufferSendFromISR(RingbufHandle_t xRingbuffer,
                                  const void *pvItem,
                                  size_t xItemSize,
                                  BaseType_t *pxHigherPriorityTaskWoken)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    BaseType_t xNotifyQueueSet = pdFALSE;
    BaseType_t xReturn;
    //Check arguments
    configASSERT(pxRingbuffer);
    configASSERT(pvItem != NULL || xItemSize == 0);
    if (xItemSize > pxRingbuffer->xMaxItemSize) {
        return pdFALSE; //Data will never ever fit in the queue.
    }
    if ((pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG) && xItemSize == 0) {
        return pdTRUE; //Sending 0 bytes to byte buffer has no effect
    }
    portENTER_CRITICAL_ISR(&pxRingbuffer->mux);
    if (pxRingbuffer->xCheckItemFits(xRingbuffer, xItemSize) == pdTRUE) {
        pxRingbuffer->vCopyItem(xRingbuffer, pvItem, xItemSize);
        if (pxRingbuffer->xQueueSet) {
            //If ring buffer was added to a queue set, notify the queue set
            //(deferred until outside the critical section; see below)
            xNotifyQueueSet = pdTRUE;
        } else {
            //If a task was waiting for data to arrive on the ring buffer, unblock it immediately.
            if (listLIST_IS_EMPTY(&pxRingbuffer->xTasksWaitingToReceive) == pdFALSE) {
                if (xTaskRemoveFromEventList(&pxRingbuffer->xTasksWaitingToReceive) == pdTRUE) {
                    //The unblocked task will preempt us. Record that a context switch is required.
                    if (pxHigherPriorityTaskWoken != NULL) {
                        *pxHigherPriorityTaskWoken = pdTRUE;
                    }
                }
            }
        }
        xReturn = pdTRUE;
    } else {
        xReturn = pdFALSE;
    }
    portEXIT_CRITICAL_ISR(&pxRingbuffer->mux);
    //Defer notifying the queue set until we are outside the critical section.
    if (xNotifyQueueSet == pdTRUE) {
        xQueueSendFromISR((QueueHandle_t)pxRingbuffer->xQueueSet, (QueueSetMemberHandle_t *)&pxRingbuffer, pxHigherPriorityTaskWoken);
    }
    return xReturn;
}
  981. void *xRingbufferReceive(RingbufHandle_t xRingbuffer, size_t *pxItemSize, TickType_t xTicksToWait)
  982. {
  983. Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
  984. //Check arguments
  985. configASSERT(pxRingbuffer && pxItemSize);
  986. //Attempt to retrieve an item
  987. void *pvTempItem;
  988. if (prvReceiveGeneric(pxRingbuffer, &pvTempItem, NULL, pxItemSize, NULL, 0, xTicksToWait) == pdTRUE) {
  989. return pvTempItem;
  990. } else {
  991. return NULL;
  992. }
  993. }
  994. void *xRingbufferReceiveFromISR(RingbufHandle_t xRingbuffer, size_t *pxItemSize)
  995. {
  996. Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
  997. //Check arguments
  998. configASSERT(pxRingbuffer && pxItemSize);
  999. //Attempt to retrieve an item
  1000. void *pvTempItem;
  1001. if (prvReceiveGenericFromISR(pxRingbuffer, &pvTempItem, NULL, pxItemSize, NULL, 0) == pdTRUE) {
  1002. return pvTempItem;
  1003. } else {
  1004. return NULL;
  1005. }
  1006. }
  1007. BaseType_t xRingbufferReceiveSplit(RingbufHandle_t xRingbuffer,
  1008. void **ppvHeadItem,
  1009. void **ppvTailItem,
  1010. size_t *pxHeadItemSize,
  1011. size_t *pxTailItemSize,
  1012. TickType_t xTicksToWait)
  1013. {
  1014. Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
  1015. //Check arguments
  1016. configASSERT(pxRingbuffer && ppvHeadItem && ppvTailItem && pxHeadItemSize && pxTailItemSize);
  1017. configASSERT(pxRingbuffer->uxRingbufferFlags & rbALLOW_SPLIT_FLAG);
  1018. return prvReceiveGeneric(pxRingbuffer, ppvHeadItem, ppvTailItem, pxHeadItemSize, pxTailItemSize, 0, xTicksToWait);
  1019. }
  1020. BaseType_t xRingbufferReceiveSplitFromISR(RingbufHandle_t xRingbuffer,
  1021. void **ppvHeadItem,
  1022. void **ppvTailItem,
  1023. size_t *pxHeadItemSize,
  1024. size_t *pxTailItemSize)
  1025. {
  1026. Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
  1027. //Check arguments
  1028. configASSERT(pxRingbuffer && ppvHeadItem && ppvTailItem && pxHeadItemSize && pxTailItemSize);
  1029. configASSERT(pxRingbuffer->uxRingbufferFlags & rbALLOW_SPLIT_FLAG);
  1030. return prvReceiveGenericFromISR(pxRingbuffer, ppvHeadItem, ppvTailItem, pxHeadItemSize, pxTailItemSize, 0);
  1031. }
  1032. void *xRingbufferReceiveUpTo(RingbufHandle_t xRingbuffer,
  1033. size_t *pxItemSize,
  1034. TickType_t xTicksToWait,
  1035. size_t xMaxSize)
  1036. {
  1037. Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
  1038. //Check arguments
  1039. configASSERT(pxRingbuffer && pxItemSize);
  1040. configASSERT(pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG); //This function should only be called for byte buffers
  1041. if (xMaxSize == 0) {
  1042. return NULL;
  1043. }
  1044. //Attempt to retrieve up to xMaxSize bytes
  1045. void *pvTempItem;
  1046. if (prvReceiveGeneric(pxRingbuffer, &pvTempItem, NULL, pxItemSize, NULL, xMaxSize, xTicksToWait) == pdTRUE) {
  1047. return pvTempItem;
  1048. } else {
  1049. return NULL;
  1050. }
  1051. }
  1052. void *xRingbufferReceiveUpToFromISR(RingbufHandle_t xRingbuffer, size_t *pxItemSize, size_t xMaxSize)
  1053. {
  1054. Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
  1055. //Check arguments
  1056. configASSERT(pxRingbuffer && pxItemSize);
  1057. configASSERT(pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG); //This function should only be called for byte buffers
  1058. if (xMaxSize == 0) {
  1059. return NULL;
  1060. }
  1061. //Attempt to retrieve up to xMaxSize bytes
  1062. void *pvTempItem;
  1063. if (prvReceiveGenericFromISR(pxRingbuffer, &pvTempItem, NULL, pxItemSize, NULL, xMaxSize) == pdTRUE) {
  1064. return pvTempItem;
  1065. } else {
  1066. return NULL;
  1067. }
  1068. }
/*
 * Return a previously received item to the buffer, freeing its space.
 * pvItem must be a pointer obtained from one of the receive functions.
 * If freeing space unblocks a task waiting to send, a yield is triggered.
 */
void vRingbufferReturnItem(RingbufHandle_t xRingbuffer, void *pvItem)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);
    configASSERT(pvItem != NULL);
    portENTER_CRITICAL(&pxRingbuffer->mux);
    //Dispatch to the buffer type's return routine (default or byte-buffer)
    pxRingbuffer->vReturnItem(pxRingbuffer, (uint8_t *)pvItem);
    //If a task was waiting for space to send, unblock it immediately.
    if (listLIST_IS_EMPTY(&pxRingbuffer->xTasksWaitingToSend) == pdFALSE) {
        if (xTaskRemoveFromEventList(&pxRingbuffer->xTasksWaitingToSend) == pdTRUE) {
            //The unblocked task will preempt us. Trigger a yield here.
            portYIELD_WITHIN_API();
        }
    }
    portEXIT_CRITICAL(&pxRingbuffer->mux);
}
/*
 * ISR-safe variant of vRingbufferReturnItem. Instead of yielding directly,
 * sets *pxHigherPriorityTaskWoken to pdTRUE when unblocking a sender
 * requires a context switch on ISR exit.
 */
void vRingbufferReturnItemFromISR(RingbufHandle_t xRingbuffer, void *pvItem, BaseType_t *pxHigherPriorityTaskWoken)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);
    configASSERT(pvItem != NULL);
    portENTER_CRITICAL_ISR(&pxRingbuffer->mux);
    //Dispatch to the buffer type's return routine (default or byte-buffer)
    pxRingbuffer->vReturnItem(pxRingbuffer, (uint8_t *)pvItem);
    //If a task was waiting for space to send, unblock it immediately.
    if (listLIST_IS_EMPTY(&pxRingbuffer->xTasksWaitingToSend) == pdFALSE) {
        if (xTaskRemoveFromEventList(&pxRingbuffer->xTasksWaitingToSend) == pdTRUE) {
            //The unblocked task will preempt us. Record that a context switch is required.
            if (pxHigherPriorityTaskWoken != NULL) {
                *pxHigherPriorityTaskWoken = pdTRUE;
            }
        }
    }
    portEXIT_CRITICAL_ISR(&pxRingbuffer->mux);
}
  1103. void vRingbufferDelete(RingbufHandle_t xRingbuffer)
  1104. {
  1105. Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
  1106. configASSERT(pxRingbuffer);
  1107. //Ring buffer was not statically allocated. Free its memory.
  1108. if ( !( pxRingbuffer->uxRingbufferFlags & rbBUFFER_STATIC_FLAG ) ) {
  1109. free(pxRingbuffer->pucHead);
  1110. free(pxRingbuffer);
  1111. }
  1112. }
  1113. size_t xRingbufferGetMaxItemSize(RingbufHandle_t xRingbuffer)
  1114. {
  1115. Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
  1116. configASSERT(pxRingbuffer);
  1117. return pxRingbuffer->xMaxItemSize;
  1118. }
  1119. size_t xRingbufferGetCurFreeSize(RingbufHandle_t xRingbuffer)
  1120. {
  1121. Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
  1122. configASSERT(pxRingbuffer);
  1123. size_t xFreeSize;
  1124. portENTER_CRITICAL(&pxRingbuffer->mux);
  1125. xFreeSize = pxRingbuffer->xGetCurMaxSize(pxRingbuffer);
  1126. portEXIT_CRITICAL(&pxRingbuffer->mux);
  1127. return xFreeSize;
  1128. }
  1129. BaseType_t xRingbufferAddToQueueSetRead(RingbufHandle_t xRingbuffer, QueueSetHandle_t xQueueSet)
  1130. {
  1131. Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
  1132. BaseType_t xReturn;
  1133. configASSERT(pxRingbuffer && xQueueSet);
  1134. portENTER_CRITICAL(&pxRingbuffer->mux);
  1135. if (pxRingbuffer->xQueueSet != NULL || prvCheckItemAvail(pxRingbuffer) == pdTRUE) {
  1136. /*
  1137. - Cannot add ring buffer to more than one queue set
  1138. - It is dangerous to add a ring buffer to a queue set if the ring buffer currently has data to be read.
  1139. */
  1140. xReturn = pdFALSE;
  1141. } else {
  1142. //Add ring buffer to queue set
  1143. pxRingbuffer->xQueueSet = xQueueSet;
  1144. xReturn = pdTRUE;
  1145. }
  1146. portEXIT_CRITICAL(&pxRingbuffer->mux);
  1147. return xReturn;
  1148. }
  1149. BaseType_t xRingbufferRemoveFromQueueSetRead(RingbufHandle_t xRingbuffer, QueueSetHandle_t xQueueSet)
  1150. {
  1151. Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
  1152. BaseType_t xReturn;
  1153. configASSERT(pxRingbuffer && xQueueSet);
  1154. portENTER_CRITICAL(&pxRingbuffer->mux);
  1155. if (pxRingbuffer->xQueueSet != xQueueSet || prvCheckItemAvail(pxRingbuffer) == pdTRUE) {
  1156. /*
  1157. - Ring buffer was never added to this queue set
  1158. - It is dangerous to remove a ring buffer from a queue set if the ring buffer currently has data to be read.
  1159. */
  1160. xReturn = pdFALSE;
  1161. } else {
  1162. //Remove ring buffer from queue set
  1163. pxRingbuffer->xQueueSet = NULL;
  1164. xReturn = pdTRUE;
  1165. }
  1166. portEXIT_CRITICAL(&pxRingbuffer->mux);
  1167. return xReturn;
  1168. }
  1169. void vRingbufferGetInfo(RingbufHandle_t xRingbuffer,
  1170. UBaseType_t *uxFree,
  1171. UBaseType_t *uxRead,
  1172. UBaseType_t *uxWrite,
  1173. UBaseType_t *uxAcquire,
  1174. UBaseType_t *uxItemsWaiting)
  1175. {
  1176. Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
  1177. configASSERT(pxRingbuffer);
  1178. portENTER_CRITICAL(&pxRingbuffer->mux);
  1179. if (uxFree != NULL) {
  1180. *uxFree = (UBaseType_t)(pxRingbuffer->pucFree - pxRingbuffer->pucHead);
  1181. }
  1182. if (uxRead != NULL) {
  1183. *uxRead = (UBaseType_t)(pxRingbuffer->pucRead - pxRingbuffer->pucHead);
  1184. }
  1185. if (uxWrite != NULL) {
  1186. *uxWrite = (UBaseType_t)(pxRingbuffer->pucWrite - pxRingbuffer->pucHead);
  1187. }
  1188. if (uxAcquire != NULL) {
  1189. *uxAcquire = (UBaseType_t)(pxRingbuffer->pucAcquire - pxRingbuffer->pucHead);
  1190. }
  1191. if (uxItemsWaiting != NULL) {
  1192. *uxItemsWaiting = (UBaseType_t)(pxRingbuffer->xItemsWaiting);
  1193. }
  1194. portEXIT_CRITICAL(&pxRingbuffer->mux);
  1195. }
  1196. void xRingbufferPrintInfo(RingbufHandle_t xRingbuffer)
  1197. {
  1198. Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
  1199. configASSERT(pxRingbuffer);
  1200. printf("Rb size:%d\tfree: %d\trptr: %d\tfreeptr: %d\twptr: %d, aptr: %d\n",
  1201. pxRingbuffer->xSize, prvGetFreeSize(pxRingbuffer),
  1202. pxRingbuffer->pucRead - pxRingbuffer->pucHead,
  1203. pxRingbuffer->pucFree - pxRingbuffer->pucHead,
  1204. pxRingbuffer->pucWrite - pxRingbuffer->pucHead,
  1205. pxRingbuffer->pucAcquire - pxRingbuffer->pucHead);
  1206. }
  1207. BaseType_t xRingbufferGetStaticBuffer(RingbufHandle_t xRingbuffer, uint8_t **ppucRingbufferStorage, StaticRingbuffer_t **ppxStaticRingbuffer)
  1208. {
  1209. Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
  1210. BaseType_t xReturn;
  1211. configASSERT(pxRingbuffer && ppucRingbufferStorage && ppxStaticRingbuffer);
  1212. if (pxRingbuffer->uxRingbufferFlags & rbBUFFER_STATIC_FLAG) {
  1213. *ppucRingbufferStorage = pxRingbuffer->pucHead;
  1214. *ppxStaticRingbuffer = (StaticRingbuffer_t *)pxRingbuffer;
  1215. xReturn = pdTRUE;
  1216. } else {
  1217. xReturn = pdFALSE;
  1218. }
  1219. return xReturn;
  1220. }
  1221. RingbufHandle_t xRingbufferCreateWithCaps(size_t xBufferSize, RingbufferType_t xBufferType, UBaseType_t uxMemoryCaps)
  1222. {
  1223. RingbufHandle_t xRingbuffer;
  1224. StaticRingbuffer_t *pxStaticRingbuffer;
  1225. uint8_t *pucRingbufferStorage;
  1226. pxStaticRingbuffer = heap_caps_malloc(sizeof(StaticRingbuffer_t), (uint32_t)uxMemoryCaps);
  1227. pucRingbufferStorage = heap_caps_malloc(xBufferSize, (uint32_t)uxMemoryCaps);
  1228. if (pxStaticRingbuffer == NULL || pucRingbufferStorage == NULL) {
  1229. goto err;
  1230. }
  1231. // Create the ring buffer using static creation API
  1232. xRingbuffer = xRingbufferCreateStatic(xBufferSize, xBufferType, pucRingbufferStorage, pxStaticRingbuffer);
  1233. if (xRingbuffer == NULL) {
  1234. goto err;
  1235. }
  1236. return xRingbuffer;
  1237. err:
  1238. heap_caps_free(pxStaticRingbuffer);
  1239. heap_caps_free(pucRingbufferStorage);
  1240. return NULL;
  1241. }
  1242. void vRingbufferDeleteWithCaps(RingbufHandle_t xRingbuffer)
  1243. {
  1244. BaseType_t xResult;
  1245. StaticRingbuffer_t *pxStaticRingbuffer = NULL;
  1246. uint8_t *pucRingbufferStorage = NULL;
  1247. // Retrieve the buffers used to create the ring buffer before deleting it
  1248. xResult = xRingbufferGetStaticBuffer(xRingbuffer, &pucRingbufferStorage, &pxStaticRingbuffer);
  1249. configASSERT(xResult == pdTRUE);
  1250. // Delete the ring buffer
  1251. vRingbufferDelete(xRingbuffer);
  1252. // Free the memory buffers
  1253. heap_caps_free(pxStaticRingbuffer);
  1254. heap_caps_free(pucRingbufferStorage);
  1255. }