/*
 * SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
#include "freertos/FreeRTOS.h"
#include "freertos/list.h"
#include "freertos/task.h"
#include "freertos/queue.h"
#include "freertos/ringbuf.h"
#include "esp_heap_caps.h"

// ------------------------------------------------- Macros and Types --------------------------------------------------

//32-bit alignment macros
#define rbALIGN_MASK (0x03)
#define rbALIGN_SIZE( xSize )       ( ( xSize + rbALIGN_MASK ) & ~rbALIGN_MASK )
#define rbCHECK_ALIGNED( pvPtr )    ( ( ( UBaseType_t ) ( pvPtr ) & rbALIGN_MASK ) == 0 )
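//For example, rbALIGN_SIZE(5) == 8 and rbALIGN_SIZE(13) == 16, so stored sizes are always rounded up to a 4-byte boundary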
//Ring buffer flags
#define rbALLOW_SPLIT_FLAG          ( ( UBaseType_t ) 1 )   //The ring buffer allows items to be split
#define rbBYTE_BUFFER_FLAG          ( ( UBaseType_t ) 2 )   //The ring buffer is a byte buffer
#define rbBUFFER_FULL_FLAG          ( ( UBaseType_t ) 4 )   //The ring buffer is currently full (write pointer == free pointer)
#define rbBUFFER_STATIC_FLAG        ( ( UBaseType_t ) 8 )   //The ring buffer is statically allocated
#define rbUSING_QUEUE_SET           ( ( UBaseType_t ) 16 )  //The ring buffer has been added to a queue set
//Item flags
#define rbITEM_FREE_FLAG            ( ( UBaseType_t ) 1 )   //Item has been retrieved and returned by the application, free to overwrite
#define rbITEM_DUMMY_DATA_FLAG      ( ( UBaseType_t ) 2 )   //Data from here to the end of the ring buffer is dummy data. Restart reading at the head of the buffer
#define rbITEM_SPLIT_FLAG           ( ( UBaseType_t ) 4 )   //Valid for RINGBUF_TYPE_ALLOWSPLIT, indicating that the rest of the data is wrapped around
#define rbITEM_WRITTEN_FLAG         ( ( UBaseType_t ) 8 )   //Item has been written to by the application, thus can be read

typedef struct {
    //The size of this structure must be 32-bit aligned
    size_t xItemLen;
    UBaseType_t uxItemFlags;
} ItemHeader_t;

#define rbHEADER_SIZE   sizeof(ItemHeader_t)
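//On a typical 32-bit port (4-byte size_t and UBaseType_t), rbHEADER_SIZE works out to 8 bytes of overhead per stored item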
typedef struct RingbufferDefinition Ringbuffer_t;
typedef BaseType_t (*CheckItemFitsFunction_t)(Ringbuffer_t *pxRingbuffer, size_t xItemSize);
typedef void (*CopyItemFunction_t)(Ringbuffer_t *pxRingbuffer, const uint8_t *pcItem, size_t xItemSize);
typedef BaseType_t (*CheckItemAvailFunction_t) (Ringbuffer_t *pxRingbuffer);
typedef void *(*GetItemFunction_t)(Ringbuffer_t *pxRingbuffer, BaseType_t *pxIsSplit, size_t xMaxSize, size_t *pxItemSize);
typedef void (*ReturnItemFunction_t)(Ringbuffer_t *pxRingbuffer, uint8_t *pvItem);
typedef size_t (*GetCurMaxSizeFunction_t)(Ringbuffer_t *pxRingbuffer);

typedef struct RingbufferDefinition {
    size_t xSize;                               //Size of the data storage
    size_t xMaxItemSize;                        //Maximum item size
    UBaseType_t uxRingbufferFlags;              //Flags to indicate the type and status of ring buffer
    CheckItemFitsFunction_t xCheckItemFits;     //Function to check if item can currently fit in ring buffer
    CopyItemFunction_t vCopyItem;               //Function to copy item to ring buffer
    GetItemFunction_t pvGetItem;                //Function to get item from ring buffer
    ReturnItemFunction_t vReturnItem;           //Function to return item to ring buffer
    GetCurMaxSizeFunction_t xGetCurMaxSize;     //Function to get current free size
    uint8_t *pucAcquire;                        //Acquire Pointer. Points to where the next item should be acquired.
    uint8_t *pucWrite;                          //Write Pointer. Points to where the next item should be written
    uint8_t *pucRead;                           //Read Pointer. Points to where the next item should be read from
    uint8_t *pucFree;                           //Free Pointer. Points to the last item that has yet to be returned to the ring buffer
    uint8_t *pucHead;                           //Pointer to the start of the ring buffer storage area
    uint8_t *pucTail;                           //Pointer to the end of the ring buffer storage area
    BaseType_t xItemsWaiting;                   //Number of items/bytes (for byte buffers) currently in the ring buffer that have not yet been read
    List_t xTasksWaitingToSend;                 //List of tasks that are blocked waiting to send/acquire onto this ring buffer. Stored in priority order.
    List_t xTasksWaitingToReceive;              //List of tasks that are blocked waiting to receive from this ring buffer. Stored in priority order.
    QueueSetHandle_t xQueueSet;                 //Ring buffer's read queue set handle.
    portMUX_TYPE mux;                           //Spinlock required for SMP
} Ringbuffer_t;

_Static_assert(sizeof(StaticRingbuffer_t) == sizeof(Ringbuffer_t), "StaticRingbuffer_t != Ringbuffer_t");
// ------------------------------------------------ Forward Declares ---------------------------------------------------
/*
 * WARNING: All of the following static functions (except generic functions)
 * ARE NOT THREAD SAFE. Therefore they should only be called within a critical
 * section (using spin locks)
 */

//Initialize a ring buffer after space has been allocated for it
static void prvInitializeNewRingbuffer(size_t xBufferSize,
                                       RingbufferType_t xBufferType,
                                       Ringbuffer_t *pxNewRingbuffer,
                                       uint8_t *pucRingbufferStorage);

//Calculate current amount of free space (in bytes) in the ring buffer
static size_t prvGetFreeSize(Ringbuffer_t *pxRingbuffer);

//Checks if an item/data is currently available for retrieval
static BaseType_t prvCheckItemAvail(Ringbuffer_t *pxRingbuffer);

//Checks if an item will currently fit in a no-split/allow-split ring buffer
static BaseType_t prvCheckItemFitsDefault(Ringbuffer_t *pxRingbuffer, size_t xItemSize);

//Checks if an item will currently fit in a byte buffer
static BaseType_t prvCheckItemFitsByteBuffer(Ringbuffer_t *pxRingbuffer, size_t xItemSize);

/*
Copies an item to a no-split ring buffer
Entry:
    - Must have already guaranteed there is sufficient space for the item by calling prvCheckItemFitsDefault()
Exit:
    - New item copied into ring buffer
    - pucAcquire and pucWrite updated
    - Dummy item added if necessary
*/
static void prvCopyItemNoSplit(Ringbuffer_t *pxRingbuffer, const uint8_t *pucItem, size_t xItemSize);

/*
Copies an item to an allow-split ring buffer
Entry:
    - Must have already guaranteed there is sufficient space for the item by calling prvCheckItemFitsDefault()
Exit:
    - New item copied into ring buffer
    - pucAcquire and pucWrite updated
    - Item may be split
*/
static void prvCopyItemAllowSplit(Ringbuffer_t *pxRingbuffer, const uint8_t *pucItem, size_t xItemSize);

//Copies an item to a byte buffer. Only call this function after calling prvCheckItemFitsByteBuffer()
static void prvCopyItemByteBuf(Ringbuffer_t *pxRingbuffer, const uint8_t *pucItem, size_t xItemSize);

/*
Retrieves an item from a no-split/allow-split ring buffer. *pxIsSplit is set to pdTRUE if the retrieved item is split
Entry:
    - Must have already guaranteed that there is an item available for retrieval by calling prvCheckItemAvail()
    - Guaranteed that pucRead points to a valid item (i.e., not a dummy item)
Exit:
    - Item is returned. Only the first half is returned if the item is split
    - pucRead updated to point to the next valid item to read, or equal to pucWrite if there are no more valid items to read
    - pucRead update must skip over dummy items
*/
static void *prvGetItemDefault(Ringbuffer_t *pxRingbuffer,
                               BaseType_t *pxIsSplit,
                               size_t xUnusedParam,
                               size_t *pxItemSize);

//Retrieve data from a byte buffer. If xMaxSize is 0, all continuous data is retrieved
static void *prvGetItemByteBuf(Ringbuffer_t *pxRingbuffer,
                               BaseType_t *pxUnusedParam,
                               size_t xMaxSize,
                               size_t *pxItemSize);

/*
Return an item to an allow-split/no-split ring buffer
Exit:
    - Item is marked free with rbITEM_FREE_FLAG
    - pucFree is progressed as far as possible, skipping over already freed items or dummy items
*/
static void prvReturnItemDefault(Ringbuffer_t *pxRingbuffer, uint8_t *pucItem);

//Return data to a byte buffer
static void prvReturnItemByteBuf(Ringbuffer_t *pxRingbuffer, uint8_t *pucItem);

//Get the maximum size an item can currently have if sent to a no-split ring buffer
static size_t prvGetCurMaxSizeNoSplit(Ringbuffer_t *pxRingbuffer);

//Get the maximum size an item can currently have if sent to an allow-split ring buffer
static size_t prvGetCurMaxSizeAllowSplit(Ringbuffer_t *pxRingbuffer);

//Get the maximum size an item can currently have if sent to a byte buffer
static size_t prvGetCurMaxSizeByteBuf(Ringbuffer_t *pxRingbuffer);

/*
Generic function used to send or acquire an item/buffer.
- If sending, set ppvItem to NULL. pvItem remains unchanged on failure.
- If acquiring, set pvItem to NULL. ppvItem remains unchanged on failure.
*/
static BaseType_t prvSendAcquireGeneric(Ringbuffer_t *pxRingbuffer,
                                        const void *pvItem,
                                        void **ppvItem,
                                        size_t xItemSize,
                                        TickType_t xTicksToWait);

/*
Generic function used to retrieve an item/data from ring buffers. If called on
an allow-split buffer, and pvItem2 and xItemSize2 are not NULL, both parts of
a split item will be retrieved. xMaxSize will only take effect if called on
byte buffers. xItemSize must remain unchanged if no item is retrieved.
*/
static BaseType_t prvReceiveGeneric(Ringbuffer_t *pxRingbuffer,
                                    void **pvItem1,
                                    void **pvItem2,
                                    size_t *xItemSize1,
                                    size_t *xItemSize2,
                                    size_t xMaxSize,
                                    TickType_t xTicksToWait);

//From ISR version of prvReceiveGeneric()
static BaseType_t prvReceiveGenericFromISR(Ringbuffer_t *pxRingbuffer,
                                           void **pvItem1,
                                           void **pvItem2,
                                           size_t *xItemSize1,
                                           size_t *xItemSize2,
                                           size_t xMaxSize);

// ------------------------------------------------ Static Functions ---------------------------------------------------
static void prvInitializeNewRingbuffer(size_t xBufferSize,
                                       RingbufferType_t xBufferType,
                                       Ringbuffer_t *pxNewRingbuffer,
                                       uint8_t *pucRingbufferStorage)
{
    //Initialize values
    pxNewRingbuffer->xSize = xBufferSize;
    pxNewRingbuffer->pucHead = pucRingbufferStorage;
    pxNewRingbuffer->pucTail = pucRingbufferStorage + xBufferSize;
    pxNewRingbuffer->pucFree = pucRingbufferStorage;
    pxNewRingbuffer->pucRead = pucRingbufferStorage;
    pxNewRingbuffer->pucWrite = pucRingbufferStorage;
    pxNewRingbuffer->pucAcquire = pucRingbufferStorage;
    pxNewRingbuffer->xItemsWaiting = 0;
    pxNewRingbuffer->uxRingbufferFlags = 0;

    //Initialize type dependent values and function pointers
    if (xBufferType == RINGBUF_TYPE_NOSPLIT) {
        pxNewRingbuffer->xCheckItemFits = prvCheckItemFitsDefault;
        pxNewRingbuffer->vCopyItem = prvCopyItemNoSplit;
        pxNewRingbuffer->pvGetItem = prvGetItemDefault;
        pxNewRingbuffer->vReturnItem = prvReturnItemDefault;
        /*
         * Worst case scenario is when the read/write/acquire/free pointers are all
         * pointing to the halfway point of the buffer.
         */
        pxNewRingbuffer->xMaxItemSize = rbALIGN_SIZE(pxNewRingbuffer->xSize / 2) - rbHEADER_SIZE;
        pxNewRingbuffer->xGetCurMaxSize = prvGetCurMaxSizeNoSplit;
    } else if (xBufferType == RINGBUF_TYPE_ALLOWSPLIT) {
        pxNewRingbuffer->uxRingbufferFlags |= rbALLOW_SPLIT_FLAG;
        pxNewRingbuffer->xCheckItemFits = prvCheckItemFitsDefault;
        pxNewRingbuffer->vCopyItem = prvCopyItemAllowSplit;
        pxNewRingbuffer->pvGetItem = prvGetItemDefault;
        pxNewRingbuffer->vReturnItem = prvReturnItemDefault;
        //Worst case an item is split into two, incurring two headers of overhead
        pxNewRingbuffer->xMaxItemSize = pxNewRingbuffer->xSize - (sizeof(ItemHeader_t) * 2);
        pxNewRingbuffer->xGetCurMaxSize = prvGetCurMaxSizeAllowSplit;
    } else { //Byte Buffer
        pxNewRingbuffer->uxRingbufferFlags |= rbBYTE_BUFFER_FLAG;
        pxNewRingbuffer->xCheckItemFits = prvCheckItemFitsByteBuffer;
        pxNewRingbuffer->vCopyItem = prvCopyItemByteBuf;
        pxNewRingbuffer->pvGetItem = prvGetItemByteBuf;
        pxNewRingbuffer->vReturnItem = prvReturnItemByteBuf;
        //Byte buffers do not incur any overhead
        pxNewRingbuffer->xMaxItemSize = pxNewRingbuffer->xSize;
        pxNewRingbuffer->xGetCurMaxSize = prvGetCurMaxSizeByteBuf;
    }
    vListInitialise(&pxNewRingbuffer->xTasksWaitingToSend);
    vListInitialise(&pxNewRingbuffer->xTasksWaitingToReceive);
    pxNewRingbuffer->xQueueSet = NULL;
    portMUX_INITIALIZE(&pxNewRingbuffer->mux);
}
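/*
 * Rough illustration of the xMaxItemSize limits set above, assuming an 8-byte ItemHeader_t
 * and a 128-byte storage area:
 *   - RINGBUF_TYPE_NOSPLIT:    rbALIGN_SIZE(128 / 2) - 8   = 56 bytes
 *   - RINGBUF_TYPE_ALLOWSPLIT: 128 - (2 * 8)               = 112 bytes
 *   - RINGBUF_TYPE_BYTEBUF:    128                         = 128 bytes
 */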
static size_t prvGetFreeSize(Ringbuffer_t *pxRingbuffer)
{
    size_t xReturn;
    if (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) {
        xReturn = 0;
    } else {
        BaseType_t xFreeSize = pxRingbuffer->pucFree - pxRingbuffer->pucAcquire;
        //Check if xFreeSize has underflowed
        if (xFreeSize <= 0) {
            xFreeSize += pxRingbuffer->xSize;
        }
        xReturn = xFreeSize;
    }
    configASSERT(xReturn <= pxRingbuffer->xSize);
    return xReturn;
}
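/*
 * Worked example of the wrap-around arithmetic above, assuming a 128-byte buffer:
 * with pucFree at offset 8 and pucAcquire at offset 24, pucFree - pucAcquire = -16,
 * which underflows and is corrected to -16 + 128 = 112 bytes of free space.
 */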
static BaseType_t prvCheckItemFitsDefault(Ringbuffer_t *pxRingbuffer, size_t xItemSize)
{
    //Check arguments and buffer state
    configASSERT(rbCHECK_ALIGNED(pxRingbuffer->pucAcquire));    //pucAcquire is always aligned in no-split/allow-split ring buffers
    configASSERT(pxRingbuffer->pucAcquire >= pxRingbuffer->pucHead && pxRingbuffer->pucAcquire < pxRingbuffer->pucTail);    //Check write pointer is within bounds

    size_t xTotalItemSize = rbALIGN_SIZE(xItemSize) + rbHEADER_SIZE;    //Rounded up aligned item size with header
    if (pxRingbuffer->pucAcquire == pxRingbuffer->pucFree) {
        //Buffer is either completely empty or completely full
        return (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) ? pdFALSE : pdTRUE;
    }
    if (pxRingbuffer->pucFree > pxRingbuffer->pucAcquire) {
        //Free space does not wrap around
        return (xTotalItemSize <= pxRingbuffer->pucFree - pxRingbuffer->pucAcquire) ? pdTRUE : pdFALSE;
    }
    //Free space wraps around
    if (xTotalItemSize <= pxRingbuffer->pucTail - pxRingbuffer->pucAcquire) {
        return pdTRUE;      //Item fits without wrapping around
    }
    //Check if item fits by wrapping
    if (pxRingbuffer->uxRingbufferFlags & rbALLOW_SPLIT_FLAG) {
        //Allow-split wrapping incurs an extra header
        return (xTotalItemSize + rbHEADER_SIZE <= pxRingbuffer->xSize - (pxRingbuffer->pucAcquire - pxRingbuffer->pucFree)) ? pdTRUE : pdFALSE;
    } else {
        return (xTotalItemSize <= pxRingbuffer->pucFree - pxRingbuffer->pucHead) ? pdTRUE : pdFALSE;
    }
}

static BaseType_t prvCheckItemFitsByteBuffer(Ringbuffer_t *pxRingbuffer, size_t xItemSize)
{
    //Check arguments and buffer state
    configASSERT(pxRingbuffer->pucAcquire >= pxRingbuffer->pucHead && pxRingbuffer->pucAcquire < pxRingbuffer->pucTail);    //Check acquire pointer is within bounds

    if (pxRingbuffer->pucAcquire == pxRingbuffer->pucFree) {
        //Buffer is either completely empty or completely full
        return (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) ? pdFALSE : pdTRUE;
    }
    if (pxRingbuffer->pucFree > pxRingbuffer->pucAcquire) {
        //Free space does not wrap around
        return (xItemSize <= pxRingbuffer->pucFree - pxRingbuffer->pucAcquire) ? pdTRUE : pdFALSE;
    }
    //Free space wraps around
    return (xItemSize <= pxRingbuffer->xSize - (pxRingbuffer->pucAcquire - pxRingbuffer->pucFree)) ? pdTRUE : pdFALSE;
}
static uint8_t *prvAcquireItemNoSplit(Ringbuffer_t *pxRingbuffer, size_t xItemSize)
{
    //Check arguments and buffer state
    size_t xAlignedItemSize = rbALIGN_SIZE(xItemSize);                  //Rounded up aligned item size
    size_t xRemLen = pxRingbuffer->pucTail - pxRingbuffer->pucAcquire;  //Length from pucAcquire until end of buffer
    configASSERT(rbCHECK_ALIGNED(pxRingbuffer->pucAcquire));            //pucAcquire is always aligned in no-split ring buffers
    configASSERT(pxRingbuffer->pucAcquire >= pxRingbuffer->pucHead && pxRingbuffer->pucAcquire < pxRingbuffer->pucTail);    //Check write pointer is within bounds
    configASSERT(xRemLen >= rbHEADER_SIZE);                             //Remaining length must be able to at least fit an item header

    //If remaining length can't fit the item, set it as dummy data and wrap around
    if (xRemLen < xAlignedItemSize + rbHEADER_SIZE) {
        ItemHeader_t *pxDummy = (ItemHeader_t *)pxRingbuffer->pucAcquire;
        pxDummy->uxItemFlags = rbITEM_DUMMY_DATA_FLAG;      //Set remaining length as dummy data
        pxDummy->xItemLen = 0;                              //Dummy data should have no length
        pxRingbuffer->pucAcquire = pxRingbuffer->pucHead;   //Reset acquire pointer to wrap around
    }

    //Item should be guaranteed to fit at this point. Set item header and copy data
    ItemHeader_t *pxHeader = (ItemHeader_t *)pxRingbuffer->pucAcquire;
    pxHeader->xItemLen = xItemSize;
    pxHeader->uxItemFlags = 0;

    //Hold the buffer address without touching pucWrite
    uint8_t *item_address = pxRingbuffer->pucAcquire + rbHEADER_SIZE;
    pxRingbuffer->pucAcquire += rbHEADER_SIZE + xAlignedItemSize;       //Advance pucAcquire past header and item to next aligned address

    //After the allocation, add some padding after the buffer and correct the flags
    //If current remaining length can't fit a header, wrap around the acquire pointer
    if (pxRingbuffer->pucTail - pxRingbuffer->pucAcquire < rbHEADER_SIZE) {
        pxRingbuffer->pucAcquire = pxRingbuffer->pucHead;   //Wrap around pucAcquire
    }
    //Check if buffer is full
    if (pxRingbuffer->pucAcquire == pxRingbuffer->pucFree) {
        //Mark the buffer as full to distinguish it from an empty buffer
        pxRingbuffer->uxRingbufferFlags |= rbBUFFER_FULL_FLAG;
    }
    return item_address;
}
static void prvSendItemDoneNoSplit(Ringbuffer_t *pxRingbuffer, uint8_t *pucItem)
{
    //Check arguments and buffer state
    configASSERT(rbCHECK_ALIGNED(pucItem));
    configASSERT(pucItem >= pxRingbuffer->pucHead);
    configASSERT(pucItem <= pxRingbuffer->pucTail);     //Inclusive of pucTail in the case of a zero length item at the very end

    //Get and check header of the item
    ItemHeader_t *pxCurHeader = (ItemHeader_t *)(pucItem - rbHEADER_SIZE);
    configASSERT(pxCurHeader->xItemLen <= pxRingbuffer->xMaxItemSize);
    configASSERT((pxCurHeader->uxItemFlags & rbITEM_DUMMY_DATA_FLAG) == 0);     //Dummy items should never have been written
    configASSERT((pxCurHeader->uxItemFlags & rbITEM_WRITTEN_FLAG) == 0);        //Indicates item has already been written before
    pxCurHeader->uxItemFlags &= ~rbITEM_SPLIT_FLAG;     //Clear wrap flag if set (not strictly necessary)
    pxCurHeader->uxItemFlags |= rbITEM_WRITTEN_FLAG;    //Mark as written
    pxRingbuffer->xItemsWaiting++;

    /*
     * Items might not be written in the order they were acquired. Move the
     * write pointer up to the next item that has not been marked as written (by
     * written flag) or up till the acquire pointer. When advancing the write
     * pointer, items that have already been written or items with dummy data
     * should be skipped over
     */
    pxCurHeader = (ItemHeader_t *)pxRingbuffer->pucWrite;
    //Skip over items that have already been written or are dummy items
    while (((pxCurHeader->uxItemFlags & rbITEM_WRITTEN_FLAG) || (pxCurHeader->uxItemFlags & rbITEM_DUMMY_DATA_FLAG)) && pxRingbuffer->pucWrite != pxRingbuffer->pucAcquire) {
        if (pxCurHeader->uxItemFlags & rbITEM_DUMMY_DATA_FLAG) {
            pxCurHeader->uxItemFlags |= rbITEM_WRITTEN_FLAG;    //Mark as written (not strictly necessary but adds redundancy)
            pxRingbuffer->pucWrite = pxRingbuffer->pucHead;     //Wrap around due to dummy data
        } else {
            //Item with data that has already been written, advance write pointer past this item
            size_t xAlignedItemSize = rbALIGN_SIZE(pxCurHeader->xItemLen);
            pxRingbuffer->pucWrite += xAlignedItemSize + rbHEADER_SIZE;
            //Redundancy check to ensure write pointer has not overshot buffer bounds
            configASSERT(pxRingbuffer->pucWrite <= pxRingbuffer->pucHead + pxRingbuffer->xSize);
        }
        //Check if pucWrite requires wrap around
        if ((pxRingbuffer->pucTail - pxRingbuffer->pucWrite) < rbHEADER_SIZE) {
            pxRingbuffer->pucWrite = pxRingbuffer->pucHead;
        }
        pxCurHeader = (ItemHeader_t *)pxRingbuffer->pucWrite;   //Update header to point to item
    }
}

static void prvCopyItemNoSplit(Ringbuffer_t *pxRingbuffer, const uint8_t *pucItem, size_t xItemSize)
{
    uint8_t *item_addr = prvAcquireItemNoSplit(pxRingbuffer, xItemSize);
    memcpy(item_addr, pucItem, xItemSize);
    prvSendItemDoneNoSplit(pxRingbuffer, item_addr);
}
static void prvCopyItemAllowSplit(Ringbuffer_t *pxRingbuffer, const uint8_t *pucItem, size_t xItemSize)
{
    //Check arguments and buffer state
    size_t xAlignedItemSize = rbALIGN_SIZE(xItemSize);                  //Rounded up aligned item size
    size_t xRemLen = pxRingbuffer->pucTail - pxRingbuffer->pucAcquire;  //Length from pucAcquire until end of buffer
    configASSERT(rbCHECK_ALIGNED(pxRingbuffer->pucAcquire));            //pucAcquire is always aligned in allow-split ring buffers
    configASSERT(pxRingbuffer->pucAcquire >= pxRingbuffer->pucHead && pxRingbuffer->pucAcquire < pxRingbuffer->pucTail);    //Check write pointer is within bounds
    configASSERT(xRemLen >= rbHEADER_SIZE);                             //Remaining length must be able to at least fit an item header

    //Split item if necessary
    if (xRemLen < xAlignedItemSize + rbHEADER_SIZE) {
        //Write first part of the item
        ItemHeader_t *pxFirstHeader = (ItemHeader_t *)pxRingbuffer->pucAcquire;
        pxFirstHeader->uxItemFlags = 0;
        pxFirstHeader->xItemLen = xRemLen - rbHEADER_SIZE;  //Fill remaining length with first part
        pxRingbuffer->pucAcquire += rbHEADER_SIZE;          //Advance pucAcquire past header
        xRemLen -= rbHEADER_SIZE;
        if (xRemLen > 0) {
            memcpy(pxRingbuffer->pucAcquire, pucItem, xRemLen);
            pxRingbuffer->xItemsWaiting++;
            //Update item arguments to account for data already copied
            pucItem += xRemLen;
            xItemSize -= xRemLen;
            xAlignedItemSize -= xRemLen;
            pxFirstHeader->uxItemFlags |= rbITEM_SPLIT_FLAG;        //There must be more data
        } else {
            //Remaining length was only large enough to fit the header
            pxFirstHeader->uxItemFlags |= rbITEM_DUMMY_DATA_FLAG;   //Item will be stored entirely in the 2nd part
        }
        pxRingbuffer->pucAcquire = pxRingbuffer->pucHead;   //Reset acquire pointer to start of buffer
    }

    //Item (whole or second part) should be guaranteed to fit at this point
    ItemHeader_t *pxSecondHeader = (ItemHeader_t *)pxRingbuffer->pucAcquire;
    pxSecondHeader->xItemLen = xItemSize;
    pxSecondHeader->uxItemFlags = 0;
    pxRingbuffer->pucAcquire += rbHEADER_SIZE;      //Advance acquire pointer past header
    memcpy(pxRingbuffer->pucAcquire, pucItem, xItemSize);
    pxRingbuffer->xItemsWaiting++;
    pxRingbuffer->pucAcquire += xAlignedItemSize;   //Advance pucAcquire past item to next aligned address

    //If current remaining length can't fit a header, wrap around the acquire pointer
    if (pxRingbuffer->pucTail - pxRingbuffer->pucAcquire < rbHEADER_SIZE) {
        pxRingbuffer->pucAcquire = pxRingbuffer->pucHead;   //Wrap around pucAcquire
    }
    //Check if buffer is full
    if (pxRingbuffer->pucAcquire == pxRingbuffer->pucFree) {
        //Mark the buffer as full to distinguish it from an empty buffer
        pxRingbuffer->uxRingbufferFlags |= rbBUFFER_FULL_FLAG;
    }
    //Acquiring is currently not supported in allow-split mode, so pucWrite simply tracks pucAcquire
    pxRingbuffer->pucWrite = pxRingbuffer->pucAcquire;
}
static void prvCopyItemByteBuf(Ringbuffer_t *pxRingbuffer, const uint8_t *pucItem, size_t xItemSize)
{
    //Check arguments and buffer state
    configASSERT(pxRingbuffer->pucAcquire >= pxRingbuffer->pucHead && pxRingbuffer->pucAcquire < pxRingbuffer->pucTail);    //Check acquire pointer is within bounds

    size_t xRemLen = pxRingbuffer->pucTail - pxRingbuffer->pucAcquire;  //Length from pucAcquire until end of buffer
    if (xRemLen < xItemSize) {
        //Copy as much as possible into remaining length
        memcpy(pxRingbuffer->pucAcquire, pucItem, xRemLen);
        pxRingbuffer->xItemsWaiting += xRemLen;
        //Update item arguments to account for data already written
        pucItem += xRemLen;
        xItemSize -= xRemLen;
        pxRingbuffer->pucAcquire = pxRingbuffer->pucHead;   //Reset acquire pointer to start of buffer
    }
    //Copy all or remaining portion of the item
    memcpy(pxRingbuffer->pucAcquire, pucItem, xItemSize);
    pxRingbuffer->xItemsWaiting += xItemSize;
    pxRingbuffer->pucAcquire += xItemSize;

    //Wrap around pucAcquire if it reaches the end
    if (pxRingbuffer->pucAcquire == pxRingbuffer->pucTail) {
        pxRingbuffer->pucAcquire = pxRingbuffer->pucHead;
    }
    //Check if buffer is full
    if (pxRingbuffer->pucAcquire == pxRingbuffer->pucFree) {
        pxRingbuffer->uxRingbufferFlags |= rbBUFFER_FULL_FLAG;      //Mark the buffer as full to avoid confusion with an empty buffer
    }
    //Acquiring memory is currently not supported in byte buffer mode, so pucWrite simply tracks pucAcquire
    pxRingbuffer->pucWrite = pxRingbuffer->pucAcquire;
}

static BaseType_t prvCheckItemAvail(Ringbuffer_t *pxRingbuffer)
{
    if ((pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG) && pxRingbuffer->pucRead != pxRingbuffer->pucFree) {
        return pdFALSE;     //Byte buffers do not allow multiple retrievals before return
    }
    if ((pxRingbuffer->xItemsWaiting > 0) && ((pxRingbuffer->pucRead != pxRingbuffer->pucWrite) || (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG))) {
        return pdTRUE;      //Items/data available for retrieval
    } else {
        return pdFALSE;     //No items/data available for retrieval
    }
}
static void *prvGetItemDefault(Ringbuffer_t *pxRingbuffer,
                               BaseType_t *pxIsSplit,
                               size_t xUnusedParam,
                               size_t *pxItemSize)
{
    //Check arguments and buffer state
    ItemHeader_t *pxHeader = (ItemHeader_t *)pxRingbuffer->pucRead;
    configASSERT(pxIsSplit != NULL);
    configASSERT((pxRingbuffer->xItemsWaiting > 0) && ((pxRingbuffer->pucRead != pxRingbuffer->pucWrite) || (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG)));   //Check there are items to be read
    configASSERT(rbCHECK_ALIGNED(pxRingbuffer->pucRead));       //pucRead is always aligned in no-split/allow-split ring buffers
    configASSERT(pxRingbuffer->pucRead >= pxRingbuffer->pucHead && pxRingbuffer->pucRead < pxRingbuffer->pucTail);      //Check read pointer is within bounds
    configASSERT((pxHeader->xItemLen <= pxRingbuffer->xMaxItemSize) || (pxHeader->uxItemFlags & rbITEM_DUMMY_DATA_FLAG));

    uint8_t *pcReturn;
    //Wrap around if dummy data (dummy data indicates wrap around in no-split buffers)
    if (pxHeader->uxItemFlags & rbITEM_DUMMY_DATA_FLAG) {
        pxRingbuffer->pucRead = pxRingbuffer->pucHead;
        //Check for errors with the next item
        pxHeader = (ItemHeader_t *)pxRingbuffer->pucRead;
        configASSERT(pxHeader->xItemLen <= pxRingbuffer->xMaxItemSize);
    }
    pcReturn = pxRingbuffer->pucRead + rbHEADER_SIZE;   //Get pointer to part of item containing data (point past the header)
    if (pxHeader->xItemLen == 0) {
        //Inclusive of pucTail for the special case where an item of zero length just fits at the end of the buffer
        configASSERT(pcReturn >= pxRingbuffer->pucHead && pcReturn <= pxRingbuffer->pucTail);
    } else {
        //Exclusive of pucTail if length is larger than zero, pcReturn should never point to pucTail
        configASSERT(pcReturn >= pxRingbuffer->pucHead && pcReturn < pxRingbuffer->pucTail);
    }
    *pxItemSize = pxHeader->xItemLen;   //Get length of item
    pxRingbuffer->xItemsWaiting--;      //Update item count
    *pxIsSplit = (pxHeader->uxItemFlags & rbITEM_SPLIT_FLAG) ? pdTRUE : pdFALSE;

    pxRingbuffer->pucRead += rbHEADER_SIZE + rbALIGN_SIZE(pxHeader->xItemLen);  //Update pucRead
    //Check if pucRead requires wrap around
    if ((pxRingbuffer->pucTail - pxRingbuffer->pucRead) < rbHEADER_SIZE) {
        pxRingbuffer->pucRead = pxRingbuffer->pucHead;
    }
    return (void *)pcReturn;
}

static void *prvGetItemByteBuf(Ringbuffer_t *pxRingbuffer,
                               BaseType_t *pxUnusedParam,
                               size_t xMaxSize,
                               size_t *pxItemSize)
{
    //Check arguments and buffer state
    configASSERT((pxRingbuffer->xItemsWaiting > 0) && ((pxRingbuffer->pucRead != pxRingbuffer->pucWrite) || (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG)));   //Check there are items to be read
    configASSERT(pxRingbuffer->pucRead >= pxRingbuffer->pucHead && pxRingbuffer->pucRead < pxRingbuffer->pucTail);      //Check read pointer is within bounds
    configASSERT(pxRingbuffer->pucRead == pxRingbuffer->pucFree);

    uint8_t *ret = pxRingbuffer->pucRead;
    if ((pxRingbuffer->pucRead > pxRingbuffer->pucWrite) || (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG)) {   //Available data wraps around
        //Return contiguous piece from read pointer until buffer tail, or xMaxSize
        if (xMaxSize == 0 || pxRingbuffer->pucTail - pxRingbuffer->pucRead <= xMaxSize) {
            //All contiguous data from read pointer to tail
            *pxItemSize = pxRingbuffer->pucTail - pxRingbuffer->pucRead;
            pxRingbuffer->xItemsWaiting -= pxRingbuffer->pucTail - pxRingbuffer->pucRead;
            pxRingbuffer->pucRead = pxRingbuffer->pucHead;      //Wrap around read pointer
        } else {
            //Return xMaxSize amount of data
            *pxItemSize = xMaxSize;
            pxRingbuffer->xItemsWaiting -= xMaxSize;
            pxRingbuffer->pucRead += xMaxSize;      //Advance read pointer past retrieved data
        }
    } else {    //Available data is contiguous between read and write pointer
        if (xMaxSize == 0 || pxRingbuffer->pucWrite - pxRingbuffer->pucRead <= xMaxSize) {
            //Return all contiguous data from read to write pointer
            *pxItemSize = pxRingbuffer->pucWrite - pxRingbuffer->pucRead;
            pxRingbuffer->xItemsWaiting -= pxRingbuffer->pucWrite - pxRingbuffer->pucRead;
            pxRingbuffer->pucRead = pxRingbuffer->pucWrite;
        } else {
            //Return xMaxSize data from read pointer
            *pxItemSize = xMaxSize;
            pxRingbuffer->xItemsWaiting -= xMaxSize;
            pxRingbuffer->pucRead += xMaxSize;      //Advance read pointer past retrieved data
        }
    }
    return (void *)ret;
}
static void prvReturnItemDefault(Ringbuffer_t *pxRingbuffer, uint8_t *pucItem)
{
    //Check arguments and buffer state
    configASSERT(rbCHECK_ALIGNED(pucItem));
    configASSERT(pucItem >= pxRingbuffer->pucHead);
    configASSERT(pucItem <= pxRingbuffer->pucTail);     //Inclusive of pucTail in the case of a zero length item at the very end

    //Get and check header of the item
    ItemHeader_t *pxCurHeader = (ItemHeader_t *)(pucItem - rbHEADER_SIZE);
    configASSERT(pxCurHeader->xItemLen <= pxRingbuffer->xMaxItemSize);
    configASSERT((pxCurHeader->uxItemFlags & rbITEM_DUMMY_DATA_FLAG) == 0);     //Dummy items should never have been read
    configASSERT((pxCurHeader->uxItemFlags & rbITEM_FREE_FLAG) == 0);           //Indicates item has already been returned before
    pxCurHeader->uxItemFlags &= ~rbITEM_SPLIT_FLAG;     //Clear wrap flag if set (not strictly necessary)
    pxCurHeader->uxItemFlags |= rbITEM_FREE_FLAG;       //Mark as free

    /*
     * Items might not be returned in the order they were retrieved. Move the free pointer
     * up to the next item that has not been marked as free (by free flag) or up
     * till the read pointer. When advancing the free pointer, items that have already been
     * freed or items with dummy data should be skipped over
     */
    pxCurHeader = (ItemHeader_t *)pxRingbuffer->pucFree;
    //Skip over items that have already been freed or are dummy items
    while (((pxCurHeader->uxItemFlags & rbITEM_FREE_FLAG) || (pxCurHeader->uxItemFlags & rbITEM_DUMMY_DATA_FLAG)) && pxRingbuffer->pucFree != pxRingbuffer->pucRead) {
        if (pxCurHeader->uxItemFlags & rbITEM_DUMMY_DATA_FLAG) {
            pxCurHeader->uxItemFlags |= rbITEM_FREE_FLAG;   //Mark as freed (not strictly necessary but adds redundancy)
            pxRingbuffer->pucFree = pxRingbuffer->pucHead;  //Wrap around due to dummy data
        } else {
            //Item with data that has already been freed, advance free pointer past this item
            size_t xAlignedItemSize = rbALIGN_SIZE(pxCurHeader->xItemLen);
            pxRingbuffer->pucFree += xAlignedItemSize + rbHEADER_SIZE;
            //Redundancy check to ensure free pointer has not overshot buffer bounds
            configASSERT(pxRingbuffer->pucFree <= pxRingbuffer->pucHead + pxRingbuffer->xSize);
        }
        //Check if pucFree requires wrap around
        if ((pxRingbuffer->pucTail - pxRingbuffer->pucFree) < rbHEADER_SIZE) {
            pxRingbuffer->pucFree = pxRingbuffer->pucHead;
        }
        pxCurHeader = (ItemHeader_t *)pxRingbuffer->pucFree;    //Update header to point to item
    }

    //Check if the buffer full flag should be reset
    if (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) {
        if (pxRingbuffer->pucFree != pxRingbuffer->pucAcquire) {
            pxRingbuffer->uxRingbufferFlags &= ~rbBUFFER_FULL_FLAG;
        } else if (pxRingbuffer->pucFree == pxRingbuffer->pucAcquire && pxRingbuffer->pucFree == pxRingbuffer->pucRead) {
            //Special case where a full buffer is completely freed in one go
            pxRingbuffer->uxRingbufferFlags &= ~rbBUFFER_FULL_FLAG;
        }
    }
}

static void prvReturnItemByteBuf(Ringbuffer_t *pxRingbuffer, uint8_t *pucItem)
{
    //Check pointer points to an address inside the buffer
    configASSERT((uint8_t *)pucItem >= pxRingbuffer->pucHead);
    configASSERT((uint8_t *)pucItem < pxRingbuffer->pucTail);
    //Free the read memory. Simply moves the free pointer to the read pointer as byte buffers do not allow multiple outstanding reads
    pxRingbuffer->pucFree = pxRingbuffer->pucRead;
    //If the buffer was full before, reset the full flag as the free pointer has moved
    if (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) {
        pxRingbuffer->uxRingbufferFlags &= ~rbBUFFER_FULL_FLAG;
    }
}
static size_t prvGetCurMaxSizeNoSplit(Ringbuffer_t *pxRingbuffer)
{
    BaseType_t xFreeSize;
    //Check if buffer is full
    if (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) {
        return 0;
    }
    if (pxRingbuffer->pucAcquire < pxRingbuffer->pucFree) {
        //Free space is contiguous between pucAcquire and pucFree
        xFreeSize = pxRingbuffer->pucFree - pxRingbuffer->pucAcquire;
    } else {
        //Free space wraps around (or overlapped at pucHead), select the largest
        //contiguous free space as no-split items require contiguous space
        size_t xSize1 = pxRingbuffer->pucTail - pxRingbuffer->pucAcquire;
        size_t xSize2 = pxRingbuffer->pucFree - pxRingbuffer->pucHead;
        xFreeSize = (xSize1 > xSize2) ? xSize1 : xSize2;
    }
    //No-split ring buffer items need space for a header
    xFreeSize -= rbHEADER_SIZE;
    //Check for xFreeSize < 0 before checking xFreeSize > pxRingbuffer->xMaxItemSize
    //to avoid an incorrect comparison when xFreeSize is negative
    if (xFreeSize < 0) {
        //Occurs when free space is less than the header size
        xFreeSize = 0;
    } else if (xFreeSize > pxRingbuffer->xMaxItemSize) {
        //Limit free size to be within bounds
        xFreeSize = pxRingbuffer->xMaxItemSize;
    }
    return xFreeSize;
}

static size_t prvGetCurMaxSizeAllowSplit(Ringbuffer_t *pxRingbuffer)
{
    BaseType_t xFreeSize;
    //Check if buffer is full
    if (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) {
        return 0;
    }
    if (pxRingbuffer->pucAcquire == pxRingbuffer->pucHead && pxRingbuffer->pucFree == pxRingbuffer->pucHead) {
        //Check for special case where pucAcquire and pucFree are both at pucHead
        xFreeSize = pxRingbuffer->xSize - rbHEADER_SIZE;
    } else if (pxRingbuffer->pucAcquire < pxRingbuffer->pucFree) {
        //Free space is contiguous between pucAcquire and pucFree, requires a single header
        xFreeSize = (pxRingbuffer->pucFree - pxRingbuffer->pucAcquire) - rbHEADER_SIZE;
    } else {
        //Free space wraps around, requires two headers
        xFreeSize = (pxRingbuffer->pucFree - pxRingbuffer->pucHead) +
                    (pxRingbuffer->pucTail - pxRingbuffer->pucAcquire) -
                    (rbHEADER_SIZE * 2);
    }
    //Check for xFreeSize < 0 before checking xFreeSize > pxRingbuffer->xMaxItemSize
    //to avoid an incorrect comparison when xFreeSize is negative
    if (xFreeSize < 0) {
        xFreeSize = 0;
    } else if (xFreeSize > pxRingbuffer->xMaxItemSize) {
        //Limit free size to be within bounds
        xFreeSize = pxRingbuffer->xMaxItemSize;
    }
    return xFreeSize;
}

static size_t prvGetCurMaxSizeByteBuf(Ringbuffer_t *pxRingbuffer)
{
    BaseType_t xFreeSize;
    //Check if buffer is full
    if (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) {
        return 0;
    }
    /*
     * Return whatever space is available depending on the relative positions of the free
     * pointer and acquire pointer. There is no overhead of headers in this mode
     */
    xFreeSize = pxRingbuffer->pucFree - pxRingbuffer->pucAcquire;
    if (xFreeSize <= 0) {
        xFreeSize += pxRingbuffer->xSize;
    }
    return xFreeSize;
}
static BaseType_t prvSendAcquireGeneric(Ringbuffer_t *pxRingbuffer,
                                        const void *pvItem,
                                        void **ppvItem,
                                        size_t xItemSize,
                                        TickType_t xTicksToWait)
{
    BaseType_t xReturn = pdFALSE;
    BaseType_t xExitLoop = pdFALSE;
    BaseType_t xEntryTimeSet = pdFALSE;
    BaseType_t xNotifyQueueSet = pdFALSE;
    TimeOut_t xTimeOut;

    while (xExitLoop == pdFALSE) {
        portENTER_CRITICAL(&pxRingbuffer->mux);
        if (pxRingbuffer->xCheckItemFits(pxRingbuffer, xItemSize) == pdTRUE) {
            //xItemSize will fit. Copy or acquire the buffer immediately
            if (ppvItem) {
                //Acquire the buffer
                *ppvItem = prvAcquireItemNoSplit(pxRingbuffer, xItemSize);
            } else {
                //Copy item into buffer
                pxRingbuffer->vCopyItem(pxRingbuffer, pvItem, xItemSize);
                if (pxRingbuffer->xQueueSet) {
                    //If the ring buffer was added to a queue set, notify the queue set
                    xNotifyQueueSet = pdTRUE;
                } else {
                    //If a task was waiting for data to arrive on the ring buffer, unblock it immediately.
                    if (listLIST_IS_EMPTY(&pxRingbuffer->xTasksWaitingToReceive) == pdFALSE) {
                        if (xTaskRemoveFromEventList(&pxRingbuffer->xTasksWaitingToReceive) == pdTRUE) {
                            //The unblocked task will preempt us. Trigger a yield here.
                            portYIELD_WITHIN_API();
                        }
                    }
                }
            }
            xReturn = pdTRUE;
            xExitLoop = pdTRUE;
            goto loop_end;
        } else if (xTicksToWait == (TickType_t) 0) {
            //No block time. Return immediately.
            xExitLoop = pdTRUE;
            goto loop_end;
        } else if (xEntryTimeSet == pdFALSE) {
            //This is our first block. Set entry time
            vTaskInternalSetTimeOutState(&xTimeOut);
            xEntryTimeSet = pdTRUE;
        }
        if (xTaskCheckForTimeOut(&xTimeOut, &xTicksToWait) == pdFALSE) {
            //Not timed out yet. Block the current task
            vTaskPlaceOnEventList(&pxRingbuffer->xTasksWaitingToSend, xTicksToWait);
            portYIELD_WITHIN_API();
        } else {
            //We have timed out
            xExitLoop = pdTRUE;
        }
loop_end:
        portEXIT_CRITICAL(&pxRingbuffer->mux);
    }
    //Defer notifying the queue set until we are outside the loop and critical section.
    if (xNotifyQueueSet == pdTRUE) {
        xQueueSend((QueueHandle_t)pxRingbuffer->xQueueSet, (QueueSetMemberHandle_t *)&pxRingbuffer, 0);
    }
    return xReturn;
}
static BaseType_t prvReceiveGeneric(Ringbuffer_t *pxRingbuffer,
                                    void **pvItem1,
                                    void **pvItem2,
                                    size_t *xItemSize1,
                                    size_t *xItemSize2,
                                    size_t xMaxSize,
                                    TickType_t xTicksToWait)
{
    BaseType_t xReturn = pdFALSE;
    BaseType_t xExitLoop = pdFALSE;
    BaseType_t xEntryTimeSet = pdFALSE;
    TimeOut_t xTimeOut;
#ifdef __clang_analyzer__
    // Teach clang-tidy that if NULL pointers are provided, this function will never dereference them
    if (!pvItem1 || !pvItem2 || !xItemSize1 || !xItemSize2) {
        return pdFALSE;
    }
#endif /*__clang_analyzer__ */

    while (xExitLoop == pdFALSE) {
        portENTER_CRITICAL(&pxRingbuffer->mux);
        if (prvCheckItemAvail(pxRingbuffer) == pdTRUE) {
            //Item/data is available for retrieval
            BaseType_t xIsSplit = pdFALSE;
            if (pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG) {
                //Read up to xMaxSize bytes from the byte buffer
                *pvItem1 = pxRingbuffer->pvGetItem(pxRingbuffer, NULL, xMaxSize, xItemSize1);
            } else {
                //Get (first) item from no-split/allow-split buffers
                *pvItem1 = pxRingbuffer->pvGetItem(pxRingbuffer, &xIsSplit, 0, xItemSize1);
            }
            //If split buffer, check for split items
            if (pxRingbuffer->uxRingbufferFlags & rbALLOW_SPLIT_FLAG) {
                if (xIsSplit == pdTRUE) {
                    *pvItem2 = pxRingbuffer->pvGetItem(pxRingbuffer, &xIsSplit, 0, xItemSize2);
                    configASSERT(*pvItem2 < *pvItem1);  //Check wrap around has occurred
                    configASSERT(xIsSplit == pdFALSE);  //Second part should not have the wrapped flag
                } else {
                    *pvItem2 = NULL;
                }
            }
            xReturn = pdTRUE;
            xExitLoop = pdTRUE;
            goto loop_end;
        } else if (xTicksToWait == (TickType_t) 0) {
            //No block time. Return immediately.
            xExitLoop = pdTRUE;
            goto loop_end;
        } else if (xEntryTimeSet == pdFALSE) {
            //This is our first block. Set entry time
            vTaskInternalSetTimeOutState(&xTimeOut);
            xEntryTimeSet = pdTRUE;
        }
        if (xTaskCheckForTimeOut(&xTimeOut, &xTicksToWait) == pdFALSE) {
            //Not timed out yet. Block the current task
            vTaskPlaceOnEventList(&pxRingbuffer->xTasksWaitingToReceive, xTicksToWait);
            portYIELD_WITHIN_API();
        } else {
            //We have timed out.
            xExitLoop = pdTRUE;
        }
loop_end:
        portEXIT_CRITICAL(&pxRingbuffer->mux);
    }
    return xReturn;
}
static BaseType_t prvReceiveGenericFromISR(Ringbuffer_t *pxRingbuffer,
                                           void **pvItem1,
                                           void **pvItem2,
                                           size_t *xItemSize1,
                                           size_t *xItemSize2,
                                           size_t xMaxSize)
{
    BaseType_t xReturn = pdFALSE;
#ifdef __clang_analyzer__
    // Teach clang-tidy that if NULL pointers are provided, this function will never dereference them
    if (!pvItem1 || !pvItem2 || !xItemSize1 || !xItemSize2) {
        return pdFALSE;
    }
#endif /*__clang_analyzer__ */

    portENTER_CRITICAL_ISR(&pxRingbuffer->mux);
    if (prvCheckItemAvail(pxRingbuffer) == pdTRUE) {
        BaseType_t xIsSplit = pdFALSE;
        if (pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG) {
            //Read up to xMaxSize bytes from the byte buffer
            *pvItem1 = pxRingbuffer->pvGetItem(pxRingbuffer, NULL, xMaxSize, xItemSize1);
        } else {
            //Get (first) item from no-split/allow-split buffers
            *pvItem1 = pxRingbuffer->pvGetItem(pxRingbuffer, &xIsSplit, 0, xItemSize1);
        }
        //If split buffer, check for split items
        if (pxRingbuffer->uxRingbufferFlags & rbALLOW_SPLIT_FLAG) {
            if (xIsSplit == pdTRUE) {
                *pvItem2 = pxRingbuffer->pvGetItem(pxRingbuffer, &xIsSplit, 0, xItemSize2);
                configASSERT(*pvItem2 < *pvItem1);  //Check wrap around has occurred
                configASSERT(xIsSplit == pdFALSE);  //Second part should not have the wrapped flag
            } else {
                *pvItem2 = NULL;
            }
        }
        xReturn = pdTRUE;
    } else {
        xReturn = pdFALSE;
    }
    portEXIT_CRITICAL_ISR(&pxRingbuffer->mux);
    return xReturn;
}

// ------------------------------------------------ Public Functions ---------------------------------------------------
RingbufHandle_t xRingbufferCreate(size_t xBufferSize, RingbufferType_t xBufferType)
{
    configASSERT(xBufferSize > 0);
    configASSERT(xBufferType < RINGBUF_TYPE_MAX);

    //Allocate memory
    if (xBufferType != RINGBUF_TYPE_BYTEBUF) {
        xBufferSize = rbALIGN_SIZE(xBufferSize);    //xBufferSize is rounded up for no-split/allow-split buffers
    }
    Ringbuffer_t *pxNewRingbuffer = calloc(1, sizeof(Ringbuffer_t));
    uint8_t *pucRingbufferStorage = malloc(xBufferSize);
    if (pxNewRingbuffer == NULL || pucRingbufferStorage == NULL) {
        goto err;
    }
    prvInitializeNewRingbuffer(xBufferSize, xBufferType, pxNewRingbuffer, pucRingbufferStorage);
    return (RingbufHandle_t)pxNewRingbuffer;

err:
    //An error has occurred. Free memory and return NULL
    free(pxNewRingbuffer);
    free(pucRingbufferStorage);
    return NULL;
}
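/*
 * Minimal usage sketch for a no-split buffer (error handling trimmed; the buffer size
 * and timeouts are arbitrary example values):
 *
 *     RingbufHandle_t xRingbuf = xRingbufferCreate(1024, RINGBUF_TYPE_NOSPLIT);
 *     char txItem[] = "hello";
 *     if (xRingbufferSend(xRingbuf, txItem, sizeof(txItem), pdMS_TO_TICKS(100)) == pdTRUE) {
 *         size_t xItemSize;
 *         char *rxItem = (char *)xRingbufferReceive(xRingbuf, &xItemSize, pdMS_TO_TICKS(100));
 *         if (rxItem != NULL) {
 *             vRingbufferReturnItem(xRingbuf, (void *)rxItem);    //Item must be returned so its space can be reused
 *         }
 *     }
 *     vRingbufferDelete(xRingbuf);
 */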
RingbufHandle_t xRingbufferCreateNoSplit(size_t xItemSize, size_t xItemNum)
{
    return xRingbufferCreate((rbALIGN_SIZE(xItemSize) + rbHEADER_SIZE) * xItemNum, RINGBUF_TYPE_NOSPLIT);
}
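/*
 * For example, xRingbufferCreateNoSplit(10, 4) requests (rbALIGN_SIZE(10) + 8) * 4 = 80 bytes
 * of storage (assuming an 8-byte header), i.e. enough for four 10-byte items plus their headers.
 */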
RingbufHandle_t xRingbufferCreateStatic(size_t xBufferSize,
                                        RingbufferType_t xBufferType,
                                        uint8_t *pucRingbufferStorage,
                                        StaticRingbuffer_t *pxStaticRingbuffer)
{
    //Check arguments
    configASSERT(xBufferSize > 0);
    configASSERT(xBufferType < RINGBUF_TYPE_MAX);
    configASSERT(pucRingbufferStorage != NULL && pxStaticRingbuffer != NULL);
    if (xBufferType != RINGBUF_TYPE_BYTEBUF) {
        //No-split/allow-split buffer sizes must be 32-bit aligned
        configASSERT(rbCHECK_ALIGNED(xBufferSize));
    }

    Ringbuffer_t *pxNewRingbuffer = (Ringbuffer_t *)pxStaticRingbuffer;
    prvInitializeNewRingbuffer(xBufferSize, xBufferType, pxNewRingbuffer, pucRingbufferStorage);
    pxNewRingbuffer->uxRingbufferFlags |= rbBUFFER_STATIC_FLAG;
    return (RingbufHandle_t)pxNewRingbuffer;
}
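/*
 * Sketch of static creation: the caller provides both the control structure and the storage,
 * so nothing is heap allocated. The 128-byte size is an arbitrary example and must be
 * 32-bit aligned for no-split/allow-split buffers:
 *
 *     static StaticRingbuffer_t xRingbufStruct;
 *     static uint8_t ucRingbufStorage[128];
 *     RingbufHandle_t xRingbuf = xRingbufferCreateStatic(sizeof(ucRingbufStorage),
 *                                                        RINGBUF_TYPE_NOSPLIT,
 *                                                        ucRingbufStorage,
 *                                                        &xRingbufStruct);
 */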
BaseType_t xRingbufferSendAcquire(RingbufHandle_t xRingbuffer, void **ppvItem, size_t xItemSize, TickType_t xTicksToWait)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    //Check arguments
    configASSERT(pxRingbuffer);
    configASSERT(ppvItem != NULL);
    configASSERT((pxRingbuffer->uxRingbufferFlags & (rbBYTE_BUFFER_FLAG | rbALLOW_SPLIT_FLAG)) == 0);   //Send acquire is currently only supported in no-split buffers

    *ppvItem = NULL;
    if (xItemSize > pxRingbuffer->xMaxItemSize) {
        return pdFALSE;     //Data will never fit in the buffer.
    }
    if ((pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG) && xItemSize == 0) {
        return pdTRUE;      //Sending 0 bytes to a byte buffer has no effect
    }
    return prvSendAcquireGeneric(pxRingbuffer, NULL, ppvItem, xItemSize, xTicksToWait);
}

BaseType_t xRingbufferSendComplete(RingbufHandle_t xRingbuffer, void *pvItem)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    BaseType_t xNotifyQueueSet = pdFALSE;
    //Check arguments
    configASSERT(pxRingbuffer);
    configASSERT(pvItem != NULL);
    configASSERT((pxRingbuffer->uxRingbufferFlags & (rbBYTE_BUFFER_FLAG | rbALLOW_SPLIT_FLAG)) == 0);

    portENTER_CRITICAL(&pxRingbuffer->mux);
    prvSendItemDoneNoSplit(pxRingbuffer, pvItem);
    if (pxRingbuffer->xQueueSet) {
        //If the ring buffer was added to a queue set, notify the queue set
        xNotifyQueueSet = pdTRUE;
    } else {
        //If a task was waiting for data to arrive on the ring buffer, unblock it immediately.
        if (listLIST_IS_EMPTY(&pxRingbuffer->xTasksWaitingToReceive) == pdFALSE) {
            if (xTaskRemoveFromEventList(&pxRingbuffer->xTasksWaitingToReceive) == pdTRUE) {
                //The unblocked task will preempt us. Trigger a yield here.
                portYIELD_WITHIN_API();
            }
        }
    }
    portEXIT_CRITICAL(&pxRingbuffer->mux);

    if (xNotifyQueueSet == pdTRUE) {
        xQueueSend((QueueHandle_t)pxRingbuffer->xQueueSet, (QueueSetMemberHandle_t *)&pxRingbuffer, 0);
    }
    return pdTRUE;
}
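/*
 * Sketch of the acquire/complete flow on a no-split buffer: the producer gets a pointer
 * into the buffer, fills it in place (avoiding an extra copy), then marks it as written.
 * Sizes and timeouts are illustrative:
 *
 *     void *pvItem;
 *     if (xRingbufferSendAcquire(xRingbuf, &pvItem, 64, pdMS_TO_TICKS(100)) == pdTRUE) {
 *         memset(pvItem, 0xAA, 64);                   //Fill the acquired region directly
 *         xRingbufferSendComplete(xRingbuf, pvItem);  //Item becomes visible to receivers here
 *     }
 */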
BaseType_t xRingbufferSend(RingbufHandle_t xRingbuffer,
                           const void *pvItem,
                           size_t xItemSize,
                           TickType_t xTicksToWait)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    //Check arguments
    configASSERT(pxRingbuffer);
    configASSERT(pvItem != NULL || xItemSize == 0);
    if (xItemSize > pxRingbuffer->xMaxItemSize) {
        return pdFALSE;     //Data will never fit in the buffer.
    }
    if ((pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG) && xItemSize == 0) {
        return pdTRUE;      //Sending 0 bytes to a byte buffer has no effect
    }
    return prvSendAcquireGeneric(pxRingbuffer, pvItem, NULL, xItemSize, xTicksToWait);
}
BaseType_t xRingbufferSendFromISR(RingbufHandle_t xRingbuffer,
                                  const void *pvItem,
                                  size_t xItemSize,
                                  BaseType_t *pxHigherPriorityTaskWoken)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    BaseType_t xNotifyQueueSet = pdFALSE;
    BaseType_t xReturn;
    //Check arguments
    configASSERT(pxRingbuffer);
    configASSERT(pvItem != NULL || xItemSize == 0);
    if (xItemSize > pxRingbuffer->xMaxItemSize) {
        return pdFALSE;     //Data will never fit in the buffer.
    }
    if ((pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG) && xItemSize == 0) {
        return pdTRUE;      //Sending 0 bytes to a byte buffer has no effect
    }

    portENTER_CRITICAL_ISR(&pxRingbuffer->mux);
    if (pxRingbuffer->xCheckItemFits(xRingbuffer, xItemSize) == pdTRUE) {
        pxRingbuffer->vCopyItem(xRingbuffer, pvItem, xItemSize);
        if (pxRingbuffer->xQueueSet) {
            //If the ring buffer was added to a queue set, notify the queue set
            xNotifyQueueSet = pdTRUE;
        } else {
            //If a task was waiting for data to arrive on the ring buffer, unblock it immediately.
            if (listLIST_IS_EMPTY(&pxRingbuffer->xTasksWaitingToReceive) == pdFALSE) {
                if (xTaskRemoveFromEventList(&pxRingbuffer->xTasksWaitingToReceive) == pdTRUE) {
                    //The unblocked task will preempt us. Record that a context switch is required.
                    if (pxHigherPriorityTaskWoken != NULL) {
                        *pxHigherPriorityTaskWoken = pdTRUE;
                    }
                }
            }
        }
        xReturn = pdTRUE;
    } else {
        xReturn = pdFALSE;
    }
    portEXIT_CRITICAL_ISR(&pxRingbuffer->mux);
    //Defer notifying the queue set until we are outside the critical section.
    if (xNotifyQueueSet == pdTRUE) {
        xQueueSendFromISR((QueueHandle_t)pxRingbuffer->xQueueSet, (QueueSetMemberHandle_t *)&pxRingbuffer, pxHigherPriorityTaskWoken);
    }
    return xReturn;
}
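/*
 * Sketch of sending from an ISR: the woken flag is passed through so a context switch can
 * be requested on exit if a higher priority receiver was unblocked (the handler name and
 * payload are illustrative):
 *
 *     void IRAM_ATTR example_isr_handler(void *arg)
 *     {
 *         RingbufHandle_t xRingbuf = (RingbufHandle_t)arg;
 *         BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *         uint8_t ucData[4] = {0};
 *         xRingbufferSendFromISR(xRingbuf, ucData, sizeof(ucData), &xHigherPriorityTaskWoken);
 *         if (xHigherPriorityTaskWoken == pdTRUE) {
 *             portYIELD_FROM_ISR();
 *         }
 *     }
 */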
void *xRingbufferReceive(RingbufHandle_t xRingbuffer, size_t *pxItemSize, TickType_t xTicksToWait)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;

    //Check arguments
    configASSERT(pxRingbuffer && pxItemSize);

    //Attempt to retrieve an item
    void *pvTempItem;
    if (prvReceiveGeneric(pxRingbuffer, &pvTempItem, NULL, pxItemSize, NULL, 0, xTicksToWait) == pdTRUE) {
        return pvTempItem;
    } else {
        return NULL;
    }
}

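/*
 * Example usage (illustrative sketch; pairs xRingbufferReceive() with
 * vRingbufferReturnItem(), which is defined further below in this file):
 *
 *     void example_receive(RingbufHandle_t xRingbuffer)
 *     {
 *         size_t xItemSize;
 *         void *pvItem = xRingbufferReceive(xRingbuffer, &xItemSize, pdMS_TO_TICKS(100));
 *         if (pvItem != NULL) {
 *             //... use the xItemSize bytes at pvItem ...
 *             //The item must be returned so its space can be reused
 *             vRingbufferReturnItem(xRingbuffer, pvItem);
 *         }
 *     }
 */
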
void *xRingbufferReceiveFromISR(RingbufHandle_t xRingbuffer, size_t *pxItemSize)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;

    //Check arguments
    configASSERT(pxRingbuffer && pxItemSize);

    //Attempt to retrieve an item
    void *pvTempItem;
    if (prvReceiveGenericFromISR(pxRingbuffer, &pvTempItem, NULL, pxItemSize, NULL, 0) == pdTRUE) {
        return pvTempItem;
    } else {
        return NULL;
    }
}

BaseType_t xRingbufferReceiveSplit(RingbufHandle_t xRingbuffer,
                                   void **ppvHeadItem,
                                   void **ppvTailItem,
                                   size_t *pxHeadItemSize,
                                   size_t *pxTailItemSize,
                                   TickType_t xTicksToWait)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;

    //Check arguments
    configASSERT(pxRingbuffer && ppvHeadItem && ppvTailItem && pxHeadItemSize && pxTailItemSize);
    configASSERT(pxRingbuffer->uxRingbufferFlags & rbALLOW_SPLIT_FLAG);

    return prvReceiveGeneric(pxRingbuffer, ppvHeadItem, ppvTailItem, pxHeadItemSize, pxTailItemSize, 0, xTicksToWait);
}

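/*
 * Example usage (illustrative sketch; only valid for buffers created with
 * RINGBUF_TYPE_ALLOWSPLIT). An item that wrapped around the buffer is handed
 * back as two parts, and the tail pointer is NULL when the item was not split.
 * Both parts must be returned:
 *
 *     void example_receive_split(RingbufHandle_t xRingbuffer)
 *     {
 *         void *pvHead, *pvTail;
 *         size_t xHeadSize, xTailSize;
 *         if (xRingbufferReceiveSplit(xRingbuffer, &pvHead, &pvTail, &xHeadSize, &xTailSize, pdMS_TO_TICKS(100)) == pdTRUE) {
 *             //... process xHeadSize bytes at pvHead ...
 *             vRingbufferReturnItem(xRingbuffer, pvHead);
 *             if (pvTail != NULL) {
 *                 //... process xTailSize bytes at pvTail ...
 *                 vRingbufferReturnItem(xRingbuffer, pvTail);
 *             }
 *         }
 *     }
 */
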
BaseType_t xRingbufferReceiveSplitFromISR(RingbufHandle_t xRingbuffer,
                                          void **ppvHeadItem,
                                          void **ppvTailItem,
                                          size_t *pxHeadItemSize,
                                          size_t *pxTailItemSize)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;

    //Check arguments
    configASSERT(pxRingbuffer && ppvHeadItem && ppvTailItem && pxHeadItemSize && pxTailItemSize);
    configASSERT(pxRingbuffer->uxRingbufferFlags & rbALLOW_SPLIT_FLAG);

    return prvReceiveGenericFromISR(pxRingbuffer, ppvHeadItem, ppvTailItem, pxHeadItemSize, pxTailItemSize, 0);
}

void *xRingbufferReceiveUpTo(RingbufHandle_t xRingbuffer,
                             size_t *pxItemSize,
                             TickType_t xTicksToWait,
                             size_t xMaxSize)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;

    //Check arguments
    configASSERT(pxRingbuffer && pxItemSize);
    configASSERT(pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG);     //This function should only be called for byte buffers
    if (xMaxSize == 0) {
        return NULL;
    }

    //Attempt to retrieve up to xMaxSize bytes
    void *pvTempItem;
    if (prvReceiveGeneric(pxRingbuffer, &pvTempItem, NULL, pxItemSize, NULL, xMaxSize, xTicksToWait) == pdTRUE) {
        return pvTempItem;
    } else {
        return NULL;
    }
}

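/*
 * Example usage (illustrative sketch; only valid for RINGBUF_TYPE_BYTEBUF
 * buffers, and the 64-byte limit is arbitrary). Because the returned region is
 * always contiguous, a single call may yield fewer than xMaxSize bytes when
 * the data wraps around the end of the buffer; a second call retrieves the
 * remainder:
 *
 *     void example_receive_up_to(RingbufHandle_t xRingbuffer)
 *     {
 *         size_t xReceivedSize;
 *         void *pvData = xRingbufferReceiveUpTo(xRingbuffer, &xReceivedSize, pdMS_TO_TICKS(100), 64);
 *         if (pvData != NULL) {
 *             //... consume xReceivedSize bytes at pvData ...
 *             vRingbufferReturnItem(xRingbuffer, pvData);
 *         }
 *     }
 */
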
void *xRingbufferReceiveUpToFromISR(RingbufHandle_t xRingbuffer, size_t *pxItemSize, size_t xMaxSize)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;

    //Check arguments
    configASSERT(pxRingbuffer && pxItemSize);
    configASSERT(pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG);     //This function should only be called for byte buffers
    if (xMaxSize == 0) {
        return NULL;
    }

    //Attempt to retrieve up to xMaxSize bytes
    void *pvTempItem;
    if (prvReceiveGenericFromISR(pxRingbuffer, &pvTempItem, NULL, pxItemSize, NULL, xMaxSize) == pdTRUE) {
        return pvTempItem;
    } else {
        return NULL;
    }
}

void vRingbufferReturnItem(RingbufHandle_t xRingbuffer, void *pvItem)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);
    configASSERT(pvItem != NULL);

    portENTER_CRITICAL(&pxRingbuffer->mux);
    pxRingbuffer->vReturnItem(pxRingbuffer, (uint8_t *)pvItem);
    //If a task was waiting for space to send, unblock it immediately.
    if (listLIST_IS_EMPTY(&pxRingbuffer->xTasksWaitingToSend) == pdFALSE) {
        if (xTaskRemoveFromEventList(&pxRingbuffer->xTasksWaitingToSend) == pdTRUE) {
            //The unblocked task will preempt us. Trigger a yield here.
            portYIELD_WITHIN_API();
        }
    }
    portEXIT_CRITICAL(&pxRingbuffer->mux);
}

void vRingbufferReturnItemFromISR(RingbufHandle_t xRingbuffer, void *pvItem, BaseType_t *pxHigherPriorityTaskWoken)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);
    configASSERT(pvItem != NULL);

    portENTER_CRITICAL_ISR(&pxRingbuffer->mux);
    pxRingbuffer->vReturnItem(pxRingbuffer, (uint8_t *)pvItem);
    //If a task was waiting for space to send, unblock it immediately.
    if (listLIST_IS_EMPTY(&pxRingbuffer->xTasksWaitingToSend) == pdFALSE) {
        if (xTaskRemoveFromEventList(&pxRingbuffer->xTasksWaitingToSend) == pdTRUE) {
            //The unblocked task will preempt us. Record that a context switch is required.
            if (pxHigherPriorityTaskWoken != NULL) {
                *pxHigherPriorityTaskWoken = pdTRUE;
            }
        }
    }
    portEXIT_CRITICAL_ISR(&pxRingbuffer->mux);
}

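/*
 * Example usage from an ISR (illustrative sketch; the handler name is
 * hypothetical). Items obtained with the FromISR receive variants are handed
 * back with vRingbufferReturnItemFromISR():
 *
 *     void example_drain_isr(void *pvArg)
 *     {
 *         RingbufHandle_t xRingbuffer = (RingbufHandle_t)pvArg;
 *         BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *         size_t xItemSize;
 *         void *pvItem = xRingbufferReceiveFromISR(xRingbuffer, &xItemSize);
 *         if (pvItem != NULL) {
 *             //... use the xItemSize bytes at pvItem ...
 *             vRingbufferReturnItemFromISR(xRingbuffer, pvItem, &xHigherPriorityTaskWoken);
 *         }
 *         if (xHigherPriorityTaskWoken == pdTRUE) {
 *             portYIELD_FROM_ISR();
 *         }
 *     }
 */
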
void vRingbufferDelete(RingbufHandle_t xRingbuffer)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);

    //Only free the memory if the ring buffer was not statically allocated
    if (!(pxRingbuffer->uxRingbufferFlags & rbBUFFER_STATIC_FLAG)) {
        free(pxRingbuffer->pucHead);
        free(pxRingbuffer);
    }
}

size_t xRingbufferGetMaxItemSize(RingbufHandle_t xRingbuffer)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);
    return pxRingbuffer->xMaxItemSize;
}

size_t xRingbufferGetCurFreeSize(RingbufHandle_t xRingbuffer)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);

    size_t xFreeSize;
    portENTER_CRITICAL(&pxRingbuffer->mux);
    xFreeSize = pxRingbuffer->xGetCurMaxSize(pxRingbuffer);
    portEXIT_CRITICAL(&pxRingbuffer->mux);
    return xFreeSize;
}

BaseType_t xRingbufferAddToQueueSetRead(RingbufHandle_t xRingbuffer, QueueSetHandle_t xQueueSet)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    BaseType_t xReturn;
    configASSERT(pxRingbuffer && xQueueSet);

    portENTER_CRITICAL(&pxRingbuffer->mux);
    if (pxRingbuffer->xQueueSet != NULL || prvCheckItemAvail(pxRingbuffer) == pdTRUE) {
        /*
         * - Cannot add ring buffer to more than one queue set
         * - It is dangerous to add a ring buffer to a queue set if the ring buffer currently has data to be read
         */
        xReturn = pdFALSE;
    } else {
        //Add ring buffer to queue set
        pxRingbuffer->xQueueSet = xQueueSet;
        xReturn = pdTRUE;
    }
    portEXIT_CRITICAL(&pxRingbuffer->mux);

    return xReturn;
}

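/*
 * Example usage (illustrative sketch; the queue set length and blocking times
 * are arbitrary). The ring buffer must be added to the set while it is empty;
 * xRingbufferCanRead() then checks whether the member returned by
 * xQueueSelectFromSet() is this ring buffer:
 *
 *     void example_queue_set(RingbufHandle_t xRingbuffer)
 *     {
 *         QueueSetHandle_t xQueueSet = xQueueCreateSet(4);
 *         if (xRingbufferAddToQueueSetRead(xRingbuffer, xQueueSet) == pdTRUE) {
 *             QueueSetMemberHandle_t xMember = xQueueSelectFromSet(xQueueSet, portMAX_DELAY);
 *             if (xRingbufferCanRead(xRingbuffer, xMember) == pdTRUE) {
 *                 size_t xItemSize;
 *                 void *pvItem = xRingbufferReceive(xRingbuffer, &xItemSize, 0);
 *                 if (pvItem != NULL) {
 *                     vRingbufferReturnItem(xRingbuffer, pvItem);
 *                 }
 *             }
 *         }
 *     }
 */
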
BaseType_t xRingbufferRemoveFromQueueSetRead(RingbufHandle_t xRingbuffer, QueueSetHandle_t xQueueSet)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    BaseType_t xReturn;
    configASSERT(pxRingbuffer && xQueueSet);

    portENTER_CRITICAL(&pxRingbuffer->mux);
    if (pxRingbuffer->xQueueSet != xQueueSet || prvCheckItemAvail(pxRingbuffer) == pdTRUE) {
        /*
         * - Ring buffer was never added to this queue set
         * - It is dangerous to remove a ring buffer from a queue set if the ring buffer currently has data to be read
         */
        xReturn = pdFALSE;
    } else {
        //Remove ring buffer from queue set
        pxRingbuffer->xQueueSet = NULL;
        xReturn = pdTRUE;
    }
    portEXIT_CRITICAL(&pxRingbuffer->mux);

    return xReturn;
}

void vRingbufferGetInfo(RingbufHandle_t xRingbuffer,
                        UBaseType_t *uxFree,
                        UBaseType_t *uxRead,
                        UBaseType_t *uxWrite,
                        UBaseType_t *uxAcquire,
                        UBaseType_t *uxItemsWaiting)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);

    portENTER_CRITICAL(&pxRingbuffer->mux);
    if (uxFree != NULL) {
        *uxFree = (UBaseType_t)(pxRingbuffer->pucFree - pxRingbuffer->pucHead);
    }
    if (uxRead != NULL) {
        *uxRead = (UBaseType_t)(pxRingbuffer->pucRead - pxRingbuffer->pucHead);
    }
    if (uxWrite != NULL) {
        *uxWrite = (UBaseType_t)(pxRingbuffer->pucWrite - pxRingbuffer->pucHead);
    }
    if (uxAcquire != NULL) {
        *uxAcquire = (UBaseType_t)(pxRingbuffer->pucAcquire - pxRingbuffer->pucHead);
    }
    if (uxItemsWaiting != NULL) {
        *uxItemsWaiting = (UBaseType_t)(pxRingbuffer->xItemsWaiting);
    }
    portEXIT_CRITICAL(&pxRingbuffer->mux);
}

void xRingbufferPrintInfo(RingbufHandle_t xRingbuffer)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);

    printf("Rb size:%" PRId32 "\tfree: %" PRId32 "\trptr: %" PRId32 "\tfreeptr: %" PRId32 "\twptr: %" PRId32 ", aptr: %" PRId32 "\n",
           (int32_t)pxRingbuffer->xSize, (int32_t)prvGetFreeSize(pxRingbuffer),
           (int32_t)(pxRingbuffer->pucRead - pxRingbuffer->pucHead),
           (int32_t)(pxRingbuffer->pucFree - pxRingbuffer->pucHead),
           (int32_t)(pxRingbuffer->pucWrite - pxRingbuffer->pucHead),
           (int32_t)(pxRingbuffer->pucAcquire - pxRingbuffer->pucHead));
}

BaseType_t xRingbufferGetStaticBuffer(RingbufHandle_t xRingbuffer, uint8_t **ppucRingbufferStorage, StaticRingbuffer_t **ppxStaticRingbuffer)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    BaseType_t xReturn;
    configASSERT(pxRingbuffer && ppucRingbufferStorage && ppxStaticRingbuffer);

    if (pxRingbuffer->uxRingbufferFlags & rbBUFFER_STATIC_FLAG) {
        *ppucRingbufferStorage = pxRingbuffer->pucHead;
        *ppxStaticRingbuffer = (StaticRingbuffer_t *)pxRingbuffer;
        xReturn = pdTRUE;
    } else {
        xReturn = pdFALSE;
    }
    return xReturn;
}

RingbufHandle_t xRingbufferCreateWithCaps(size_t xBufferSize, RingbufferType_t xBufferType, UBaseType_t uxMemoryCaps)
{
    RingbufHandle_t xRingbuffer;
    StaticRingbuffer_t *pxStaticRingbuffer;
    uint8_t *pucRingbufferStorage;

    // Allocate the control structure and storage area with the requested capabilities
    pxStaticRingbuffer = heap_caps_malloc(sizeof(StaticRingbuffer_t), (uint32_t)uxMemoryCaps);
    pucRingbufferStorage = heap_caps_malloc(xBufferSize, (uint32_t)uxMemoryCaps);
    if (pxStaticRingbuffer == NULL || pucRingbufferStorage == NULL) {
        goto err;
    }

    // Create the ring buffer using the static creation API
    xRingbuffer = xRingbufferCreateStatic(xBufferSize, xBufferType, pucRingbufferStorage, pxStaticRingbuffer);
    if (xRingbuffer == NULL) {
        goto err;
    }
    return xRingbuffer;

err:
    // Free whichever allocations succeeded (heap_caps_free() ignores NULL pointers)
    heap_caps_free(pxStaticRingbuffer);
    heap_caps_free(pucRingbufferStorage);
    return NULL;
}

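/*
 * Example usage (illustrative sketch; the 4096-byte size and the capability
 * flag are arbitrary). Buffers created this way must be deleted with
 * vRingbufferDeleteWithCaps(), defined below, so that both heap allocations
 * are released:
 *
 *     void example_create_with_caps(void)
 *     {
 *         RingbufHandle_t xRingbuffer = xRingbufferCreateWithCaps(4096, RINGBUF_TYPE_NOSPLIT, MALLOC_CAP_SPIRAM);
 *         if (xRingbuffer != NULL) {
 *             //... use the ring buffer ...
 *             vRingbufferDeleteWithCaps(xRingbuffer);
 *         }
 *     }
 */
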
void vRingbufferDeleteWithCaps(RingbufHandle_t xRingbuffer)
{
    BaseType_t xResult;
    StaticRingbuffer_t *pxStaticRingbuffer = NULL;
    uint8_t *pucRingbufferStorage = NULL;

    // Retrieve the buffers used to create the ring buffer before deleting it
    xResult = xRingbufferGetStaticBuffer(xRingbuffer, &pucRingbufferStorage, &pxStaticRingbuffer);
    configASSERT(xResult == pdTRUE);

    // Delete the ring buffer
    vRingbufferDelete(xRingbuffer);

    // Free the memory buffers
    heap_caps_free(pxStaticRingbuffer);
    heap_caps_free(pucRingbufferStorage);
}