/*
 * SPDX-FileCopyrightText: 2015-2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <stdlib.h>
#include <string.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include "freertos/ringbuf.h"

//32-bit alignment macros
#define rbALIGN_MASK (0x03)
#define rbALIGN_SIZE( xSize )       ( ( xSize + rbALIGN_MASK ) & ~rbALIGN_MASK )
#define rbCHECK_ALIGNED( pvPtr )    ( ( ( UBaseType_t ) ( pvPtr ) & rbALIGN_MASK ) == 0 )
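/*
 * Example of the alignment arithmetic above (illustrative values, not part of
 * the original file): rbALIGN_SIZE() rounds a size up to the next 4-byte
 * boundary, and rbCHECK_ALIGNED() simply checks that the two low address bits
 * are zero.
 *
 *   rbALIGN_SIZE(5)  == ( 5 + 0x03) & ~0x03 ==  8
 *   rbALIGN_SIZE(8)  == ( 8 + 0x03) & ~0x03 ==  8
 *   rbALIGN_SIZE(13) == (13 + 0x03) & ~0x03 == 16
 */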
//Ring buffer flags
#define rbALLOW_SPLIT_FLAG      ( ( UBaseType_t ) 1 )   //The ring buffer allows items to be split
#define rbBYTE_BUFFER_FLAG      ( ( UBaseType_t ) 2 )   //The ring buffer is a byte buffer
#define rbBUFFER_FULL_FLAG      ( ( UBaseType_t ) 4 )   //The ring buffer is currently full (write pointer == free pointer)
#define rbBUFFER_STATIC_FLAG    ( ( UBaseType_t ) 8 )   //The ring buffer is statically allocated

//Item flags
#define rbITEM_FREE_FLAG        ( ( UBaseType_t ) 1 )   //Item has been retrieved and returned by application, free to overwrite
#define rbITEM_DUMMY_DATA_FLAG  ( ( UBaseType_t ) 2 )   //Data from here to the end of the ring buffer is dummy data. Restart reading at the head of the buffer
#define rbITEM_SPLIT_FLAG       ( ( UBaseType_t ) 4 )   //Valid for RINGBUF_TYPE_ALLOWSPLIT, indicating that the rest of the data is wrapped around
#define rbITEM_WRITTEN_FLAG     ( ( UBaseType_t ) 8 )   //Item has been written to by the application, thus it is free to be read

//Static allocation related
#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
#define rbGET_TX_SEM_HANDLE( pxRingbuffer ) ( (SemaphoreHandle_t) &(pxRingbuffer->xTransSemStatic) )
#define rbGET_RX_SEM_HANDLE( pxRingbuffer ) ( (SemaphoreHandle_t) &(pxRingbuffer->xRecvSemStatic) )
#else
#define rbGET_TX_SEM_HANDLE( pxRingbuffer ) ( pxRingbuffer->xTransSemHandle )
#define rbGET_RX_SEM_HANDLE( pxRingbuffer ) ( pxRingbuffer->xRecvSemHandle )
#endif

typedef struct {
    //The size of this structure must be 32-bit aligned
    size_t xItemLen;
    UBaseType_t uxItemFlags;
} ItemHeader_t;

#define rbHEADER_SIZE   sizeof(ItemHeader_t)
typedef struct RingbufferDefinition Ringbuffer_t;
typedef BaseType_t (*CheckItemFitsFunction_t)(Ringbuffer_t *pxRingbuffer, size_t xItemSize);
typedef void (*CopyItemFunction_t)(Ringbuffer_t *pxRingbuffer, const uint8_t *pcItem, size_t xItemSize);
typedef BaseType_t (*CheckItemAvailFunction_t) (Ringbuffer_t *pxRingbuffer);
typedef void *(*GetItemFunction_t)(Ringbuffer_t *pxRingbuffer, BaseType_t *pxIsSplit, size_t xMaxSize, size_t *pxItemSize);
typedef void (*ReturnItemFunction_t)(Ringbuffer_t *pxRingbuffer, uint8_t *pvItem);
typedef size_t (*GetCurMaxSizeFunction_t)(Ringbuffer_t *pxRingbuffer);

typedef struct RingbufferDefinition {
    size_t xSize;                               //Size of the data storage
    size_t xMaxItemSize;                        //Maximum item size
    UBaseType_t uxRingbufferFlags;              //Flags to indicate the type and status of ring buffer

    CheckItemFitsFunction_t xCheckItemFits;     //Function to check if item can currently fit in ring buffer
    CopyItemFunction_t vCopyItem;               //Function to copy item to ring buffer
    GetItemFunction_t pvGetItem;                //Function to get item from ring buffer
    ReturnItemFunction_t vReturnItem;           //Function to return item to ring buffer
    GetCurMaxSizeFunction_t xGetCurMaxSize;     //Function to get current free size

    uint8_t *pucAcquire;                        //Acquire Pointer. Points to where the next item should be acquired.
    uint8_t *pucWrite;                          //Write Pointer. Points to where the next item should be written
    uint8_t *pucRead;                           //Read Pointer. Points to where the next item should be read from
    uint8_t *pucFree;                           //Free Pointer. Points to the last item that has yet to be returned to the ring buffer
    uint8_t *pucHead;                           //Pointer to the start of the ring buffer storage area
    uint8_t *pucTail;                           //Pointer to the end of the ring buffer storage area

    BaseType_t xItemsWaiting;                   //Number of items/bytes (for byte buffers) currently in the ring buffer that have not yet been read
    /*
     * TransSem: Binary semaphore used to indicate to a blocked transmitting task
     *           that more free space has become available or that the block has
     *           timed out.
     *
     * RecvSem: Binary semaphore used to indicate to a blocked receiving task that
     *          new data/item has been written to the ring buffer.
     *
     * Note - When static allocation is enabled, the two semaphores are always
     *        statically stored in the ring buffer's control structure
     *        regardless of whether the ring buffer is allocated dynamically or
     *        statically. When static allocation is disabled, the two semaphores
     *        are allocated dynamically and their handles stored instead, thus
     *        making the ring buffer's control structure slightly smaller when
     *        static allocation is disabled.
     */
#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
    StaticSemaphore_t xTransSemStatic;
    StaticSemaphore_t xRecvSemStatic;
#else
    SemaphoreHandle_t xTransSemHandle;
    SemaphoreHandle_t xRecvSemHandle;
#endif
    portMUX_TYPE mux;                           //Spinlock required for SMP
} Ringbuffer_t;

#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
#if __GNUC_PREREQ(4, 6)
_Static_assert(sizeof(StaticRingbuffer_t) == sizeof(Ringbuffer_t), "StaticRingbuffer_t != Ringbuffer_t");
#endif
#endif

/*
Remark: A counting semaphore for items_buffered_sem would be more logical, but counting semaphores in
FreeRTOS need a maximum count and allocate more memory the larger the maximum count is. Here, we
would need to set the maximum to the maximum number of times a null-byte unit fits in the buffer,
which is quite high and so would waste a fair amount of memory.
*/
/* --------------------------- Static Declarations -------------------------- */
/*
 * WARNING: All of the following static functions (except generic functions)
 * ARE NOT THREAD SAFE. Therefore they should only be called within a critical
 * section (using spin locks)
 */

//Initialize a ring buffer after space has been allocated for it
static void prvInitializeNewRingbuffer(size_t xBufferSize,
                                       RingbufferType_t xBufferType,
                                       Ringbuffer_t *pxNewRingbuffer,
                                       uint8_t *pucRingbufferStorage);

//Calculate current amount of free space (in bytes) in the ring buffer
static size_t prvGetFreeSize(Ringbuffer_t *pxRingbuffer);

//Checks if an item/data is currently available for retrieval
static BaseType_t prvCheckItemAvail(Ringbuffer_t *pxRingbuffer);

//Checks if an item will currently fit in a no-split/allow-split ring buffer
static BaseType_t prvCheckItemFitsDefault(Ringbuffer_t *pxRingbuffer, size_t xItemSize);

//Checks if an item will currently fit in a byte buffer
static BaseType_t prvCheckItemFitsByteBuffer(Ringbuffer_t *pxRingbuffer, size_t xItemSize);

//Copies an item to a no-split ring buffer. Only call this function after calling prvCheckItemFitsDefault()
static void prvCopyItemNoSplit(Ringbuffer_t *pxRingbuffer, const uint8_t *pucItem, size_t xItemSize);

//Copies an item to an allow-split ring buffer. Only call this function after calling prvCheckItemFitsDefault()
static void prvCopyItemAllowSplit(Ringbuffer_t *pxRingbuffer, const uint8_t *pucItem, size_t xItemSize);

//Copies an item to a byte buffer. Only call this function after calling prvCheckItemFitsByteBuffer()
static void prvCopyItemByteBuf(Ringbuffer_t *pxRingbuffer, const uint8_t *pucItem, size_t xItemSize);

//Retrieve item from no-split/allow-split ring buffer. *pxIsSplit is set to pdTRUE if the retrieved item is split
static void *prvGetItemDefault(Ringbuffer_t *pxRingbuffer,
                               BaseType_t *pxIsSplit,
                               size_t xUnusedParam,
                               size_t *pxItemSize);

//Retrieve data from byte buffer. If xMaxSize is 0, all contiguous data is retrieved
static void *prvGetItemByteBuf(Ringbuffer_t *pxRingbuffer,
                               BaseType_t *pxUnusedParam,
                               size_t xMaxSize,
                               size_t *pxItemSize);

//Return an item to a split/no-split ring buffer
static void prvReturnItemDefault(Ringbuffer_t *pxRingbuffer, uint8_t *pucItem);

//Return data to a byte buffer
static void prvReturnItemByteBuf(Ringbuffer_t *pxRingbuffer, uint8_t *pucItem);

//Get the maximum size an item can currently have if sent to a no-split ring buffer
static size_t prvGetCurMaxSizeNoSplit(Ringbuffer_t *pxRingbuffer);

//Get the maximum size an item can currently have if sent to an allow-split ring buffer
static size_t prvGetCurMaxSizeAllowSplit(Ringbuffer_t *pxRingbuffer);

//Get the maximum size an item can currently have if sent to a byte buffer
static size_t prvGetCurMaxSizeByteBuf(Ringbuffer_t *pxRingbuffer);

/**
 * Generic function used to retrieve an item/data from ring buffers. If called on
 * an allow-split buffer, and pvItem2 and xItemSize2 are not NULL, both parts of
 * a split item will be retrieved. xMaxSize will only take effect if called on
 * byte buffers.
 */
static BaseType_t prvReceiveGeneric(Ringbuffer_t *pxRingbuffer,
                                    void **pvItem1,
                                    void **pvItem2,
                                    size_t *xItemSize1,
                                    size_t *xItemSize2,
                                    size_t xMaxSize,
                                    TickType_t xTicksToWait);

//Generic function used to retrieve an item/data from ring buffers in an ISR
static BaseType_t prvReceiveGenericFromISR(Ringbuffer_t *pxRingbuffer,
                                           void **pvItem1,
                                           void **pvItem2,
                                           size_t *xItemSize1,
                                           size_t *xItemSize2,
                                           size_t xMaxSize);
/* --------------------------- Static Definitions --------------------------- */

static void prvInitializeNewRingbuffer(size_t xBufferSize,
                                       RingbufferType_t xBufferType,
                                       Ringbuffer_t *pxNewRingbuffer,
                                       uint8_t *pucRingbufferStorage)
{
    //Initialize values
    pxNewRingbuffer->xSize = xBufferSize;
    pxNewRingbuffer->pucHead = pucRingbufferStorage;
    pxNewRingbuffer->pucTail = pucRingbufferStorage + xBufferSize;
    pxNewRingbuffer->pucFree = pucRingbufferStorage;
    pxNewRingbuffer->pucRead = pucRingbufferStorage;
    pxNewRingbuffer->pucWrite = pucRingbufferStorage;
    pxNewRingbuffer->pucAcquire = pucRingbufferStorage;
    pxNewRingbuffer->xItemsWaiting = 0;
    pxNewRingbuffer->uxRingbufferFlags = 0;

    //Initialize type dependent values and function pointers
    if (xBufferType == RINGBUF_TYPE_NOSPLIT) {
        pxNewRingbuffer->xCheckItemFits = prvCheckItemFitsDefault;
        pxNewRingbuffer->vCopyItem = prvCopyItemNoSplit;
        pxNewRingbuffer->pvGetItem = prvGetItemDefault;
        pxNewRingbuffer->vReturnItem = prvReturnItemDefault;
        /*
         * Worst case scenario is when the read/write/acquire/free pointers are all
         * pointing to the halfway point of the buffer.
         */
        pxNewRingbuffer->xMaxItemSize = rbALIGN_SIZE(pxNewRingbuffer->xSize / 2) - rbHEADER_SIZE;
        pxNewRingbuffer->xGetCurMaxSize = prvGetCurMaxSizeNoSplit;
    } else if (xBufferType == RINGBUF_TYPE_ALLOWSPLIT) {
        pxNewRingbuffer->uxRingbufferFlags |= rbALLOW_SPLIT_FLAG;
        pxNewRingbuffer->xCheckItemFits = prvCheckItemFitsDefault;
        pxNewRingbuffer->vCopyItem = prvCopyItemAllowSplit;
        pxNewRingbuffer->pvGetItem = prvGetItemDefault;
        pxNewRingbuffer->vReturnItem = prvReturnItemDefault;
        //Worst case an item is split into two, incurring two headers of overhead
        pxNewRingbuffer->xMaxItemSize = pxNewRingbuffer->xSize - (sizeof(ItemHeader_t) * 2);
        pxNewRingbuffer->xGetCurMaxSize = prvGetCurMaxSizeAllowSplit;
    } else { //Byte Buffer
        pxNewRingbuffer->uxRingbufferFlags |= rbBYTE_BUFFER_FLAG;
        pxNewRingbuffer->xCheckItemFits = prvCheckItemFitsByteBuffer;
        pxNewRingbuffer->vCopyItem = prvCopyItemByteBuf;
        pxNewRingbuffer->pvGetItem = prvGetItemByteBuf;
        pxNewRingbuffer->vReturnItem = prvReturnItemByteBuf;
        //Byte buffers do not incur any overhead
        pxNewRingbuffer->xMaxItemSize = pxNewRingbuffer->xSize;
        pxNewRingbuffer->xGetCurMaxSize = prvGetCurMaxSizeByteBuf;
    }
    xSemaphoreGive(rbGET_TX_SEM_HANDLE(pxNewRingbuffer));
    vPortCPUInitializeMutex(&pxNewRingbuffer->mux);
}
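/*
 * Worked example of the worst-case sizing above (illustrative numbers, not
 * part of the original file): for a 128-byte no-split buffer,
 *   xMaxItemSize = rbALIGN_SIZE(128 / 2) - rbHEADER_SIZE = 64 - 8 = 56 bytes
 * (assuming the 8-byte ItemHeader_t of a typical 32-bit build). For a
 * 128-byte allow-split buffer the limit is 128 - 2 * 8 = 112 bytes, and a
 * byte buffer can accept up to the full 128 bytes.
 */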
static size_t prvGetFreeSize(Ringbuffer_t *pxRingbuffer)
{
    size_t xReturn;
    if (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) {
        xReturn = 0;
    } else {
        BaseType_t xFreeSize = pxRingbuffer->pucFree - pxRingbuffer->pucAcquire;
        //Check if xFreeSize has underflowed
        if (xFreeSize <= 0) {
            xFreeSize += pxRingbuffer->xSize;
        }
        xReturn = xFreeSize;
    }
    configASSERT(xReturn <= pxRingbuffer->xSize);
    return xReturn;
}
static BaseType_t prvCheckItemFitsDefault(Ringbuffer_t *pxRingbuffer, size_t xItemSize)
{
    //Check arguments and buffer state
    configASSERT(rbCHECK_ALIGNED(pxRingbuffer->pucAcquire));    //pucAcquire is always aligned in no-split/allow-split ring buffers
    configASSERT(pxRingbuffer->pucAcquire >= pxRingbuffer->pucHead && pxRingbuffer->pucAcquire < pxRingbuffer->pucTail);    //Check acquire pointer is within bounds

    size_t xTotalItemSize = rbALIGN_SIZE(xItemSize) + rbHEADER_SIZE;    //Rounded up aligned item size with header
    if (pxRingbuffer->pucAcquire == pxRingbuffer->pucFree) {
        //Buffer is either completely empty or completely full
        return (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) ? pdFALSE : pdTRUE;
    }
    if (pxRingbuffer->pucFree > pxRingbuffer->pucAcquire) {
        //Free space does not wrap around
        return (xTotalItemSize <= pxRingbuffer->pucFree - pxRingbuffer->pucAcquire) ? pdTRUE : pdFALSE;
    }
    //Free space wraps around
    if (xTotalItemSize <= pxRingbuffer->pucTail - pxRingbuffer->pucAcquire) {
        return pdTRUE;      //Item fits without wrapping around
    }
    //Check if item fits by wrapping
    if (pxRingbuffer->uxRingbufferFlags & rbALLOW_SPLIT_FLAG) {
        //Allow-split wrapping incurs an extra header
        return (xTotalItemSize + rbHEADER_SIZE <= pxRingbuffer->xSize - (pxRingbuffer->pucAcquire - pxRingbuffer->pucFree)) ? pdTRUE : pdFALSE;
    } else {
        return (xTotalItemSize <= pxRingbuffer->pucFree - pxRingbuffer->pucHead) ? pdTRUE : pdFALSE;
    }
}

static BaseType_t prvCheckItemFitsByteBuffer(Ringbuffer_t *pxRingbuffer, size_t xItemSize)
{
    //Check arguments and buffer state
    configASSERT(pxRingbuffer->pucAcquire >= pxRingbuffer->pucHead && pxRingbuffer->pucAcquire < pxRingbuffer->pucTail);    //Check acquire pointer is within bounds

    if (pxRingbuffer->pucAcquire == pxRingbuffer->pucFree) {
        //Buffer is either completely empty or completely full
        return (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) ? pdFALSE : pdTRUE;
    }
    if (pxRingbuffer->pucFree > pxRingbuffer->pucAcquire) {
        //Free space does not wrap around
        return (xItemSize <= pxRingbuffer->pucFree - pxRingbuffer->pucAcquire) ? pdTRUE : pdFALSE;
    }
    //Free space wraps around
    return (xItemSize <= pxRingbuffer->xSize - (pxRingbuffer->pucAcquire - pxRingbuffer->pucFree)) ? pdTRUE : pdFALSE;
}
static uint8_t *prvAcquireItemNoSplit(Ringbuffer_t *pxRingbuffer, size_t xItemSize)
{
    //Check arguments and buffer state
    size_t xAlignedItemSize = rbALIGN_SIZE(xItemSize);                  //Rounded up aligned item size
    size_t xRemLen = pxRingbuffer->pucTail - pxRingbuffer->pucAcquire;  //Length from pucAcquire until end of buffer
    configASSERT(rbCHECK_ALIGNED(pxRingbuffer->pucAcquire));            //pucAcquire is always aligned in no-split ring buffers
    configASSERT(pxRingbuffer->pucAcquire >= pxRingbuffer->pucHead && pxRingbuffer->pucAcquire < pxRingbuffer->pucTail);    //Check acquire pointer is within bounds
    configASSERT(xRemLen >= rbHEADER_SIZE);                             //Remaining length must be able to at least fit an item header

    //If remaining length can't fit item, set as dummy data and wrap around
    if (xRemLen < xAlignedItemSize + rbHEADER_SIZE) {
        ItemHeader_t *pxDummy = (ItemHeader_t *)pxRingbuffer->pucAcquire;
        pxDummy->uxItemFlags = rbITEM_DUMMY_DATA_FLAG;      //Set remaining length as dummy data
        pxDummy->xItemLen = 0;                              //Dummy data should have no length
        pxRingbuffer->pucAcquire = pxRingbuffer->pucHead;   //Reset acquire pointer to wrap around
    }

    //Item should be guaranteed to fit at this point. Set item header and copy data
    ItemHeader_t *pxHeader = (ItemHeader_t *)pxRingbuffer->pucAcquire;
    pxHeader->xItemLen = xItemSize;
    pxHeader->uxItemFlags = 0;

    //Hold the buffer address without touching pucWrite
    uint8_t *item_address = pxRingbuffer->pucAcquire + rbHEADER_SIZE;
    pxRingbuffer->pucAcquire += rbHEADER_SIZE + xAlignedItemSize;   //Advance pucAcquire past header and the item to next aligned address

    //After the allocation, add some padding after the buffer and correct the flags
    //If current remaining length can't fit a header, wrap around the acquire pointer
    if (pxRingbuffer->pucTail - pxRingbuffer->pucAcquire < rbHEADER_SIZE) {
        pxRingbuffer->pucAcquire = pxRingbuffer->pucHead;   //Wrap around pucAcquire
    }
    //Check if buffer is full
    if (pxRingbuffer->pucAcquire == pxRingbuffer->pucFree) {
        //Mark the buffer as full to distinguish it from an empty buffer
        pxRingbuffer->uxRingbufferFlags |= rbBUFFER_FULL_FLAG;
    }
    return item_address;
}

static void prvSendItemDoneNoSplit(Ringbuffer_t *pxRingbuffer, uint8_t *pucItem)
{
    //Check arguments and buffer state
    configASSERT(rbCHECK_ALIGNED(pucItem));
    configASSERT(pucItem >= pxRingbuffer->pucHead);
    configASSERT(pucItem <= pxRingbuffer->pucTail);     //Inclusive of pucTail in the case of zero length item at the very end

    //Get and check header of the item
    ItemHeader_t *pxCurHeader = (ItemHeader_t *)(pucItem - rbHEADER_SIZE);
    configASSERT(pxCurHeader->xItemLen <= pxRingbuffer->xMaxItemSize);
    configASSERT((pxCurHeader->uxItemFlags & rbITEM_DUMMY_DATA_FLAG) == 0);     //Dummy items should never have been written
    configASSERT((pxCurHeader->uxItemFlags & rbITEM_WRITTEN_FLAG) == 0);        //Indicates item has already been written before
    pxCurHeader->uxItemFlags &= ~rbITEM_SPLIT_FLAG;     //Clear wrap flag if set (not strictly necessary)
    pxCurHeader->uxItemFlags |= rbITEM_WRITTEN_FLAG;    //Mark as written
    pxRingbuffer->xItemsWaiting++;

    /*
     * Items might not be written in the order they were acquired. Move the
     * write pointer up to the next item that has not been marked as written (by
     * the written flag) or up till the acquire pointer. When advancing the write
     * pointer, items that have already been written or items with dummy data
     * should be skipped over
     */
    pxCurHeader = (ItemHeader_t *)pxRingbuffer->pucWrite;
    //Skip over items that have already been written or are dummy items
    while (((pxCurHeader->uxItemFlags & rbITEM_WRITTEN_FLAG) || (pxCurHeader->uxItemFlags & rbITEM_DUMMY_DATA_FLAG)) && pxRingbuffer->pucWrite != pxRingbuffer->pucAcquire) {
        if (pxCurHeader->uxItemFlags & rbITEM_DUMMY_DATA_FLAG) {
            pxCurHeader->uxItemFlags |= rbITEM_WRITTEN_FLAG;    //Mark as written (not strictly necessary but adds redundancy)
            pxRingbuffer->pucWrite = pxRingbuffer->pucHead;     //Wrap around due to dummy data
        } else {
            //Item with data that has already been written, advance write pointer past this item
            size_t xAlignedItemSize = rbALIGN_SIZE(pxCurHeader->xItemLen);
            pxRingbuffer->pucWrite += xAlignedItemSize + rbHEADER_SIZE;
            //Redundancy check to ensure write pointer has not overshot buffer bounds
            configASSERT(pxRingbuffer->pucWrite <= pxRingbuffer->pucHead + pxRingbuffer->xSize);
        }
        //Check if pucWrite requires wrap around
        if ((pxRingbuffer->pucTail - pxRingbuffer->pucWrite) < rbHEADER_SIZE) {
            pxRingbuffer->pucWrite = pxRingbuffer->pucHead;
        }
        pxCurHeader = (ItemHeader_t *)pxRingbuffer->pucWrite;   //Update header to point to item
    }
}
static void prvCopyItemNoSplit(Ringbuffer_t *pxRingbuffer, const uint8_t *pucItem, size_t xItemSize)
{
    uint8_t *item_addr = prvAcquireItemNoSplit(pxRingbuffer, xItemSize);
    memcpy(item_addr, pucItem, xItemSize);
    prvSendItemDoneNoSplit(pxRingbuffer, item_addr);
}

static void prvCopyItemAllowSplit(Ringbuffer_t *pxRingbuffer, const uint8_t *pucItem, size_t xItemSize)
{
    //Check arguments and buffer state
    size_t xAlignedItemSize = rbALIGN_SIZE(xItemSize);                  //Rounded up aligned item size
    size_t xRemLen = pxRingbuffer->pucTail - pxRingbuffer->pucAcquire;  //Length from pucAcquire until end of buffer
    configASSERT(rbCHECK_ALIGNED(pxRingbuffer->pucAcquire));            //pucAcquire is always aligned in allow-split ring buffers
    configASSERT(pxRingbuffer->pucAcquire >= pxRingbuffer->pucHead && pxRingbuffer->pucAcquire < pxRingbuffer->pucTail);    //Check acquire pointer is within bounds
    configASSERT(xRemLen >= rbHEADER_SIZE);                             //Remaining length must be able to at least fit an item header

    //Split item if necessary
    if (xRemLen < xAlignedItemSize + rbHEADER_SIZE) {
        //Write first part of the item
        ItemHeader_t *pxFirstHeader = (ItemHeader_t *)pxRingbuffer->pucAcquire;
        pxFirstHeader->uxItemFlags = 0;
        pxFirstHeader->xItemLen = xRemLen - rbHEADER_SIZE;  //Fill remaining length with first part
        pxRingbuffer->pucAcquire += rbHEADER_SIZE;          //Advance pucAcquire past header
        xRemLen -= rbHEADER_SIZE;
        if (xRemLen > 0) {
            memcpy(pxRingbuffer->pucAcquire, pucItem, xRemLen);
            pxRingbuffer->xItemsWaiting++;
            //Update item arguments to account for data already copied
            pucItem += xRemLen;
            xItemSize -= xRemLen;
            xAlignedItemSize -= xRemLen;
            pxFirstHeader->uxItemFlags |= rbITEM_SPLIT_FLAG;        //There must be more data
        } else {
            //Remaining length was only large enough to fit the header
            pxFirstHeader->uxItemFlags |= rbITEM_DUMMY_DATA_FLAG;   //Item will be stored entirely in the 2nd part
        }
        pxRingbuffer->pucAcquire = pxRingbuffer->pucHead;   //Reset acquire pointer to start of buffer
    }

    //Item (whole or second part) should be guaranteed to fit at this point
    ItemHeader_t *pxSecondHeader = (ItemHeader_t *)pxRingbuffer->pucAcquire;
    pxSecondHeader->xItemLen = xItemSize;
    pxSecondHeader->uxItemFlags = 0;
    pxRingbuffer->pucAcquire += rbHEADER_SIZE;      //Advance acquire pointer past header
    memcpy(pxRingbuffer->pucAcquire, pucItem, xItemSize);
    pxRingbuffer->xItemsWaiting++;
    pxRingbuffer->pucAcquire += xAlignedItemSize;   //Advance pucAcquire past item to next aligned address

    //If current remaining length can't fit a header, wrap around the acquire pointer
    if (pxRingbuffer->pucTail - pxRingbuffer->pucAcquire < rbHEADER_SIZE) {
        pxRingbuffer->pucAcquire = pxRingbuffer->pucHead;   //Wrap around pucAcquire
    }
    //Check if buffer is full
    if (pxRingbuffer->pucAcquire == pxRingbuffer->pucFree) {
        //Mark the buffer as full to distinguish it from an empty buffer
        pxRingbuffer->uxRingbufferFlags |= rbBUFFER_FULL_FLAG;
    }
    //Acquiring is currently not supported in allow-split buffers, so pucWrite simply tracks pucAcquire
    pxRingbuffer->pucWrite = pxRingbuffer->pucAcquire;
}

static void prvCopyItemByteBuf(Ringbuffer_t *pxRingbuffer, const uint8_t *pucItem, size_t xItemSize)
{
    //Check arguments and buffer state
    configASSERT(pxRingbuffer->pucAcquire >= pxRingbuffer->pucHead && pxRingbuffer->pucAcquire < pxRingbuffer->pucTail);    //Check acquire pointer is within bounds

    size_t xRemLen = pxRingbuffer->pucTail - pxRingbuffer->pucAcquire;  //Length from pucAcquire until end of buffer
    if (xRemLen < xItemSize) {
        //Copy as much as possible into remaining length
        memcpy(pxRingbuffer->pucAcquire, pucItem, xRemLen);
        pxRingbuffer->xItemsWaiting += xRemLen;
        //Update item arguments to account for data already written
        pucItem += xRemLen;
        xItemSize -= xRemLen;
        pxRingbuffer->pucAcquire = pxRingbuffer->pucHead;   //Reset acquire pointer to start of buffer
    }
    //Copy all or remaining portion of the item
    memcpy(pxRingbuffer->pucAcquire, pucItem, xItemSize);
    pxRingbuffer->xItemsWaiting += xItemSize;
    pxRingbuffer->pucAcquire += xItemSize;

    //Wrap around pucAcquire if it reaches the end
    if (pxRingbuffer->pucAcquire == pxRingbuffer->pucTail) {
        pxRingbuffer->pucAcquire = pxRingbuffer->pucHead;
    }
    //Check if buffer is full
    if (pxRingbuffer->pucAcquire == pxRingbuffer->pucFree) {
        pxRingbuffer->uxRingbufferFlags |= rbBUFFER_FULL_FLAG;  //Mark the buffer as full to avoid confusion with an empty buffer
    }
    //Currently, acquiring memory is not supported in byte mode. pucWrite tracks pucAcquire.
    pxRingbuffer->pucWrite = pxRingbuffer->pucAcquire;
}
static BaseType_t prvCheckItemAvail(Ringbuffer_t *pxRingbuffer)
{
    if ((pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG) && pxRingbuffer->pucRead != pxRingbuffer->pucFree) {
        return pdFALSE;     //Byte buffers do not allow multiple retrievals before return
    }
    if ((pxRingbuffer->xItemsWaiting > 0) && ((pxRingbuffer->pucRead != pxRingbuffer->pucWrite) || (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG))) {
        return pdTRUE;      //Items/data available for retrieval
    } else {
        return pdFALSE;     //No items/data available for retrieval
    }
}

static void *prvGetItemDefault(Ringbuffer_t *pxRingbuffer,
                               BaseType_t *pxIsSplit,
                               size_t xUnusedParam,
                               size_t *pxItemSize)
{
    //Check arguments and buffer state
    ItemHeader_t *pxHeader = (ItemHeader_t *)pxRingbuffer->pucRead;
    configASSERT(pxIsSplit != NULL);
    configASSERT((pxRingbuffer->xItemsWaiting > 0) && ((pxRingbuffer->pucRead != pxRingbuffer->pucWrite) || (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG)));   //Check there are items to be read
    configASSERT(rbCHECK_ALIGNED(pxRingbuffer->pucRead));   //pucRead is always aligned in no-split/allow-split ring buffers
    configASSERT(pxRingbuffer->pucRead >= pxRingbuffer->pucHead && pxRingbuffer->pucRead < pxRingbuffer->pucTail);  //Check read pointer is within bounds
    configASSERT((pxHeader->xItemLen <= pxRingbuffer->xMaxItemSize) || (pxHeader->uxItemFlags & rbITEM_DUMMY_DATA_FLAG));

    uint8_t *pcReturn;
    //Wrap around if dummy data (dummy data indicates wrap around in no-split buffers)
    if (pxHeader->uxItemFlags & rbITEM_DUMMY_DATA_FLAG) {
        pxRingbuffer->pucRead = pxRingbuffer->pucHead;
        //Check for errors with the next item
        pxHeader = (ItemHeader_t *)pxRingbuffer->pucRead;
        configASSERT(pxHeader->xItemLen <= pxRingbuffer->xMaxItemSize);
    }
    pcReturn = pxRingbuffer->pucRead + rbHEADER_SIZE;   //Get pointer to part of item containing data (point past the header)
    if (pxHeader->xItemLen == 0) {
        //Inclusive of pucTail for the special case where an item of zero length just fits at the end of the buffer
        configASSERT(pcReturn >= pxRingbuffer->pucHead && pcReturn <= pxRingbuffer->pucTail);
    } else {
        //Exclusive of pucTail if length is larger than zero, pcReturn should never point to pucTail
        configASSERT(pcReturn >= pxRingbuffer->pucHead && pcReturn < pxRingbuffer->pucTail);
    }
    *pxItemSize = pxHeader->xItemLen;   //Get length of item
    pxRingbuffer->xItemsWaiting--;      //Update item count
    *pxIsSplit = (pxHeader->uxItemFlags & rbITEM_SPLIT_FLAG) ? pdTRUE : pdFALSE;

    pxRingbuffer->pucRead += rbHEADER_SIZE + rbALIGN_SIZE(pxHeader->xItemLen);  //Update pucRead
    //Check if pucRead requires wrap around
    if ((pxRingbuffer->pucTail - pxRingbuffer->pucRead) < rbHEADER_SIZE) {
        pxRingbuffer->pucRead = pxRingbuffer->pucHead;
    }
    return (void *)pcReturn;
}
static void *prvGetItemByteBuf(Ringbuffer_t *pxRingbuffer,
                               BaseType_t *pxUnusedParam,
                               size_t xMaxSize,
                               size_t *pxItemSize)
{
    //Check arguments and buffer state
    configASSERT((pxRingbuffer->xItemsWaiting > 0) && ((pxRingbuffer->pucRead != pxRingbuffer->pucWrite) || (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG)));   //Check there are items to be read
    configASSERT(pxRingbuffer->pucRead >= pxRingbuffer->pucHead && pxRingbuffer->pucRead < pxRingbuffer->pucTail);  //Check read pointer is within bounds
    configASSERT(pxRingbuffer->pucRead == pxRingbuffer->pucFree);

    uint8_t *ret = pxRingbuffer->pucRead;
    if ((pxRingbuffer->pucRead > pxRingbuffer->pucWrite) || (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG)) {   //Available data wraps around
        //Return contiguous piece from read pointer until buffer tail, or xMaxSize
        if (xMaxSize == 0 || pxRingbuffer->pucTail - pxRingbuffer->pucRead <= xMaxSize) {
            //All contiguous data from read pointer to tail
            *pxItemSize = pxRingbuffer->pucTail - pxRingbuffer->pucRead;
            pxRingbuffer->xItemsWaiting -= pxRingbuffer->pucTail - pxRingbuffer->pucRead;
            pxRingbuffer->pucRead = pxRingbuffer->pucHead;      //Wrap around read pointer
        } else {
            //Return xMaxSize amount of data
            *pxItemSize = xMaxSize;
            pxRingbuffer->xItemsWaiting -= xMaxSize;
            pxRingbuffer->pucRead += xMaxSize;      //Advance read pointer past retrieved data
        }
    } else {    //Available data is contiguous between read and write pointer
        if (xMaxSize == 0 || pxRingbuffer->pucWrite - pxRingbuffer->pucRead <= xMaxSize) {
            //Return all contiguous data from read to write pointer
            *pxItemSize = pxRingbuffer->pucWrite - pxRingbuffer->pucRead;
            pxRingbuffer->xItemsWaiting -= pxRingbuffer->pucWrite - pxRingbuffer->pucRead;
            pxRingbuffer->pucRead = pxRingbuffer->pucWrite;
        } else {
            //Return xMaxSize data from read pointer
            *pxItemSize = xMaxSize;
            pxRingbuffer->xItemsWaiting -= xMaxSize;
            pxRingbuffer->pucRead += xMaxSize;      //Advance read pointer past retrieved data
        }
    }
    return (void *)ret;
}
static void prvReturnItemDefault(Ringbuffer_t *pxRingbuffer, uint8_t *pucItem)
{
    //Check arguments and buffer state
    configASSERT(rbCHECK_ALIGNED(pucItem));
    configASSERT(pucItem >= pxRingbuffer->pucHead);
    configASSERT(pucItem <= pxRingbuffer->pucTail);     //Inclusive of pucTail in the case of zero length item at the very end

    //Get and check header of the item
    ItemHeader_t *pxCurHeader = (ItemHeader_t *)(pucItem - rbHEADER_SIZE);
    configASSERT(pxCurHeader->xItemLen <= pxRingbuffer->xMaxItemSize);
    configASSERT((pxCurHeader->uxItemFlags & rbITEM_DUMMY_DATA_FLAG) == 0);     //Dummy items should never have been read
    configASSERT((pxCurHeader->uxItemFlags & rbITEM_FREE_FLAG) == 0);           //Indicates item has already been returned before
    pxCurHeader->uxItemFlags &= ~rbITEM_SPLIT_FLAG;     //Clear wrap flag if set (not strictly necessary)
    pxCurHeader->uxItemFlags |= rbITEM_FREE_FLAG;       //Mark as free

    /*
     * Items might not be returned in the order they were retrieved. Move the free pointer
     * up to the next item that has not been marked as free (by the free flag) or up
     * till the read pointer. When advancing the free pointer, items that have already been
     * freed or items with dummy data should be skipped over
     */
    pxCurHeader = (ItemHeader_t *)pxRingbuffer->pucFree;
    //Skip over items that have already been freed or are dummy items
    while (((pxCurHeader->uxItemFlags & rbITEM_FREE_FLAG) || (pxCurHeader->uxItemFlags & rbITEM_DUMMY_DATA_FLAG)) && pxRingbuffer->pucFree != pxRingbuffer->pucRead) {
        if (pxCurHeader->uxItemFlags & rbITEM_DUMMY_DATA_FLAG) {
            pxCurHeader->uxItemFlags |= rbITEM_FREE_FLAG;   //Mark as freed (not strictly necessary but adds redundancy)
            pxRingbuffer->pucFree = pxRingbuffer->pucHead;  //Wrap around due to dummy data
        } else {
            //Item with data that has already been freed, advance free pointer past this item
            size_t xAlignedItemSize = rbALIGN_SIZE(pxCurHeader->xItemLen);
            pxRingbuffer->pucFree += xAlignedItemSize + rbHEADER_SIZE;
            //Redundancy check to ensure free pointer has not overshot buffer bounds
            configASSERT(pxRingbuffer->pucFree <= pxRingbuffer->pucHead + pxRingbuffer->xSize);
        }
        //Check if pucFree requires wrap around
        if ((pxRingbuffer->pucTail - pxRingbuffer->pucFree) < rbHEADER_SIZE) {
            pxRingbuffer->pucFree = pxRingbuffer->pucHead;
        }
        pxCurHeader = (ItemHeader_t *)pxRingbuffer->pucFree;    //Update header to point to item
    }

    //Check if the buffer full flag should be reset
    if (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) {
        if (pxRingbuffer->pucFree != pxRingbuffer->pucAcquire) {
            pxRingbuffer->uxRingbufferFlags &= ~rbBUFFER_FULL_FLAG;
        } else if (pxRingbuffer->pucFree == pxRingbuffer->pucAcquire && pxRingbuffer->pucFree == pxRingbuffer->pucRead) {
            //Special case where a full buffer is completely freed in one go
            pxRingbuffer->uxRingbufferFlags &= ~rbBUFFER_FULL_FLAG;
        }
    }
}

static void prvReturnItemByteBuf(Ringbuffer_t *pxRingbuffer, uint8_t *pucItem)
{
    //Check pointer points to address inside buffer
    configASSERT((uint8_t *)pucItem >= pxRingbuffer->pucHead);
    configASSERT((uint8_t *)pucItem < pxRingbuffer->pucTail);
    //Free the read memory. Simply moves free pointer to read pointer as byte buffers do not allow multiple outstanding reads
    pxRingbuffer->pucFree = pxRingbuffer->pucRead;
    //If buffer was full before, reset full flag as free pointer has moved
    if (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) {
        pxRingbuffer->uxRingbufferFlags &= ~rbBUFFER_FULL_FLAG;
    }
}
static size_t prvGetCurMaxSizeNoSplit(Ringbuffer_t *pxRingbuffer)
{
    BaseType_t xFreeSize;
    //Check if buffer is full
    if (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) {
        return 0;
    }
    if (pxRingbuffer->pucAcquire < pxRingbuffer->pucFree) {
        //Free space is contiguous between pucAcquire and pucFree
        xFreeSize = pxRingbuffer->pucFree - pxRingbuffer->pucAcquire;
    } else {
        //Free space wraps around (or overlapped at pucHead), select the largest
        //contiguous free space as no-split items require contiguous space
        size_t xSize1 = pxRingbuffer->pucTail - pxRingbuffer->pucAcquire;
        size_t xSize2 = pxRingbuffer->pucFree - pxRingbuffer->pucHead;
        xFreeSize = (xSize1 > xSize2) ? xSize1 : xSize2;
    }
    //No-split ring buffer items need space for a header
    xFreeSize -= rbHEADER_SIZE;
    //Limit free size to be within bounds
    if (xFreeSize > pxRingbuffer->xMaxItemSize) {
        xFreeSize = pxRingbuffer->xMaxItemSize;
    } else if (xFreeSize < 0) {
        //Occurs when free space is less than header size
        xFreeSize = 0;
    }
    return xFreeSize;
}

static size_t prvGetCurMaxSizeAllowSplit(Ringbuffer_t *pxRingbuffer)
{
    BaseType_t xFreeSize;
    //Check if buffer is full
    if (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) {
        return 0;
    }
    if (pxRingbuffer->pucAcquire == pxRingbuffer->pucHead && pxRingbuffer->pucFree == pxRingbuffer->pucHead) {
        //Check for special case where pucAcquire and pucFree are both at pucHead
        xFreeSize = pxRingbuffer->xSize - rbHEADER_SIZE;
    } else if (pxRingbuffer->pucAcquire < pxRingbuffer->pucFree) {
        //Free space is contiguous between pucAcquire and pucFree, requires a single header
        xFreeSize = (pxRingbuffer->pucFree - pxRingbuffer->pucAcquire) - rbHEADER_SIZE;
    } else {
        //Free space wraps around, requires two headers
        xFreeSize = (pxRingbuffer->pucFree - pxRingbuffer->pucHead) +
                    (pxRingbuffer->pucTail - pxRingbuffer->pucAcquire) -
                    (rbHEADER_SIZE * 2);
    }
    //Limit free size to be within bounds
    if (xFreeSize > pxRingbuffer->xMaxItemSize) {
        xFreeSize = pxRingbuffer->xMaxItemSize;
    } else if (xFreeSize < 0) {
        xFreeSize = 0;
    }
    return xFreeSize;
}

static size_t prvGetCurMaxSizeByteBuf(Ringbuffer_t *pxRingbuffer)
{
    BaseType_t xFreeSize;
    //Check if buffer is full
    if (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) {
        return 0;
    }
    /*
     * Return whatever space is available depending on the relative positions of the
     * free pointer and acquire pointer. There is no overhead of headers in this mode
     */
    xFreeSize = pxRingbuffer->pucFree - pxRingbuffer->pucAcquire;
    if (xFreeSize <= 0) {
        xFreeSize += pxRingbuffer->xSize;
    }
    return xFreeSize;
}
static BaseType_t prvReceiveGeneric(Ringbuffer_t *pxRingbuffer,
                                    void **pvItem1,
                                    void **pvItem2,
                                    size_t *xItemSize1,
                                    size_t *xItemSize2,
                                    size_t xMaxSize,
                                    TickType_t xTicksToWait)
{
    BaseType_t xReturn = pdFALSE;
    BaseType_t xReturnSemaphore = pdFALSE;
    TickType_t xTicksEnd = xTaskGetTickCount() + xTicksToWait;
    TickType_t xTicksRemaining = xTicksToWait;
    while (xTicksRemaining <= xTicksToWait) {   //xTicksRemaining will underflow once xTaskGetTickCount() > xTicksEnd
        //Block until an item/data becomes available or the timeout expires
        if (xSemaphoreTake(rbGET_RX_SEM_HANDLE(pxRingbuffer), xTicksRemaining) != pdTRUE) {
            xReturn = pdFALSE;      //Timed out attempting to get semaphore
            break;
        }
        //Semaphore obtained, check if item can be retrieved
        portENTER_CRITICAL(&pxRingbuffer->mux);
        if (prvCheckItemAvail(pxRingbuffer) == pdTRUE) {
            //Item is available for retrieval
            BaseType_t xIsSplit;
            if (pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG) {
                //Second argument (pxIsSplit) is unused for byte buffers
                *pvItem1 = pxRingbuffer->pvGetItem(pxRingbuffer, NULL, xMaxSize, xItemSize1);
            } else {
                //Third argument (xMaxSize) is unused for no-split/allow-split buffers
                *pvItem1 = pxRingbuffer->pvGetItem(pxRingbuffer, &xIsSplit, 0, xItemSize1);
            }
            //Check for item split if configured to do so
            if ((pxRingbuffer->uxRingbufferFlags & rbALLOW_SPLIT_FLAG) && (pvItem2 != NULL) && (xItemSize2 != NULL)) {
                if (xIsSplit == pdTRUE) {
                    *pvItem2 = pxRingbuffer->pvGetItem(pxRingbuffer, &xIsSplit, 0, xItemSize2);
                    configASSERT(*pvItem2 < *pvItem1);  //Check wrap around has occurred
                    configASSERT(xIsSplit == pdFALSE);  //Second part should not have wrapped flag
                } else {
                    *pvItem2 = NULL;
                }
            }
            xReturn = pdTRUE;
            if (pxRingbuffer->xItemsWaiting > 0) {
                xReturnSemaphore = pdTRUE;
            }
            portEXIT_CRITICAL(&pxRingbuffer->mux);
            break;
        }
        //No item available for retrieval, adjust ticks and take the semaphore again
        if (xTicksToWait != portMAX_DELAY) {
            xTicksRemaining = xTicksEnd - xTaskGetTickCount();
        }
        portEXIT_CRITICAL(&pxRingbuffer->mux);
        /*
         * Gap between critical section and re-acquiring of the semaphore. If
         * semaphore is given now, priority inversion might occur (see docs)
         */
    }
    if (xReturnSemaphore == pdTRUE) {
        xSemaphoreGive(rbGET_RX_SEM_HANDLE(pxRingbuffer));  //Give semaphore back so other tasks can retrieve
    }
    return xReturn;
}
static BaseType_t prvReceiveGenericFromISR(Ringbuffer_t *pxRingbuffer,
                                           void **pvItem1,
                                           void **pvItem2,
                                           size_t *xItemSize1,
                                           size_t *xItemSize2,
                                           size_t xMaxSize)
{
    BaseType_t xReturn = pdFALSE;
    BaseType_t xReturnSemaphore = pdFALSE;

    portENTER_CRITICAL_ISR(&pxRingbuffer->mux);
    if (prvCheckItemAvail(pxRingbuffer) == pdTRUE) {
        BaseType_t xIsSplit;
        if (pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG) {
            //Second argument (pxIsSplit) is unused for byte buffers
            *pvItem1 = pxRingbuffer->pvGetItem(pxRingbuffer, NULL, xMaxSize, xItemSize1);
        } else {
            //Third argument (xMaxSize) is unused for no-split/allow-split buffers
            *pvItem1 = pxRingbuffer->pvGetItem(pxRingbuffer, &xIsSplit, 0, xItemSize1);
        }
        //Check for item split if configured to do so
        if ((pxRingbuffer->uxRingbufferFlags & rbALLOW_SPLIT_FLAG) && pvItem2 != NULL && xItemSize2 != NULL) {
            if (xIsSplit == pdTRUE) {
                *pvItem2 = pxRingbuffer->pvGetItem(pxRingbuffer, &xIsSplit, 0, xItemSize2);
                configASSERT(*pvItem2 < *pvItem1);  //Check wrap around has occurred
                configASSERT(xIsSplit == pdFALSE);  //Second part should not have wrapped flag
            } else {
                *pvItem2 = NULL;
            }
        }
        xReturn = pdTRUE;
        if (pxRingbuffer->xItemsWaiting > 0) {
            xReturnSemaphore = pdTRUE;
        }
    }
    portEXIT_CRITICAL_ISR(&pxRingbuffer->mux);

    if (xReturnSemaphore == pdTRUE) {
        xSemaphoreGiveFromISR(rbGET_RX_SEM_HANDLE(pxRingbuffer), NULL);     //Give semaphore back so other tasks can retrieve
    }
    return xReturn;
}
/* --------------------------- Public Definitions --------------------------- */

RingbufHandle_t xRingbufferCreate(size_t xBufferSize, RingbufferType_t xBufferType)
{
    configASSERT(xBufferSize > 0);
    configASSERT(xBufferType < RINGBUF_TYPE_MAX);

    //Allocate memory
    if (xBufferType != RINGBUF_TYPE_BYTEBUF) {
        xBufferSize = rbALIGN_SIZE(xBufferSize);    //xBufferSize is rounded up for no-split/allow-split buffers
    }
    Ringbuffer_t *pxNewRingbuffer = calloc(1, sizeof(Ringbuffer_t));
    uint8_t *pucRingbufferStorage = malloc(xBufferSize);
    if (pxNewRingbuffer == NULL || pucRingbufferStorage == NULL) {
        goto err;
    }

    //Initialize Semaphores
#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
    //We don't use the handles for static semaphores, and xSemaphoreCreateBinaryStatic will never fail, so there is no need to check the static case
    xSemaphoreCreateBinaryStatic(&(pxNewRingbuffer->xTransSemStatic));
    xSemaphoreCreateBinaryStatic(&(pxNewRingbuffer->xRecvSemStatic));
#else
    pxNewRingbuffer->xTransSemHandle = xSemaphoreCreateBinary();
    pxNewRingbuffer->xRecvSemHandle = xSemaphoreCreateBinary();
    if (pxNewRingbuffer->xTransSemHandle == NULL || pxNewRingbuffer->xRecvSemHandle == NULL) {
        if (pxNewRingbuffer->xTransSemHandle != NULL) {
            vSemaphoreDelete(pxNewRingbuffer->xTransSemHandle);
        }
        if (pxNewRingbuffer->xRecvSemHandle != NULL) {
            vSemaphoreDelete(pxNewRingbuffer->xRecvSemHandle);
        }
        goto err;
    }
#endif

    prvInitializeNewRingbuffer(xBufferSize, xBufferType, pxNewRingbuffer, pucRingbufferStorage);
    return (RingbufHandle_t)pxNewRingbuffer;

err:
    //An error has occurred, free memory and return NULL
    free(pxNewRingbuffer);
    free(pucRingbufferStorage);
    return NULL;
}
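/*
 * Usage sketch (not part of the original file): creating a dynamically
 * allocated no-split ring buffer from a task. The 1024-byte size and the
 * error handling are illustrative only.
 *
 *   RingbufHandle_t xHandle = xRingbufferCreate(1024, RINGBUF_TYPE_NOSPLIT);
 *   if (xHandle == NULL) {
 *       //Creation failed, e.g. out of heap
 *   }
 */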
RingbufHandle_t xRingbufferCreateNoSplit(size_t xItemSize, size_t xItemNum)
{
    return xRingbufferCreate((rbALIGN_SIZE(xItemSize) + rbHEADER_SIZE) * xItemNum, RINGBUF_TYPE_NOSPLIT);
}
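/*
 * Worked example of the sizing above (illustrative numbers, not part of the
 * original file): xRingbufferCreateNoSplit(18, 4) requests a buffer of
 *   (rbALIGN_SIZE(18) + rbHEADER_SIZE) * 4 = (20 + 8) * 4 = 112 bytes,
 * i.e. enough storage for four 18-byte items once each item is padded to a
 * 4-byte boundary and prefixed with a header (the 8-byte header assumes a
 * typical 32-bit build).
 */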
#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
RingbufHandle_t xRingbufferCreateStatic(size_t xBufferSize,
                                        RingbufferType_t xBufferType,
                                        uint8_t *pucRingbufferStorage,
                                        StaticRingbuffer_t *pxStaticRingbuffer)
{
    //Check arguments
    configASSERT(xBufferSize > 0);
    configASSERT(xBufferType < RINGBUF_TYPE_MAX);
    configASSERT(pucRingbufferStorage != NULL && pxStaticRingbuffer != NULL);
    if (xBufferType != RINGBUF_TYPE_BYTEBUF) {
        //No-split/allow-split buffer sizes must be 32-bit aligned
        configASSERT(rbCHECK_ALIGNED(xBufferSize));
    }

    Ringbuffer_t *pxNewRingbuffer = (Ringbuffer_t *)pxStaticRingbuffer;
    xSemaphoreCreateBinaryStatic(&(pxNewRingbuffer->xTransSemStatic));
    xSemaphoreCreateBinaryStatic(&(pxNewRingbuffer->xRecvSemStatic));
    prvInitializeNewRingbuffer(xBufferSize, xBufferType, pxNewRingbuffer, pucRingbufferStorage);
    pxNewRingbuffer->uxRingbufferFlags |= rbBUFFER_STATIC_FLAG;
    return (RingbufHandle_t)pxNewRingbuffer;
}
#endif
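/*
 * Usage sketch for static allocation (not part of the original file; the
 * 128-byte size and variable names are illustrative). Both the storage area
 * and the StaticRingbuffer_t control block must outlive the ring buffer:
 *
 *   #define BUF_SIZE 128
 *   static uint8_t ucStorage[BUF_SIZE];
 *   static StaticRingbuffer_t xControlBlock;
 *
 *   RingbufHandle_t xHandle = xRingbufferCreateStatic(BUF_SIZE,
 *                                                     RINGBUF_TYPE_BYTEBUF,
 *                                                     ucStorage,
 *                                                     &xControlBlock);
 */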
BaseType_t xRingbufferSendAcquire(RingbufHandle_t xRingbuffer, void **ppvItem, size_t xItemSize, TickType_t xTicksToWait)
{
    //Check arguments
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);
    configASSERT(ppvItem != NULL || xItemSize == 0);
    //Currently only supported in no-split buffers
    configASSERT((pxRingbuffer->uxRingbufferFlags & (rbBYTE_BUFFER_FLAG | rbALLOW_SPLIT_FLAG)) == 0);

    *ppvItem = NULL;
    if (xItemSize > pxRingbuffer->xMaxItemSize) {
        return pdFALSE;     //Data will never fit in the buffer
    }
    if ((pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG) && xItemSize == 0) {
        return pdTRUE;      //Sending 0 bytes to byte buffer has no effect
    }

    //Attempt to send an item
    BaseType_t xReturn = pdFALSE;
    BaseType_t xReturnSemaphore = pdFALSE;
    TickType_t xTicksEnd = xTaskGetTickCount() + xTicksToWait;
    TickType_t xTicksRemaining = xTicksToWait;
    while (xTicksRemaining <= xTicksToWait) {   //xTicksRemaining will underflow once xTaskGetTickCount() > xTicksEnd
        //Block until more free space becomes available or the timeout expires
        if (xSemaphoreTake(rbGET_TX_SEM_HANDLE(pxRingbuffer), xTicksRemaining) != pdTRUE) {
            xReturn = pdFALSE;
            break;
        }
        //Semaphore obtained, check if item can fit
        portENTER_CRITICAL(&pxRingbuffer->mux);
        if (pxRingbuffer->xCheckItemFits(pxRingbuffer, xItemSize) == pdTRUE) {
            //Item will fit, acquire the memory for it
            *ppvItem = prvAcquireItemNoSplit(pxRingbuffer, xItemSize);
            xReturn = pdTRUE;
            //Check if the free semaphore should be returned to allow other tasks to send
            if (prvGetFreeSize(pxRingbuffer) > 0) {
                xReturnSemaphore = pdTRUE;
            }
            portEXIT_CRITICAL(&pxRingbuffer->mux);
            break;
        }
        //Item doesn't fit, adjust ticks and take the semaphore again
        if (xTicksToWait != portMAX_DELAY) {
            xTicksRemaining = xTicksEnd - xTaskGetTickCount();
        }
        portEXIT_CRITICAL(&pxRingbuffer->mux);
        /*
         * Gap between critical section and re-acquiring of the semaphore. If
         * semaphore is given now, priority inversion might occur (see docs)
         */
    }
    if (xReturnSemaphore == pdTRUE) {
        xSemaphoreGive(rbGET_TX_SEM_HANDLE(pxRingbuffer));  //Give back semaphore so other tasks can acquire
    }
    return xReturn;
}

BaseType_t xRingbufferSendComplete(RingbufHandle_t xRingbuffer, void *pvItem)
{
    //Check arguments
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);
    configASSERT(pvItem != NULL);
    configASSERT((pxRingbuffer->uxRingbufferFlags & (rbBYTE_BUFFER_FLAG | rbALLOW_SPLIT_FLAG)) == 0);

    portENTER_CRITICAL(&pxRingbuffer->mux);
    prvSendItemDoneNoSplit(pxRingbuffer, pvItem);
    portEXIT_CRITICAL(&pxRingbuffer->mux);
    xSemaphoreGive(rbGET_RX_SEM_HANDLE(pxRingbuffer));
    return pdTRUE;
}
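/*
 * Usage sketch of the acquire/complete pair above (not part of the original
 * file; the buffer handle, size and timeout are illustrative). The item is
 * written in place, avoiding the extra copy that xRingbufferSend() performs:
 *
 *   void *pvItem;
 *   if (xRingbufferSendAcquire(xHandle, &pvItem, 32, pdMS_TO_TICKS(100)) == pdTRUE) {
 *       memset(pvItem, 0xAA, 32);                  //Fill the acquired region directly
 *       xRingbufferSendComplete(xHandle, pvItem);  //Make the item visible to receivers
 *   }
 */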
BaseType_t xRingbufferSend(RingbufHandle_t xRingbuffer,
                           const void *pvItem,
                           size_t xItemSize,
                           TickType_t xTicksToWait)
{
    //Check arguments
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);
    configASSERT(pvItem != NULL || xItemSize == 0);
    if (xItemSize > pxRingbuffer->xMaxItemSize) {
        return pdFALSE;     //Data will never fit in the buffer
    }
    if ((pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG) && xItemSize == 0) {
        return pdTRUE;      //Sending 0 bytes to byte buffer has no effect
    }

    //Attempt to send an item
    BaseType_t xReturn = pdFALSE;
    BaseType_t xReturnSemaphore = pdFALSE;
    TickType_t xTicksEnd = xTaskGetTickCount() + xTicksToWait;
    TickType_t xTicksRemaining = xTicksToWait;
    while (xTicksRemaining <= xTicksToWait) {   //xTicksRemaining will underflow once xTaskGetTickCount() > xTicksEnd
        //Block until more free space becomes available or the timeout expires
        if (xSemaphoreTake(rbGET_TX_SEM_HANDLE(pxRingbuffer), xTicksRemaining) != pdTRUE) {
            xReturn = pdFALSE;
            break;
        }
        //Semaphore obtained, check if item can fit
        portENTER_CRITICAL(&pxRingbuffer->mux);
        if (pxRingbuffer->xCheckItemFits(pxRingbuffer, xItemSize) == pdTRUE) {
            //Item will fit, copy item
            pxRingbuffer->vCopyItem(pxRingbuffer, pvItem, xItemSize);
            xReturn = pdTRUE;
            //Check if the free semaphore should be returned to allow other tasks to send
            if (prvGetFreeSize(pxRingbuffer) > 0) {
                xReturnSemaphore = pdTRUE;
            }
            portEXIT_CRITICAL(&pxRingbuffer->mux);
            break;
        }
        //Item doesn't fit, adjust ticks and take the semaphore again
        if (xTicksToWait != portMAX_DELAY) {
            xTicksRemaining = xTicksEnd - xTaskGetTickCount();
        }
        portEXIT_CRITICAL(&pxRingbuffer->mux);
        /*
         * Gap between critical section and re-acquiring of the semaphore. If
         * semaphore is given now, priority inversion might occur (see docs)
         */
    }
    if (xReturnSemaphore == pdTRUE) {
        xSemaphoreGive(rbGET_TX_SEM_HANDLE(pxRingbuffer));  //Give back semaphore so other tasks can send
    }
    if (xReturn == pdTRUE) {
        //Indicate item was successfully sent
        xSemaphoreGive(rbGET_RX_SEM_HANDLE(pxRingbuffer));
    }
    return xReturn;
}
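/*
 * Usage sketch for xRingbufferSend() (not part of the original file; the
 * handle, payload and 1-second timeout are illustrative). The item is copied
 * into the buffer, so the caller's storage can be reused immediately:
 *
 *   const char msg[] = "hello";
 *   if (xRingbufferSend(xHandle, msg, sizeof(msg), pdMS_TO_TICKS(1000)) != pdTRUE) {
 *       //The item did not fit within the timeout
 *   }
 */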

BaseType_t xRingbufferSendFromISR(RingbufHandle_t xRingbuffer,
                                  const void *pvItem,
                                  size_t xItemSize,
                                  BaseType_t *pxHigherPriorityTaskWoken)
{
    //Check arguments
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);
    configASSERT(pvItem != NULL || xItemSize == 0);
    if (xItemSize > pxRingbuffer->xMaxItemSize) {
        return pdFALSE;     //Data will never ever fit in the queue.
    }
    if ((pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG) && xItemSize == 0) {
        return pdTRUE;      //Sending 0 bytes to byte buffer has no effect
    }

    //Attempt to send an item
    BaseType_t xReturn;
    BaseType_t xReturnSemaphore = pdFALSE;
    portENTER_CRITICAL_ISR(&pxRingbuffer->mux);
    if (pxRingbuffer->xCheckItemFits(xRingbuffer, xItemSize) == pdTRUE) {
        pxRingbuffer->vCopyItem(xRingbuffer, pvItem, xItemSize);
        xReturn = pdTRUE;
        //Check if the free semaphore should be returned to allow other tasks to send
        if (prvGetFreeSize(pxRingbuffer) > 0) {
            xReturnSemaphore = pdTRUE;
        }
    } else {
        xReturn = pdFALSE;
    }
    portEXIT_CRITICAL_ISR(&pxRingbuffer->mux);
    if (xReturnSemaphore == pdTRUE) {
        xSemaphoreGiveFromISR(rbGET_TX_SEM_HANDLE(pxRingbuffer), pxHigherPriorityTaskWoken);    //Give back semaphore so other tasks can send
    }
    if (xReturn == pdTRUE) {
        //Indicate item was successfully sent
        xSemaphoreGiveFromISR(rbGET_RX_SEM_HANDLE(pxRingbuffer), pxHigherPriorityTaskWoken);
    }
    return xReturn;
}
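
/*
 * Usage sketch (illustrative, not part of the original source): sending from an ISR
 * and yielding if a higher-priority task was woken. The ISR, buffer handle, and data
 * are hypothetical; only xRingbufferSendFromISR() is this file's API.
 *
 *     static void IRAM_ATTR vExampleISR(void *pvArg)
 *     {
 *         RingbufHandle_t xExampleBuf = (RingbufHandle_t)pvArg;
 *         uint32_t ulSample = 0x1234;
 *         BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *         //Returns pdFALSE immediately if the item does not fit right now (no blocking in ISRs)
 *         xRingbufferSendFromISR(xExampleBuf, &ulSample, sizeof(ulSample), &xHigherPriorityTaskWoken);
 *         if (xHigherPriorityTaskWoken == pdTRUE) {
 *             portYIELD_FROM_ISR();
 *         }
 *     }
 */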

void *xRingbufferReceive(RingbufHandle_t xRingbuffer, size_t *pxItemSize, TickType_t xTicksToWait)
{
    //Check arguments
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);

    //Attempt to retrieve an item
    void *pvTempItem;
    size_t xTempSize;
    if (prvReceiveGeneric(pxRingbuffer, &pvTempItem, NULL, &xTempSize, NULL, 0, xTicksToWait) == pdTRUE) {
        if (pxItemSize != NULL) {
            *pxItemSize = xTempSize;
        }
        return pvTempItem;
    } else {
        return NULL;
    }
}
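
/*
 * Usage sketch (illustrative, not part of the original source): receiving an item
 * and handing the buffer space back with vRingbufferReturnItem(). The buffer handle
 * and the process_data() consumer are hypothetical.
 *
 *     size_t xItemSize;
 *     void *pvItem = xRingbufferReceive(xExampleBuf, &xItemSize, pdMS_TO_TICKS(1000));
 *     if (pvItem != NULL) {
 *         //The item is used in place; it lives inside the ring buffer's storage area
 *         process_data(pvItem, xItemSize);
 *         //Every received item must be returned so its space can be reused
 *         vRingbufferReturnItem(xExampleBuf, pvItem);
 *     }
 */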

void *xRingbufferReceiveFromISR(RingbufHandle_t xRingbuffer, size_t *pxItemSize)
{
    //Check arguments
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);

    //Attempt to retrieve an item
    void *pvTempItem;
    size_t xTempSize;
    if (prvReceiveGenericFromISR(pxRingbuffer, &pvTempItem, NULL, &xTempSize, NULL, 0) == pdTRUE) {
        if (pxItemSize != NULL) {
            *pxItemSize = xTempSize;
        }
        return pvTempItem;
    } else {
        return NULL;
    }
}
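
/*
 * Usage sketch (illustrative, not part of the original source): the ISR-safe pairing
 * of xRingbufferReceiveFromISR() with vRingbufferReturnItemFromISR(). Names are hypothetical.
 *
 *     BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *     size_t xItemSize;
 *     void *pvItem = xRingbufferReceiveFromISR(xExampleBuf, &xItemSize);
 *     if (pvItem != NULL) {
 *         //...consume the item inside the ISR...
 *         vRingbufferReturnItemFromISR(xExampleBuf, pvItem, &xHigherPriorityTaskWoken);
 *     }
 */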

BaseType_t xRingbufferReceiveSplit(RingbufHandle_t xRingbuffer,
                                   void **ppvHeadItem,
                                   void **ppvTailItem,
                                   size_t *pxHeadItemSize,
                                   size_t *pxTailItemSize,
                                   TickType_t xTicksToWait)
{
    //Check arguments
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);
    configASSERT(pxRingbuffer->uxRingbufferFlags & rbALLOW_SPLIT_FLAG);
    configASSERT(ppvHeadItem != NULL && ppvTailItem != NULL);

    //Attempt to retrieve multiple items
    void *pvTempHeadItem, *pvTempTailItem;
    size_t xTempHeadSize, xTempTailSize;
    if (prvReceiveGeneric(pxRingbuffer, &pvTempHeadItem, &pvTempTailItem, &xTempHeadSize, &xTempTailSize, 0, xTicksToWait) == pdTRUE) {
        //At least one item was retrieved
        *ppvHeadItem = pvTempHeadItem;
        if (pxHeadItemSize != NULL) {
            *pxHeadItemSize = xTempHeadSize;
        }
        //Check to see if a second item was also retrieved
        if (pvTempTailItem != NULL) {
            *ppvTailItem = pvTempTailItem;
            if (pxTailItemSize != NULL) {
                *pxTailItemSize = xTempTailSize;
            }
        } else {
            *ppvTailItem = NULL;
        }
        return pdTRUE;
    } else {
        //No items retrieved
        *ppvHeadItem = NULL;
        *ppvTailItem = NULL;
        return pdFALSE;
    }
}
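
/*
 * Usage sketch (illustrative, not part of the original source): an allow-split buffer
 * can hand an item back in two parts when it wrapped around the end of the storage
 * area, and each retrieved part must be returned separately. Names are hypothetical.
 *
 *     void *pvHead, *pvTail;
 *     size_t xHeadSize, xTailSize;
 *     if (xRingbufferReceiveSplit(xExampleBuf, &pvHead, &pvTail, &xHeadSize, &xTailSize,
 *                                 pdMS_TO_TICKS(1000)) == pdTRUE) {
 *         //pvHead is always valid here; pvTail is non-NULL only for a wrapped item
 *         vRingbufferReturnItem(xExampleBuf, pvHead);
 *         if (pvTail != NULL) {
 *             vRingbufferReturnItem(xExampleBuf, pvTail);
 *         }
 *     }
 */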

BaseType_t xRingbufferReceiveSplitFromISR(RingbufHandle_t xRingbuffer,
                                          void **ppvHeadItem,
                                          void **ppvTailItem,
                                          size_t *pxHeadItemSize,
                                          size_t *pxTailItemSize)
{
    //Check arguments
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);
    configASSERT(pxRingbuffer->uxRingbufferFlags & rbALLOW_SPLIT_FLAG);
    configASSERT(ppvHeadItem != NULL && ppvTailItem != NULL);

    //Attempt to retrieve multiple items
    void *pvTempHeadItem = NULL, *pvTempTailItem = NULL;
    size_t xTempHeadSize, xTempTailSize;
    if (prvReceiveGenericFromISR(pxRingbuffer, &pvTempHeadItem, &pvTempTailItem, &xTempHeadSize, &xTempTailSize, 0) == pdTRUE) {
        //At least one item was received
        *ppvHeadItem = pvTempHeadItem;
        if (pxHeadItemSize != NULL) {
            *pxHeadItemSize = xTempHeadSize;
        }
        //Check to see if a second item was also retrieved
        if (pvTempTailItem != NULL) {
            *ppvTailItem = pvTempTailItem;
            if (pxTailItemSize != NULL) {
                *pxTailItemSize = xTempTailSize;
            }
        } else {
            *ppvTailItem = NULL;
        }
        return pdTRUE;
    } else {
        *ppvHeadItem = NULL;
        *ppvTailItem = NULL;
        return pdFALSE;
    }
}

void *xRingbufferReceiveUpTo(RingbufHandle_t xRingbuffer,
                             size_t *pxItemSize,
                             TickType_t xTicksToWait,
                             size_t xMaxSize)
{
    //Check arguments
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);
    configASSERT(pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG);     //This function should only be called for byte buffers
    if (xMaxSize == 0) {
        return NULL;
    }

    //Attempt to retrieve up to xMaxSize bytes
    void *pvTempItem;
    size_t xTempSize;
    if (prvReceiveGeneric(pxRingbuffer, &pvTempItem, NULL, &xTempSize, NULL, xMaxSize, xTicksToWait) == pdTRUE) {
        if (pxItemSize != NULL) {
            *pxItemSize = xTempSize;
        }
        return pvTempItem;
    } else {
        return NULL;
    }
}
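
/*
 * Usage sketch (illustrative, not part of the original source): draining a byte buffer
 * in bounded chunks. Byte buffers have no item boundaries, so the returned size may be
 * anything up to xMaxSize and may stop early at the wrap-around point. Names are hypothetical.
 *
 *     size_t xReceivedBytes;
 *     uint8_t *pucData = (uint8_t *)xRingbufferReceiveUpTo(xExampleBuf, &xReceivedBytes,
 *                                                          pdMS_TO_TICKS(1000), 64);
 *     if (pucData != NULL) {
 *         //...consume up to 64 bytes...
 *         vRingbufferReturnItem(xExampleBuf, pucData);
 *     }
 */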

void *xRingbufferReceiveUpToFromISR(RingbufHandle_t xRingbuffer, size_t *pxItemSize, size_t xMaxSize)
{
    //Check arguments
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);
    configASSERT(pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG);     //This function should only be called for byte buffers
    if (xMaxSize == 0) {
        return NULL;
    }

    //Attempt to retrieve up to xMaxSize bytes
    void *pvTempItem;
    size_t xTempSize;
    if (prvReceiveGenericFromISR(pxRingbuffer, &pvTempItem, NULL, &xTempSize, NULL, xMaxSize) == pdTRUE) {
        if (pxItemSize != NULL) {
            *pxItemSize = xTempSize;
        }
        return pvTempItem;
    } else {
        return NULL;
    }
}

void vRingbufferReturnItem(RingbufHandle_t xRingbuffer, void *pvItem)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);
    configASSERT(pvItem != NULL);

    portENTER_CRITICAL(&pxRingbuffer->mux);
    pxRingbuffer->vReturnItem(pxRingbuffer, (uint8_t *)pvItem);
    portEXIT_CRITICAL(&pxRingbuffer->mux);
    xSemaphoreGive(rbGET_TX_SEM_HANDLE(pxRingbuffer));
}
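
/*
 * Note (not part of the original source): every pointer obtained from the receive
 * functions above must eventually be passed back here (or to the FromISR variant)
 * so its space can be reused; for a split item, the head and tail parts are
 * returned with separate calls.
 */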

void vRingbufferReturnItemFromISR(RingbufHandle_t xRingbuffer, void *pvItem, BaseType_t *pxHigherPriorityTaskWoken)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);
    configASSERT(pvItem != NULL);

    portENTER_CRITICAL_ISR(&pxRingbuffer->mux);
    pxRingbuffer->vReturnItem(pxRingbuffer, (uint8_t *)pvItem);
    portEXIT_CRITICAL_ISR(&pxRingbuffer->mux);
    xSemaphoreGiveFromISR(rbGET_TX_SEM_HANDLE(pxRingbuffer), pxHigherPriorityTaskWoken);
}

void vRingbufferDelete(RingbufHandle_t xRingbuffer)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);

    vSemaphoreDelete(rbGET_TX_SEM_HANDLE(pxRingbuffer));
    vSemaphoreDelete(rbGET_RX_SEM_HANDLE(pxRingbuffer));

#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
    if (pxRingbuffer->uxRingbufferFlags & rbBUFFER_STATIC_FLAG) {
        //Ring buffer was statically allocated, no need to free
        return;
    }
#endif
    free(pxRingbuffer->pucHead);
    free(pxRingbuffer);
}
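
/*
 * Usage sketch (illustrative, not part of the original source): deleting a dynamically
 * created buffer once no task or ISR can still be using it. For a statically allocated
 * buffer only the semaphores are deleted above; the caller-provided storage is untouched.
 *
 *     vRingbufferDelete(xExampleBuf);
 *     xExampleBuf = NULL;
 */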

size_t xRingbufferGetMaxItemSize(RingbufHandle_t xRingbuffer)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);
    return pxRingbuffer->xMaxItemSize;
}

size_t xRingbufferGetCurFreeSize(RingbufHandle_t xRingbuffer)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);

    size_t xFreeSize;
    portENTER_CRITICAL(&pxRingbuffer->mux);
    xFreeSize = pxRingbuffer->xGetCurMaxSize(pxRingbuffer);
    portEXIT_CRITICAL(&pxRingbuffer->mux);
    return xFreeSize;
}
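
/*
 * Usage sketch (illustrative, not part of the original source): querying the buffer
 * before attempting a send. With multiple senders the result is only a snapshot, so
 * the subsequent send can still fail or time out. Names are hypothetical.
 *
 *     if (xRingbufferGetCurFreeSize(xExampleBuf) >= sizeof(pcData)) {
 *         xRingbufferSend(xExampleBuf, pcData, sizeof(pcData), 0);
 *     }
 */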

BaseType_t xRingbufferAddToQueueSetRead(RingbufHandle_t xRingbuffer, QueueSetHandle_t xQueueSet)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);

    BaseType_t xReturn;
    portENTER_CRITICAL(&pxRingbuffer->mux);
    //Cannot add semaphore to queue set if semaphore is not empty. Temporarily hold semaphore
    BaseType_t xHoldSemaphore = xSemaphoreTake(rbGET_RX_SEM_HANDLE(pxRingbuffer), 0);
    xReturn = xQueueAddToSet(rbGET_RX_SEM_HANDLE(pxRingbuffer), xQueueSet);
    if (xHoldSemaphore == pdTRUE) {
        //Return semaphore if temporarily held
        configASSERT(xSemaphoreGive(rbGET_RX_SEM_HANDLE(pxRingbuffer)) == pdTRUE);
    }
    portEXIT_CRITICAL(&pxRingbuffer->mux);
    return xReturn;
}

BaseType_t xRingbufferCanRead(RingbufHandle_t xRingbuffer, QueueSetMemberHandle_t xMember)
{
    //Check if the selected queue set member is the ring buffer's read semaphore
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);
    return (rbGET_RX_SEM_HANDLE(pxRingbuffer) == xMember) ? pdTRUE : pdFALSE;
}

BaseType_t xRingbufferRemoveFromQueueSetRead(RingbufHandle_t xRingbuffer, QueueSetHandle_t xQueueSet)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);

    BaseType_t xReturn;
    portENTER_CRITICAL(&pxRingbuffer->mux);
    //Cannot remove semaphore from queue set if semaphore is not empty. Temporarily hold semaphore
    BaseType_t xHoldSemaphore = xSemaphoreTake(rbGET_RX_SEM_HANDLE(pxRingbuffer), 0);
    xReturn = xQueueRemoveFromSet(rbGET_RX_SEM_HANDLE(pxRingbuffer), xQueueSet);
    if (xHoldSemaphore == pdTRUE) {
        //Return semaphore if temporarily held
        configASSERT(xSemaphoreGive(rbGET_RX_SEM_HANDLE(pxRingbuffer)) == pdTRUE);
    }
    portEXIT_CRITICAL(&pxRingbuffer->mux);
    return xReturn;
}
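
/*
 * Usage sketch (illustrative, not part of the original source): waiting on a ring
 * buffer together with other queue-set members. The queue set length and all names
 * are hypothetical.
 *
 *     QueueSetHandle_t xSet = xQueueCreateSet(8);
 *     xRingbufferAddToQueueSetRead(xExampleBuf, xSet);
 *     for (;;) {
 *         QueueSetMemberHandle_t xMember = xQueueSelectFromSet(xSet, portMAX_DELAY);
 *         if (xRingbufferCanRead(xExampleBuf, xMember) == pdTRUE) {
 *             size_t xItemSize;
 *             void *pvItem = xRingbufferReceive(xExampleBuf, &xItemSize, 0);
 *             if (pvItem != NULL) {
 *                 vRingbufferReturnItem(xExampleBuf, pvItem);
 *             }
 *         }
 *     }
 */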

void vRingbufferGetInfo(RingbufHandle_t xRingbuffer,
                        UBaseType_t *uxFree,
                        UBaseType_t *uxRead,
                        UBaseType_t *uxWrite,
                        UBaseType_t *uxAcquire,
                        UBaseType_t *uxItemsWaiting)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);

    portENTER_CRITICAL(&pxRingbuffer->mux);
    if (uxFree != NULL) {
        *uxFree = (UBaseType_t)(pxRingbuffer->pucFree - pxRingbuffer->pucHead);
    }
    if (uxRead != NULL) {
        *uxRead = (UBaseType_t)(pxRingbuffer->pucRead - pxRingbuffer->pucHead);
    }
    if (uxWrite != NULL) {
        *uxWrite = (UBaseType_t)(pxRingbuffer->pucWrite - pxRingbuffer->pucHead);
    }
    if (uxAcquire != NULL) {
        *uxAcquire = (UBaseType_t)(pxRingbuffer->pucAcquire - pxRingbuffer->pucHead);
    }
    if (uxItemsWaiting != NULL) {
        *uxItemsWaiting = (UBaseType_t)(pxRingbuffer->xItemsWaiting);
    }
    portEXIT_CRITICAL(&pxRingbuffer->mux);
}
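
/*
 * Usage sketch (illustrative, not part of the original source): reading the current
 * pointer offsets and waiting-item count for debugging. Any output pointer may be
 * NULL if that value is not needed. Names are hypothetical.
 *
 *     UBaseType_t uxFree, uxRead, uxWrite, uxAcquire, uxItemsWaiting;
 *     vRingbufferGetInfo(xExampleBuf, &uxFree, &uxRead, &uxWrite, &uxAcquire, &uxItemsWaiting);
 *     printf("free@%u read@%u write@%u acquire@%u waiting:%u\n",
 *            (unsigned)uxFree, (unsigned)uxRead, (unsigned)uxWrite,
 *            (unsigned)uxAcquire, (unsigned)uxItemsWaiting);
 */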

void xRingbufferPrintInfo(RingbufHandle_t xRingbuffer)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);
    printf("Rb size:%d\tfree: %d\trptr: %d\tfreeptr: %d\twptr: %d, aptr: %d\n",
           pxRingbuffer->xSize, prvGetFreeSize(pxRingbuffer),
           pxRingbuffer->pucRead - pxRingbuffer->pucHead,
           pxRingbuffer->pucFree - pxRingbuffer->pucHead,
           pxRingbuffer->pucWrite - pxRingbuffer->pucHead,
           pxRingbuffer->pucAcquire - pxRingbuffer->pucHead);
}