// Copyright 2015-2021 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <stdlib.h>
#include <string.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include "freertos/ringbuf.h"

//32-bit alignment macros
#define rbALIGN_SIZE( xSize )       ( ( xSize + portBYTE_ALIGNMENT_MASK ) & ~portBYTE_ALIGNMENT_MASK )
#define rbCHECK_ALIGNED( pvPtr )    ( ( ( UBaseType_t ) ( pvPtr ) & portBYTE_ALIGNMENT_MASK ) == 0 )
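/*
 * Illustrative worked example (not from the original source), assuming a
 * typical 32-bit port where portBYTE_ALIGNMENT is 4 and portBYTE_ALIGNMENT_MASK
 * is 0x3, as the "32-bit alignment" comment above implies:
 *   rbALIGN_SIZE(5)  -> (5 + 3) & ~3 = 8
 *   rbALIGN_SIZE(8)  -> (8 + 3) & ~3 = 8
 *   rbCHECK_ALIGNED(0x3FF80004) -> true, rbCHECK_ALIGNED(0x3FF80005) -> false
 */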

//Ring buffer flags
#define rbALLOW_SPLIT_FLAG      ( ( UBaseType_t ) 1 )   //The ring buffer allows items to be split
#define rbBYTE_BUFFER_FLAG      ( ( UBaseType_t ) 2 )   //The ring buffer is a byte buffer
#define rbBUFFER_FULL_FLAG      ( ( UBaseType_t ) 4 )   //The ring buffer is currently full (write pointer == free pointer)
#define rbBUFFER_STATIC_FLAG    ( ( UBaseType_t ) 8 )   //The ring buffer is statically allocated

//Item flags
#define rbITEM_FREE_FLAG        ( ( UBaseType_t ) 1 )   //Item has been retrieved and returned by the application, free to overwrite
#define rbITEM_DUMMY_DATA_FLAG  ( ( UBaseType_t ) 2 )   //Data from here to the end of the ring buffer is dummy data; restart reading at the head of the buffer
#define rbITEM_SPLIT_FLAG       ( ( UBaseType_t ) 4 )   //Valid for RINGBUF_TYPE_ALLOWSPLIT, indicating that the rest of the data is wrapped around
#define rbITEM_WRITTEN_FLAG     ( ( UBaseType_t ) 8 )   //Item has been written to by the application, thus can be read

//Static allocation related
#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
#define rbGET_TX_SEM_HANDLE( pxRingbuffer ) ( (SemaphoreHandle_t) &(pxRingbuffer->xTransSemStatic) )
#define rbGET_RX_SEM_HANDLE( pxRingbuffer ) ( (SemaphoreHandle_t) &(pxRingbuffer->xRecvSemStatic) )
#else
#define rbGET_TX_SEM_HANDLE( pxRingbuffer ) ( pxRingbuffer->xTransSemHandle )
#define rbGET_RX_SEM_HANDLE( pxRingbuffer ) ( pxRingbuffer->xRecvSemHandle )
#endif

typedef struct {
    //The size of this structure must be 32-bit aligned
    size_t xItemLen;
    UBaseType_t uxItemFlags;
} ItemHeader_t;

#define rbHEADER_SIZE   sizeof(ItemHeader_t)

typedef struct RingbufferDefinition Ringbuffer_t;
typedef BaseType_t (*CheckItemFitsFunction_t)(Ringbuffer_t *pxRingbuffer, size_t xItemSize);
typedef void (*CopyItemFunction_t)(Ringbuffer_t *pxRingbuffer, const uint8_t *pcItem, size_t xItemSize);
typedef BaseType_t (*CheckItemAvailFunction_t)(Ringbuffer_t *pxRingbuffer);
typedef void *(*GetItemFunction_t)(Ringbuffer_t *pxRingbuffer, BaseType_t *pxIsSplit, size_t xMaxSize, size_t *pxItemSize);
typedef void (*ReturnItemFunction_t)(Ringbuffer_t *pxRingbuffer, uint8_t *pvItem);
typedef size_t (*GetCurMaxSizeFunction_t)(Ringbuffer_t *pxRingbuffer);

typedef struct RingbufferDefinition {
    size_t xSize;                               //Size of the data storage
    size_t xMaxItemSize;                        //Maximum item size
    UBaseType_t uxRingbufferFlags;              //Flags to indicate the type and status of ring buffer

    CheckItemFitsFunction_t xCheckItemFits;     //Function to check if item can currently fit in ring buffer
    CopyItemFunction_t vCopyItem;               //Function to copy item to ring buffer
    GetItemFunction_t pvGetItem;                //Function to get item from ring buffer
    ReturnItemFunction_t vReturnItem;           //Function to return item to ring buffer
    GetCurMaxSizeFunction_t xGetCurMaxSize;     //Function to get current free size

    uint8_t *pucAcquire;                        //Acquire Pointer. Points to where the next item should be acquired.
    uint8_t *pucWrite;                          //Write Pointer. Points to where the next item should be written
    uint8_t *pucRead;                           //Read Pointer. Points to where the next item should be read from
    uint8_t *pucFree;                           //Free Pointer. Points to the last item that has yet to be returned to the ring buffer
    uint8_t *pucHead;                           //Pointer to the start of the ring buffer storage area
    uint8_t *pucTail;                           //Pointer to the end of the ring buffer storage area

    BaseType_t xItemsWaiting;                   //Number of items/bytes (for byte buffers) currently in the ring buffer that have not yet been read
    /*
     * TransSem: Binary semaphore used to indicate to a blocked transmitting task
     *           that more free space has become available or that the block has
     *           timed out.
     *
     * RecvSem: Binary semaphore used to indicate to a blocked receiving task that
     *          new data/items have been written to the ring buffer.
     *
     * Note - When static allocation is enabled, the two semaphores are always
     *        statically stored in the ring buffer's control structure
     *        regardless of whether the ring buffer is allocated dynamically or
     *        statically. When static allocation is disabled, the two semaphores
     *        are allocated dynamically and their handles stored instead, thus
     *        making the ring buffer's control structure slightly smaller when
     *        static allocation is disabled.
     */
#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
    StaticSemaphore_t xTransSemStatic;
    StaticSemaphore_t xRecvSemStatic;
#else
    SemaphoreHandle_t xTransSemHandle;
    SemaphoreHandle_t xRecvSemHandle;
#endif
    portMUX_TYPE mux;                           //Spinlock required for SMP
} Ringbuffer_t;

#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
#if __GNUC_PREREQ(4, 6)
_Static_assert(sizeof(StaticRingbuffer_t) == sizeof(Ringbuffer_t), "StaticRingbuffer_t != Ringbuffer_t");
#endif
#endif

/*
 * Remark: A counting semaphore for items_buffered_sem would be more logical, but counting semaphores in
 * FreeRTOS need a maximum count and allocate more memory the larger the maximum count is. Here, we
 * would need to set the maximum to the maximum number of times a null-byte unit fits in the buffer,
 * which is quite high and so would waste a fair amount of memory.
 */

/* --------------------------- Static Declarations -------------------------- */
/*
 * WARNING: All of the following static functions (except generic functions)
 * ARE NOT THREAD SAFE. Therefore they should only be called within a critical
 * section (using spin locks)
 */
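
/*
 * Illustrative sketch (not part of this driver; guarded out of the build below):
 * the pattern callers are expected to use around the non-thread-safe helpers
 * declared beneath, i.e. wrapping them in the ring buffer's spinlock. The
 * wrapper name prvExampleGetFreeSizeThreadSafe is hypothetical.
 */
#if 0
static size_t prvExampleGetFreeSizeThreadSafe(Ringbuffer_t *pxRingbuffer)
{
    size_t xFree;
    portENTER_CRITICAL(&pxRingbuffer->mux);     //Take the buffer's spinlock
    xFree = prvGetFreeSize(pxRingbuffer);       //Non-thread-safe helper is safe to call here
    portEXIT_CRITICAL(&pxRingbuffer->mux);      //Release the spinlock
    return xFree;
}
#endif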

//Initialize a ring buffer after space has been allocated for it
static void prvInitializeNewRingbuffer(size_t xBufferSize,
                                       RingbufferType_t xBufferType,
                                       Ringbuffer_t *pxNewRingbuffer,
                                       uint8_t *pucRingbufferStorage);

//Calculate current amount of free space (in bytes) in the ring buffer
static size_t prvGetFreeSize(Ringbuffer_t *pxRingbuffer);

//Checks if an item/data is currently available for retrieval
static BaseType_t prvCheckItemAvail(Ringbuffer_t *pxRingbuffer);

//Checks if an item will currently fit in a no-split/allow-split ring buffer
static BaseType_t prvCheckItemFitsDefault(Ringbuffer_t *pxRingbuffer, size_t xItemSize);

//Checks if an item will currently fit in a byte buffer
static BaseType_t prvCheckItemFitsByteBuffer(Ringbuffer_t *pxRingbuffer, size_t xItemSize);

/*
Copies an item to a no-split ring buffer
Entry:
 - Must have already guaranteed there is sufficient space for the item by calling prvCheckItemFitsDefault()
Exit:
 - New item copied into ring buffer
 - pucAcquire and pucWrite updated
 - Dummy item added if necessary
*/
static void prvCopyItemNoSplit(Ringbuffer_t *pxRingbuffer, const uint8_t *pucItem, size_t xItemSize);

/*
Copies an item to an allow-split ring buffer
Entry:
 - Must have already guaranteed there is sufficient space for the item by calling prvCheckItemFitsDefault()
Exit:
 - New item copied into ring buffer
 - pucAcquire and pucWrite updated
 - Item may be split
*/
static void prvCopyItemAllowSplit(Ringbuffer_t *pxRingbuffer, const uint8_t *pucItem, size_t xItemSize);

//Copies an item to a byte buffer. Only call this function after calling prvCheckItemFitsByteBuffer()
static void prvCopyItemByteBuf(Ringbuffer_t *pxRingbuffer, const uint8_t *pucItem, size_t xItemSize);

//Retrieve an item from a no-split/allow-split ring buffer. *pxIsSplit is set to pdTRUE if the retrieved item is split
/*
Entry:
 - Must have already guaranteed that there is an item available for retrieval by calling prvCheckItemAvail()
 - Guaranteed that pucRead points to a valid item (i.e., not a dummy item)
Exit:
 - Item is returned. Only the first half is returned if split
 - pucRead is updated to point to the next valid item to read, or equals pucWrite if there are no more valid items to read
 - The pucRead update must skip over dummy items
*/
static void *prvGetItemDefault(Ringbuffer_t *pxRingbuffer,
                               BaseType_t *pxIsSplit,
                               size_t xUnusedParam,
                               size_t *pxItemSize);

//Retrieve data from a byte buffer. If xMaxSize is 0, all continuous data is retrieved
static void *prvGetItemByteBuf(Ringbuffer_t *pxRingbuffer,
                               BaseType_t *pxUnusedParam,
                               size_t xMaxSize,
                               size_t *pxItemSize);

/*
Return an item to a no-split/allow-split ring buffer
Exit:
 - Item is marked free with rbITEM_FREE_FLAG
 - pucFree is progressed as far as possible, skipping over already freed items or dummy items
*/
static void prvReturnItemDefault(Ringbuffer_t *pxRingbuffer, uint8_t *pucItem);

//Return data to a byte buffer
static void prvReturnItemByteBuf(Ringbuffer_t *pxRingbuffer, uint8_t *pucItem);

//Get the maximum size an item can currently have if sent to a no-split ring buffer
static size_t prvGetCurMaxSizeNoSplit(Ringbuffer_t *pxRingbuffer);

//Get the maximum size an item can currently have if sent to an allow-split ring buffer
static size_t prvGetCurMaxSizeAllowSplit(Ringbuffer_t *pxRingbuffer);

//Get the maximum size an item can currently have if sent to a byte buffer
static size_t prvGetCurMaxSizeByteBuf(Ringbuffer_t *pxRingbuffer);

/**
 * Generic function used to retrieve an item/data from ring buffers. If called on
 * an allow-split buffer, and pvItem2 and xItemSize2 are not NULL, both parts of
 * a split item will be retrieved. xMaxSize will only take effect if called on
 * byte buffers.
 */
static BaseType_t prvReceiveGeneric(Ringbuffer_t *pxRingbuffer,
                                    void **pvItem1,
                                    void **pvItem2,
                                    size_t *xItemSize1,
                                    size_t *xItemSize2,
                                    size_t xMaxSize,
                                    TickType_t xTicksToWait);

//Generic function used to retrieve an item/data from ring buffers in an ISR
static BaseType_t prvReceiveGenericFromISR(Ringbuffer_t *pxRingbuffer,
                                           void **pvItem1,
                                           void **pvItem2,
                                           size_t *xItemSize1,
                                           size_t *xItemSize2,
                                           size_t xMaxSize);

/* --------------------------- Static Definitions --------------------------- */

static void prvInitializeNewRingbuffer(size_t xBufferSize,
                                       RingbufferType_t xBufferType,
                                       Ringbuffer_t *pxNewRingbuffer,
                                       uint8_t *pucRingbufferStorage)
{
    //Initialize values
    pxNewRingbuffer->xSize = xBufferSize;
    pxNewRingbuffer->pucHead = pucRingbufferStorage;
    pxNewRingbuffer->pucTail = pucRingbufferStorage + xBufferSize;
    pxNewRingbuffer->pucFree = pucRingbufferStorage;
    pxNewRingbuffer->pucRead = pucRingbufferStorage;
    pxNewRingbuffer->pucWrite = pucRingbufferStorage;
    pxNewRingbuffer->pucAcquire = pucRingbufferStorage;
    pxNewRingbuffer->xItemsWaiting = 0;
    pxNewRingbuffer->uxRingbufferFlags = 0;

    //Initialize type dependent values and function pointers
    if (xBufferType == RINGBUF_TYPE_NOSPLIT) {
        pxNewRingbuffer->xCheckItemFits = prvCheckItemFitsDefault;
        pxNewRingbuffer->vCopyItem = prvCopyItemNoSplit;
        pxNewRingbuffer->pvGetItem = prvGetItemDefault;
        pxNewRingbuffer->vReturnItem = prvReturnItemDefault;
        /*
         * Worst case scenario is when the read/write/acquire/free pointers are all
         * pointing to the halfway point of the buffer.
         */
        pxNewRingbuffer->xMaxItemSize = rbALIGN_SIZE(pxNewRingbuffer->xSize / 2) - rbHEADER_SIZE;
        pxNewRingbuffer->xGetCurMaxSize = prvGetCurMaxSizeNoSplit;
    } else if (xBufferType == RINGBUF_TYPE_ALLOWSPLIT) {
        pxNewRingbuffer->uxRingbufferFlags |= rbALLOW_SPLIT_FLAG;
        pxNewRingbuffer->xCheckItemFits = prvCheckItemFitsDefault;
        pxNewRingbuffer->vCopyItem = prvCopyItemAllowSplit;
        pxNewRingbuffer->pvGetItem = prvGetItemDefault;
        pxNewRingbuffer->vReturnItem = prvReturnItemDefault;
        //Worst case an item is split into two, incurring two headers of overhead
        pxNewRingbuffer->xMaxItemSize = pxNewRingbuffer->xSize - (sizeof(ItemHeader_t) * 2);
        pxNewRingbuffer->xGetCurMaxSize = prvGetCurMaxSizeAllowSplit;
    } else { //Byte Buffer
        pxNewRingbuffer->uxRingbufferFlags |= rbBYTE_BUFFER_FLAG;
        pxNewRingbuffer->xCheckItemFits = prvCheckItemFitsByteBuffer;
        pxNewRingbuffer->vCopyItem = prvCopyItemByteBuf;
        pxNewRingbuffer->pvGetItem = prvGetItemByteBuf;
        pxNewRingbuffer->vReturnItem = prvReturnItemByteBuf;
        //Byte buffers do not incur any overhead
        pxNewRingbuffer->xMaxItemSize = pxNewRingbuffer->xSize;
        pxNewRingbuffer->xGetCurMaxSize = prvGetCurMaxSizeByteBuf;
    }
    xSemaphoreGive(rbGET_TX_SEM_HANDLE(pxNewRingbuffer));
    vPortCPUInitializeMutex(&pxNewRingbuffer->mux);
}
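/*
 * Illustrative worked example (not from the original source), assuming a 32-bit
 * port where rbHEADER_SIZE is 8 bytes and alignment is 4 bytes: for a 128-byte
 * storage area, xMaxItemSize is rbALIGN_SIZE(128 / 2) - 8 = 56 bytes for a
 * no-split buffer, 128 - (8 * 2) = 112 bytes for an allow-split buffer, and the
 * full 128 bytes for a byte buffer.
 */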

static size_t prvGetFreeSize(Ringbuffer_t *pxRingbuffer)
{
    size_t xReturn;
    if (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) {
        xReturn = 0;
    } else {
        BaseType_t xFreeSize = pxRingbuffer->pucFree - pxRingbuffer->pucAcquire;
        //Check if xFreeSize has underflowed
        if (xFreeSize <= 0) {
            xFreeSize += pxRingbuffer->xSize;
        }
        xReturn = xFreeSize;
    }
    configASSERT(xReturn <= pxRingbuffer->xSize);
    return xReturn;
}

static BaseType_t prvCheckItemFitsDefault(Ringbuffer_t *pxRingbuffer, size_t xItemSize)
{
    //Check arguments and buffer state
    configASSERT(rbCHECK_ALIGNED(pxRingbuffer->pucAcquire));    //pucAcquire is always aligned in no-split/allow-split ring buffers
    configASSERT(pxRingbuffer->pucAcquire >= pxRingbuffer->pucHead && pxRingbuffer->pucAcquire < pxRingbuffer->pucTail);    //Check acquire pointer is within bounds

    size_t xTotalItemSize = rbALIGN_SIZE(xItemSize) + rbHEADER_SIZE;    //Rounded up aligned item size with header
    if (pxRingbuffer->pucAcquire == pxRingbuffer->pucFree) {
        //Buffer is either completely empty or completely full
        return (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) ? pdFALSE : pdTRUE;
    }
    if (pxRingbuffer->pucFree > pxRingbuffer->pucAcquire) {
        //Free space does not wrap around
        return (xTotalItemSize <= pxRingbuffer->pucFree - pxRingbuffer->pucAcquire) ? pdTRUE : pdFALSE;
    }
    //Free space wraps around
    if (xTotalItemSize <= pxRingbuffer->pucTail - pxRingbuffer->pucAcquire) {
        return pdTRUE;      //Item fits without wrapping around
    }
    //Check if item fits by wrapping
    if (pxRingbuffer->uxRingbufferFlags & rbALLOW_SPLIT_FLAG) {
        //Allow-split wrapping incurs an extra header
        return (xTotalItemSize + rbHEADER_SIZE <= pxRingbuffer->xSize - (pxRingbuffer->pucAcquire - pxRingbuffer->pucFree)) ? pdTRUE : pdFALSE;
    } else {
        return (xTotalItemSize <= pxRingbuffer->pucFree - pxRingbuffer->pucHead) ? pdTRUE : pdFALSE;
    }
}

static BaseType_t prvCheckItemFitsByteBuffer(Ringbuffer_t *pxRingbuffer, size_t xItemSize)
{
    //Check arguments and buffer state
    configASSERT(pxRingbuffer->pucAcquire >= pxRingbuffer->pucHead && pxRingbuffer->pucAcquire < pxRingbuffer->pucTail);    //Check acquire pointer is within bounds

    if (pxRingbuffer->pucAcquire == pxRingbuffer->pucFree) {
        //Buffer is either completely empty or completely full
        return (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) ? pdFALSE : pdTRUE;
    }
    if (pxRingbuffer->pucFree > pxRingbuffer->pucAcquire) {
        //Free space does not wrap around
        return (xItemSize <= pxRingbuffer->pucFree - pxRingbuffer->pucAcquire) ? pdTRUE : pdFALSE;
    }
    //Free space wraps around
    return (xItemSize <= pxRingbuffer->xSize - (pxRingbuffer->pucAcquire - pxRingbuffer->pucFree)) ? pdTRUE : pdFALSE;
}

static uint8_t *prvAcquireItemNoSplit(Ringbuffer_t *pxRingbuffer, size_t xItemSize)
{
    //Check arguments and buffer state
    size_t xAlignedItemSize = rbALIGN_SIZE(xItemSize);                  //Rounded up aligned item size
    size_t xRemLen = pxRingbuffer->pucTail - pxRingbuffer->pucAcquire;  //Length from pucAcquire until end of buffer
    configASSERT(rbCHECK_ALIGNED(pxRingbuffer->pucAcquire));            //pucAcquire is always aligned in no-split ring buffers
    configASSERT(pxRingbuffer->pucAcquire >= pxRingbuffer->pucHead && pxRingbuffer->pucAcquire < pxRingbuffer->pucTail);    //Check acquire pointer is within bounds
    configASSERT(xRemLen >= rbHEADER_SIZE);                             //Remaining length must be able to at least fit an item header

    //If remaining length can't fit the item, set it as dummy data and wrap around
    if (xRemLen < xAlignedItemSize + rbHEADER_SIZE) {
        ItemHeader_t *pxDummy = (ItemHeader_t *)pxRingbuffer->pucAcquire;
        pxDummy->uxItemFlags = rbITEM_DUMMY_DATA_FLAG;      //Set remaining length as dummy data
        pxDummy->xItemLen = 0;                              //Dummy data should have no length
        pxRingbuffer->pucAcquire = pxRingbuffer->pucHead;   //Reset acquire pointer to wrap around
    }

    //Item should be guaranteed to fit at this point. Set the item header
    ItemHeader_t *pxHeader = (ItemHeader_t *)pxRingbuffer->pucAcquire;
    pxHeader->xItemLen = xItemSize;
    pxHeader->uxItemFlags = 0;

    //Hold the buffer address without touching pucWrite
    uint8_t *item_address = pxRingbuffer->pucAcquire + rbHEADER_SIZE;
    pxRingbuffer->pucAcquire += rbHEADER_SIZE + xAlignedItemSize;   //Advance pucAcquire past header and the item to next aligned address

    //After the allocation, add some padding after the buffer and correct the flags
    //If current remaining length can't fit a header, wrap around the acquire pointer
    if (pxRingbuffer->pucTail - pxRingbuffer->pucAcquire < rbHEADER_SIZE) {
        pxRingbuffer->pucAcquire = pxRingbuffer->pucHead;   //Wrap around pucAcquire
    }
    //Check if buffer is full
    if (pxRingbuffer->pucAcquire == pxRingbuffer->pucFree) {
        //Mark the buffer as full to distinguish it from an empty buffer
        pxRingbuffer->uxRingbufferFlags |= rbBUFFER_FULL_FLAG;
    }
    return item_address;
}

static void prvSendItemDoneNoSplit(Ringbuffer_t *pxRingbuffer, uint8_t *pucItem)
{
    //Check arguments and buffer state
    configASSERT(rbCHECK_ALIGNED(pucItem));
    configASSERT(pucItem >= pxRingbuffer->pucHead);
    configASSERT(pucItem <= pxRingbuffer->pucTail);     //Inclusive of pucTail in the case of a zero length item at the very end

    //Get and check header of the item
    ItemHeader_t *pxCurHeader = (ItemHeader_t *)(pucItem - rbHEADER_SIZE);
    configASSERT(pxCurHeader->xItemLen <= pxRingbuffer->xMaxItemSize);
    configASSERT((pxCurHeader->uxItemFlags & rbITEM_DUMMY_DATA_FLAG) == 0);     //Dummy items should never have been written
    configASSERT((pxCurHeader->uxItemFlags & rbITEM_WRITTEN_FLAG) == 0);        //Indicates item has already been written before
    pxCurHeader->uxItemFlags &= ~rbITEM_SPLIT_FLAG;     //Clear wrap flag if set (not strictly necessary)
    pxCurHeader->uxItemFlags |= rbITEM_WRITTEN_FLAG;    //Mark as written
    pxRingbuffer->xItemsWaiting++;

    /*
     * Items might not be written in the order they were acquired. Move the
     * write pointer up to the next item that has not been marked as written (by
     * the written flag) or up to the acquire pointer. When advancing the write
     * pointer, items that have already been written or items with dummy data
     * should be skipped over
     */
    pxCurHeader = (ItemHeader_t *)pxRingbuffer->pucWrite;
    //Skip over items that have already been written or are dummy items
    while (((pxCurHeader->uxItemFlags & rbITEM_WRITTEN_FLAG) || (pxCurHeader->uxItemFlags & rbITEM_DUMMY_DATA_FLAG)) && pxRingbuffer->pucWrite != pxRingbuffer->pucAcquire) {
        if (pxCurHeader->uxItemFlags & rbITEM_DUMMY_DATA_FLAG) {
            pxCurHeader->uxItemFlags |= rbITEM_WRITTEN_FLAG;    //Mark as written (not strictly necessary but adds redundancy)
            pxRingbuffer->pucWrite = pxRingbuffer->pucHead;     //Wrap around due to dummy data
        } else {
            //Item with data that has already been written, advance write pointer past this item
            size_t xAlignedItemSize = rbALIGN_SIZE(pxCurHeader->xItemLen);
            pxRingbuffer->pucWrite += xAlignedItemSize + rbHEADER_SIZE;
            //Redundancy check to ensure write pointer has not overshot buffer bounds
            configASSERT(pxRingbuffer->pucWrite <= pxRingbuffer->pucHead + pxRingbuffer->xSize);
        }
        //Check if pucWrite requires wrap around
        if ((pxRingbuffer->pucTail - pxRingbuffer->pucWrite) < rbHEADER_SIZE) {
            pxRingbuffer->pucWrite = pxRingbuffer->pucHead;
        }
        pxCurHeader = (ItemHeader_t *)pxRingbuffer->pucWrite;   //Update header to point to item
    }
}

static void prvCopyItemNoSplit(Ringbuffer_t *pxRingbuffer, const uint8_t *pucItem, size_t xItemSize)
{
    uint8_t *item_addr = prvAcquireItemNoSplit(pxRingbuffer, xItemSize);
    memcpy(item_addr, pucItem, xItemSize);
    prvSendItemDoneNoSplit(pxRingbuffer, item_addr);
}

static void prvCopyItemAllowSplit(Ringbuffer_t *pxRingbuffer, const uint8_t *pucItem, size_t xItemSize)
{
    //Check arguments and buffer state
    size_t xAlignedItemSize = rbALIGN_SIZE(xItemSize);                  //Rounded up aligned item size
    size_t xRemLen = pxRingbuffer->pucTail - pxRingbuffer->pucAcquire;  //Length from pucAcquire until end of buffer
    configASSERT(rbCHECK_ALIGNED(pxRingbuffer->pucAcquire));            //pucAcquire is always aligned in allow-split ring buffers
    configASSERT(pxRingbuffer->pucAcquire >= pxRingbuffer->pucHead && pxRingbuffer->pucAcquire < pxRingbuffer->pucTail);    //Check acquire pointer is within bounds
    configASSERT(xRemLen >= rbHEADER_SIZE);                             //Remaining length must be able to at least fit an item header

    //Split item if necessary
    if (xRemLen < xAlignedItemSize + rbHEADER_SIZE) {
        //Write first part of the item
        ItemHeader_t *pxFirstHeader = (ItemHeader_t *)pxRingbuffer->pucAcquire;
        pxFirstHeader->uxItemFlags = 0;
        pxFirstHeader->xItemLen = xRemLen - rbHEADER_SIZE;  //Fill remaining length with first part
        pxRingbuffer->pucAcquire += rbHEADER_SIZE;          //Advance pucAcquire past header
        xRemLen -= rbHEADER_SIZE;
        if (xRemLen > 0) {
            memcpy(pxRingbuffer->pucAcquire, pucItem, xRemLen);
            pxRingbuffer->xItemsWaiting++;
            //Update item arguments to account for data already copied
            pucItem += xRemLen;
            xItemSize -= xRemLen;
            xAlignedItemSize -= xRemLen;
            pxFirstHeader->uxItemFlags |= rbITEM_SPLIT_FLAG;        //There must be more data
        } else {
            //Remaining length was only large enough to fit the header
            pxFirstHeader->uxItemFlags |= rbITEM_DUMMY_DATA_FLAG;   //Item will be stored entirely in the 2nd part
        }
        pxRingbuffer->pucAcquire = pxRingbuffer->pucHead;   //Reset acquire pointer to start of buffer
    }

    //Item (whole or second part) should be guaranteed to fit at this point
    ItemHeader_t *pxSecondHeader = (ItemHeader_t *)pxRingbuffer->pucAcquire;
    pxSecondHeader->xItemLen = xItemSize;
    pxSecondHeader->uxItemFlags = 0;
    pxRingbuffer->pucAcquire += rbHEADER_SIZE;              //Advance acquire pointer past header
    memcpy(pxRingbuffer->pucAcquire, pucItem, xItemSize);
    pxRingbuffer->xItemsWaiting++;
    pxRingbuffer->pucAcquire += xAlignedItemSize;           //Advance pucAcquire past item to next aligned address

    //If current remaining length can't fit a header, wrap around the acquire pointer
    if (pxRingbuffer->pucTail - pxRingbuffer->pucAcquire < rbHEADER_SIZE) {
        pxRingbuffer->pucAcquire = pxRingbuffer->pucHead;   //Wrap around pucAcquire
    }
    //Check if buffer is full
    if (pxRingbuffer->pucAcquire == pxRingbuffer->pucFree) {
        //Mark the buffer as full to distinguish it from an empty buffer
        pxRingbuffer->uxRingbufferFlags |= rbBUFFER_FULL_FLAG;
    }
    //Acquiring is currently not supported in allow-split mode, so pucWrite simply tracks pucAcquire
    pxRingbuffer->pucWrite = pxRingbuffer->pucAcquire;
}

static void prvCopyItemByteBuf(Ringbuffer_t *pxRingbuffer, const uint8_t *pucItem, size_t xItemSize)
{
    //Check arguments and buffer state
    configASSERT(pxRingbuffer->pucAcquire >= pxRingbuffer->pucHead && pxRingbuffer->pucAcquire < pxRingbuffer->pucTail);    //Check acquire pointer is within bounds

    size_t xRemLen = pxRingbuffer->pucTail - pxRingbuffer->pucAcquire;  //Length from pucAcquire until end of buffer
    if (xRemLen < xItemSize) {
        //Copy as much as possible into remaining length
        memcpy(pxRingbuffer->pucAcquire, pucItem, xRemLen);
        pxRingbuffer->xItemsWaiting += xRemLen;
        //Update item arguments to account for data already written
        pucItem += xRemLen;
        xItemSize -= xRemLen;
        pxRingbuffer->pucAcquire = pxRingbuffer->pucHead;   //Reset acquire pointer to start of buffer
    }
    //Copy all or remaining portion of the item
    memcpy(pxRingbuffer->pucAcquire, pucItem, xItemSize);
    pxRingbuffer->xItemsWaiting += xItemSize;
    pxRingbuffer->pucAcquire += xItemSize;

    //Wrap around pucAcquire if it reaches the end
    if (pxRingbuffer->pucAcquire == pxRingbuffer->pucTail) {
        pxRingbuffer->pucAcquire = pxRingbuffer->pucHead;
    }
    //Check if buffer is full
    if (pxRingbuffer->pucAcquire == pxRingbuffer->pucFree) {
        pxRingbuffer->uxRingbufferFlags |= rbBUFFER_FULL_FLAG;      //Mark the buffer as full to avoid confusion with an empty buffer
    }
    //Currently, acquiring memory is not supported in byte mode. pucWrite tracks pucAcquire.
    pxRingbuffer->pucWrite = pxRingbuffer->pucAcquire;
}

static BaseType_t prvCheckItemAvail(Ringbuffer_t *pxRingbuffer)
{
    if ((pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG) && pxRingbuffer->pucRead != pxRingbuffer->pucFree) {
        return pdFALSE;     //Byte buffers do not allow multiple retrievals before return
    }
    if ((pxRingbuffer->xItemsWaiting > 0) && ((pxRingbuffer->pucRead != pxRingbuffer->pucWrite) || (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG))) {
        return pdTRUE;      //Items/data available for retrieval
    } else {
        return pdFALSE;     //No items/data available for retrieval
    }
}

static void *prvGetItemDefault(Ringbuffer_t *pxRingbuffer,
                               BaseType_t *pxIsSplit,
                               size_t xUnusedParam,
                               size_t *pxItemSize)
{
    //Check arguments and buffer state
    ItemHeader_t *pxHeader = (ItemHeader_t *)pxRingbuffer->pucRead;
    configASSERT(pxIsSplit != NULL);
    configASSERT((pxRingbuffer->xItemsWaiting > 0) && ((pxRingbuffer->pucRead != pxRingbuffer->pucWrite) || (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG)));   //Check there are items to be read
    configASSERT(rbCHECK_ALIGNED(pxRingbuffer->pucRead));       //pucRead is always aligned in no-split/allow-split ring buffers
    configASSERT(pxRingbuffer->pucRead >= pxRingbuffer->pucHead && pxRingbuffer->pucRead < pxRingbuffer->pucTail);      //Check read pointer is within bounds
    configASSERT((pxHeader->xItemLen <= pxRingbuffer->xMaxItemSize) || (pxHeader->uxItemFlags & rbITEM_DUMMY_DATA_FLAG));

    uint8_t *pcReturn;
    //Wrap around if dummy data (dummy data indicates wrap around in no-split buffers)
    if (pxHeader->uxItemFlags & rbITEM_DUMMY_DATA_FLAG) {
        pxRingbuffer->pucRead = pxRingbuffer->pucHead;
        //Check for errors with the next item
        pxHeader = (ItemHeader_t *)pxRingbuffer->pucRead;
        configASSERT(pxHeader->xItemLen <= pxRingbuffer->xMaxItemSize);
    }
    pcReturn = pxRingbuffer->pucRead + rbHEADER_SIZE;   //Get pointer to part of item containing data (point past the header)
    if (pxHeader->xItemLen == 0) {
        //Inclusive of pucTail for the special case where an item of zero length just fits at the end of the buffer
        configASSERT(pcReturn >= pxRingbuffer->pucHead && pcReturn <= pxRingbuffer->pucTail);
    } else {
        //Exclusive of pucTail if length is larger than zero; pcReturn should never point to pucTail
        configASSERT(pcReturn >= pxRingbuffer->pucHead && pcReturn < pxRingbuffer->pucTail);
    }
    *pxItemSize = pxHeader->xItemLen;   //Get length of item
    pxRingbuffer->xItemsWaiting--;      //Update item count
    *pxIsSplit = (pxHeader->uxItemFlags & rbITEM_SPLIT_FLAG) ? pdTRUE : pdFALSE;

    pxRingbuffer->pucRead += rbHEADER_SIZE + rbALIGN_SIZE(pxHeader->xItemLen);  //Update pucRead
    //Check if pucRead requires wrap around
    if ((pxRingbuffer->pucTail - pxRingbuffer->pucRead) < rbHEADER_SIZE) {
        pxRingbuffer->pucRead = pxRingbuffer->pucHead;
    }
    return (void *)pcReturn;
}

static void *prvGetItemByteBuf(Ringbuffer_t *pxRingbuffer,
                               BaseType_t *pxUnusedParam,
                               size_t xMaxSize,
                               size_t *pxItemSize)
{
    //Check arguments and buffer state
    configASSERT((pxRingbuffer->xItemsWaiting > 0) && ((pxRingbuffer->pucRead != pxRingbuffer->pucWrite) || (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG)));   //Check there are items to be read
    configASSERT(pxRingbuffer->pucRead >= pxRingbuffer->pucHead && pxRingbuffer->pucRead < pxRingbuffer->pucTail);      //Check read pointer is within bounds
    configASSERT(pxRingbuffer->pucRead == pxRingbuffer->pucFree);

    uint8_t *ret = pxRingbuffer->pucRead;
    if ((pxRingbuffer->pucRead > pxRingbuffer->pucWrite) || (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG)) {   //Available data wraps around
        //Return contiguous piece from read pointer until buffer tail, or xMaxSize
        if (xMaxSize == 0 || pxRingbuffer->pucTail - pxRingbuffer->pucRead <= xMaxSize) {
            //All contiguous data from read pointer to tail
            *pxItemSize = pxRingbuffer->pucTail - pxRingbuffer->pucRead;
            pxRingbuffer->xItemsWaiting -= pxRingbuffer->pucTail - pxRingbuffer->pucRead;
            pxRingbuffer->pucRead = pxRingbuffer->pucHead;      //Wrap around read pointer
        } else {
            //Return xMaxSize amount of data
            *pxItemSize = xMaxSize;
            pxRingbuffer->xItemsWaiting -= xMaxSize;
            pxRingbuffer->pucRead += xMaxSize;      //Advance read pointer past retrieved data
        }
    } else {    //Available data is contiguous between read and write pointer
        if (xMaxSize == 0 || pxRingbuffer->pucWrite - pxRingbuffer->pucRead <= xMaxSize) {
            //Return all contiguous data from read to write pointer
            *pxItemSize = pxRingbuffer->pucWrite - pxRingbuffer->pucRead;
            pxRingbuffer->xItemsWaiting -= pxRingbuffer->pucWrite - pxRingbuffer->pucRead;
            pxRingbuffer->pucRead = pxRingbuffer->pucWrite;
        } else {
            //Return xMaxSize data from read pointer
            *pxItemSize = xMaxSize;
            pxRingbuffer->xItemsWaiting -= xMaxSize;
            pxRingbuffer->pucRead += xMaxSize;      //Advance read pointer past retrieved data
        }
    }
    return (void *)ret;
}

static void prvReturnItemDefault(Ringbuffer_t *pxRingbuffer, uint8_t *pucItem)
{
    //Check arguments and buffer state
    configASSERT(rbCHECK_ALIGNED(pucItem));
    configASSERT(pucItem >= pxRingbuffer->pucHead);
    configASSERT(pucItem <= pxRingbuffer->pucTail);     //Inclusive of pucTail in the case of a zero length item at the very end

    //Get and check header of the item
    ItemHeader_t *pxCurHeader = (ItemHeader_t *)(pucItem - rbHEADER_SIZE);
    configASSERT(pxCurHeader->xItemLen <= pxRingbuffer->xMaxItemSize);
    configASSERT((pxCurHeader->uxItemFlags & rbITEM_DUMMY_DATA_FLAG) == 0);     //Dummy items should never have been read
    configASSERT((pxCurHeader->uxItemFlags & rbITEM_FREE_FLAG) == 0);           //Indicates item has already been returned before
    pxCurHeader->uxItemFlags &= ~rbITEM_SPLIT_FLAG;     //Clear wrap flag if set (not strictly necessary)
    pxCurHeader->uxItemFlags |= rbITEM_FREE_FLAG;       //Mark as free

    /*
     * Items might not be returned in the order they were retrieved. Move the free pointer
     * up to the next item that has not been marked as free (by the free flag) or up
     * to the read pointer. When advancing the free pointer, items that have already been
     * freed or items with dummy data should be skipped over
     */
    pxCurHeader = (ItemHeader_t *)pxRingbuffer->pucFree;
    //Skip over items that have already been freed or are dummy items
    while (((pxCurHeader->uxItemFlags & rbITEM_FREE_FLAG) || (pxCurHeader->uxItemFlags & rbITEM_DUMMY_DATA_FLAG)) && pxRingbuffer->pucFree != pxRingbuffer->pucRead) {
        if (pxCurHeader->uxItemFlags & rbITEM_DUMMY_DATA_FLAG) {
            pxCurHeader->uxItemFlags |= rbITEM_FREE_FLAG;   //Mark as freed (not strictly necessary but adds redundancy)
            pxRingbuffer->pucFree = pxRingbuffer->pucHead;  //Wrap around due to dummy data
        } else {
            //Item with data that has already been freed, advance free pointer past this item
            size_t xAlignedItemSize = rbALIGN_SIZE(pxCurHeader->xItemLen);
            pxRingbuffer->pucFree += xAlignedItemSize + rbHEADER_SIZE;
            //Redundancy check to ensure free pointer has not overshot buffer bounds
            configASSERT(pxRingbuffer->pucFree <= pxRingbuffer->pucHead + pxRingbuffer->xSize);
        }
        //Check if pucFree requires wrap around
        if ((pxRingbuffer->pucTail - pxRingbuffer->pucFree) < rbHEADER_SIZE) {
            pxRingbuffer->pucFree = pxRingbuffer->pucHead;
        }
        pxCurHeader = (ItemHeader_t *)pxRingbuffer->pucFree;    //Update header to point to item
    }

    //Check if the buffer full flag should be reset
    if (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) {
        if (pxRingbuffer->pucFree != pxRingbuffer->pucAcquire) {
            pxRingbuffer->uxRingbufferFlags &= ~rbBUFFER_FULL_FLAG;
        } else if (pxRingbuffer->pucFree == pxRingbuffer->pucAcquire && pxRingbuffer->pucFree == pxRingbuffer->pucRead) {
            //Special case where a full buffer is completely freed in one go
            pxRingbuffer->uxRingbufferFlags &= ~rbBUFFER_FULL_FLAG;
        }
    }
}

static void prvReturnItemByteBuf(Ringbuffer_t *pxRingbuffer, uint8_t *pucItem)
{
    //Check pointer points to address inside buffer
    configASSERT((uint8_t *)pucItem >= pxRingbuffer->pucHead);
    configASSERT((uint8_t *)pucItem < pxRingbuffer->pucTail);
    //Free the read memory. Simply moves free pointer to read pointer as byte buffers do not allow multiple outstanding reads
    pxRingbuffer->pucFree = pxRingbuffer->pucRead;
    //If buffer was full before, reset full flag as free pointer has moved
    if (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) {
        pxRingbuffer->uxRingbufferFlags &= ~rbBUFFER_FULL_FLAG;
    }
}

static size_t prvGetCurMaxSizeNoSplit(Ringbuffer_t *pxRingbuffer)
{
    BaseType_t xFreeSize;
    //Check if buffer is full
    if (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) {
        return 0;
    }
    if (pxRingbuffer->pucAcquire < pxRingbuffer->pucFree) {
        //Free space is contiguous between pucAcquire and pucFree
        xFreeSize = pxRingbuffer->pucFree - pxRingbuffer->pucAcquire;
    } else {
        //Free space wraps around (or overlapped at pucHead), select largest
        //contiguous free space as no-split items require contiguous space
        size_t xSize1 = pxRingbuffer->pucTail - pxRingbuffer->pucAcquire;
        size_t xSize2 = pxRingbuffer->pucFree - pxRingbuffer->pucHead;
        xFreeSize = (xSize1 > xSize2) ? xSize1 : xSize2;
    }
    //No-split ring buffer items need space for a header
    xFreeSize -= rbHEADER_SIZE;
    //Check for xFreeSize < 0 before checking xFreeSize > pxRingbuffer->xMaxItemSize
    //to avoid incorrect comparison operation when xFreeSize is negative
    if (xFreeSize < 0) {
        //Occurs when free space is less than header size
        xFreeSize = 0;
    } else if (xFreeSize > pxRingbuffer->xMaxItemSize) {
        //Limit free size to be within bounds
        xFreeSize = pxRingbuffer->xMaxItemSize;
    }
    return xFreeSize;
}

static size_t prvGetCurMaxSizeAllowSplit(Ringbuffer_t *pxRingbuffer)
{
    BaseType_t xFreeSize;
    //Check if buffer is full
    if (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) {
        return 0;
    }
    if (pxRingbuffer->pucAcquire == pxRingbuffer->pucHead && pxRingbuffer->pucFree == pxRingbuffer->pucHead) {
        //Check for special case where pucAcquire and pucFree are both at pucHead
        xFreeSize = pxRingbuffer->xSize - rbHEADER_SIZE;
    } else if (pxRingbuffer->pucAcquire < pxRingbuffer->pucFree) {
        //Free space is contiguous between pucAcquire and pucFree, requires single header
        xFreeSize = (pxRingbuffer->pucFree - pxRingbuffer->pucAcquire) - rbHEADER_SIZE;
    } else {
        //Free space wraps around, requires two headers
        xFreeSize = (pxRingbuffer->pucFree - pxRingbuffer->pucHead) +
                    (pxRingbuffer->pucTail - pxRingbuffer->pucAcquire) -
                    (rbHEADER_SIZE * 2);
    }
    //Check for xFreeSize < 0 before checking xFreeSize > pxRingbuffer->xMaxItemSize
    //to avoid incorrect comparison operation when xFreeSize is negative
    if (xFreeSize < 0) {
        xFreeSize = 0;
    } else if (xFreeSize > pxRingbuffer->xMaxItemSize) {
        //Limit free size to be within bounds
        xFreeSize = pxRingbuffer->xMaxItemSize;
    }
    return xFreeSize;
}

static size_t prvGetCurMaxSizeByteBuf(Ringbuffer_t *pxRingbuffer)
{
    BaseType_t xFreeSize;
    //Check if buffer is full
    if (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) {
        return 0;
    }
    /*
     * Return whatever space is available depending on relative positions of the free
     * pointer and acquire pointer. There is no overhead of headers in this mode
     */
    xFreeSize = pxRingbuffer->pucFree - pxRingbuffer->pucAcquire;
    if (xFreeSize <= 0) {
        xFreeSize += pxRingbuffer->xSize;
    }
    return xFreeSize;
}

static BaseType_t prvReceiveGeneric(Ringbuffer_t *pxRingbuffer,
                                    void **pvItem1,
                                    void **pvItem2,
                                    size_t *xItemSize1,
                                    size_t *xItemSize2,
                                    size_t xMaxSize,
                                    TickType_t xTicksToWait)
{
    BaseType_t xReturn = pdFALSE;
    BaseType_t xReturnSemaphore = pdFALSE;
    TickType_t xTicksEnd = xTaskGetTickCount() + xTicksToWait;
    TickType_t xTicksRemaining = xTicksToWait;
    while (xTicksRemaining <= xTicksToWait) {   //xTicksRemaining will underflow once xTaskGetTickCount() > xTicksEnd
        //Block until new data has been written or timeout
        if (xSemaphoreTake(rbGET_RX_SEM_HANDLE(pxRingbuffer), xTicksRemaining) != pdTRUE) {
            xReturn = pdFALSE;      //Timed out attempting to get semaphore
            break;
        }
        //Semaphore obtained, check if item can be retrieved
        portENTER_CRITICAL(&pxRingbuffer->mux);
        if (prvCheckItemAvail(pxRingbuffer) == pdTRUE) {
            //Item is available for retrieval
            BaseType_t xIsSplit;
            if (pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG) {
                //Second argument (pxIsSplit) is unused for byte buffers
                *pvItem1 = pxRingbuffer->pvGetItem(pxRingbuffer, NULL, xMaxSize, xItemSize1);
            } else {
                //Third argument (xMaxSize) is unused for no-split/allow-split buffers
                *pvItem1 = pxRingbuffer->pvGetItem(pxRingbuffer, &xIsSplit, 0, xItemSize1);
            }
            //Check for item split if configured to do so
            if ((pxRingbuffer->uxRingbufferFlags & rbALLOW_SPLIT_FLAG) && (pvItem2 != NULL) && (xItemSize2 != NULL)) {
                if (xIsSplit == pdTRUE) {
                    *pvItem2 = pxRingbuffer->pvGetItem(pxRingbuffer, &xIsSplit, 0, xItemSize2);
                    configASSERT(*pvItem2 < *pvItem1);      //Check wrap around has occurred
                    configASSERT(xIsSplit == pdFALSE);      //Second part should not have wrapped flag
                } else {
                    *pvItem2 = NULL;
                }
            }
            xReturn = pdTRUE;
            if (pxRingbuffer->xItemsWaiting > 0) {
                xReturnSemaphore = pdTRUE;
            }
            portEXIT_CRITICAL(&pxRingbuffer->mux);
            break;
        }
        //No item available for retrieval, adjust ticks and take the semaphore again
        if (xTicksToWait != portMAX_DELAY) {
            xTicksRemaining = xTicksEnd - xTaskGetTickCount();
        }
        portEXIT_CRITICAL(&pxRingbuffer->mux);
        /*
         * Gap between critical section and re-acquiring of the semaphore. If the
         * semaphore is given now, priority inversion might occur (see docs)
         */
    }
    if (xReturnSemaphore == pdTRUE) {
        xSemaphoreGive(rbGET_RX_SEM_HANDLE(pxRingbuffer));      //Give semaphore back so other tasks can retrieve
    }
    return xReturn;
}

static BaseType_t prvReceiveGenericFromISR(Ringbuffer_t *pxRingbuffer,
                                           void **pvItem1,
                                           void **pvItem2,
                                           size_t *xItemSize1,
                                           size_t *xItemSize2,
                                           size_t xMaxSize)
{
    BaseType_t xReturn = pdFALSE;
    BaseType_t xReturnSemaphore = pdFALSE;

    portENTER_CRITICAL_ISR(&pxRingbuffer->mux);
    if (prvCheckItemAvail(pxRingbuffer) == pdTRUE) {
        BaseType_t xIsSplit;
        if (pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG) {
            //Second argument (pxIsSplit) is unused for byte buffers
            *pvItem1 = pxRingbuffer->pvGetItem(pxRingbuffer, NULL, xMaxSize, xItemSize1);
        } else {
            //Third argument (xMaxSize) is unused for no-split/allow-split buffers
            *pvItem1 = pxRingbuffer->pvGetItem(pxRingbuffer, &xIsSplit, 0, xItemSize1);
        }
        //Check for item split if configured to do so
        if ((pxRingbuffer->uxRingbufferFlags & rbALLOW_SPLIT_FLAG) && pvItem2 != NULL && xItemSize2 != NULL) {
            if (xIsSplit == pdTRUE) {
                *pvItem2 = pxRingbuffer->pvGetItem(pxRingbuffer, &xIsSplit, 0, xItemSize2);
                configASSERT(*pvItem2 < *pvItem1);      //Check wrap around has occurred
                configASSERT(xIsSplit == pdFALSE);      //Second part should not have wrapped flag
            } else {
                *pvItem2 = NULL;
            }
        }
        xReturn = pdTRUE;
        if (pxRingbuffer->xItemsWaiting > 0) {
            xReturnSemaphore = pdTRUE;
        }
    }
    portEXIT_CRITICAL_ISR(&pxRingbuffer->mux);

    if (xReturnSemaphore == pdTRUE) {
        xSemaphoreGiveFromISR(rbGET_RX_SEM_HANDLE(pxRingbuffer), NULL);     //Give semaphore back so other tasks can retrieve
    }
    return xReturn;
}

/* --------------------------- Public Definitions --------------------------- */

RingbufHandle_t xRingbufferCreate(size_t xBufferSize, RingbufferType_t xBufferType)
{
    configASSERT(xBufferSize > 0);
    configASSERT(xBufferType < RINGBUF_TYPE_MAX);

    //Allocate memory
    if (xBufferType != RINGBUF_TYPE_BYTEBUF) {
        xBufferSize = rbALIGN_SIZE(xBufferSize);    //xBufferSize is rounded up for no-split/allow-split buffers
    }
    Ringbuffer_t *pxNewRingbuffer = calloc(1, sizeof(Ringbuffer_t));
    uint8_t *pucRingbufferStorage = malloc(xBufferSize);
    if (pxNewRingbuffer == NULL || pucRingbufferStorage == NULL) {
        goto err;
    }

    //Initialize Semaphores
#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
    //We don't use the handles for static semaphores, and xSemaphoreCreateBinaryStatic will never fail, so there is no need to check the static case
    xSemaphoreCreateBinaryStatic(&(pxNewRingbuffer->xTransSemStatic));
    xSemaphoreCreateBinaryStatic(&(pxNewRingbuffer->xRecvSemStatic));
#else
    pxNewRingbuffer->xTransSemHandle = xSemaphoreCreateBinary();
    pxNewRingbuffer->xRecvSemHandle = xSemaphoreCreateBinary();
    if (pxNewRingbuffer->xTransSemHandle == NULL || pxNewRingbuffer->xRecvSemHandle == NULL) {
        if (pxNewRingbuffer->xTransSemHandle != NULL) {
            vSemaphoreDelete(pxNewRingbuffer->xTransSemHandle);
        }
        if (pxNewRingbuffer->xRecvSemHandle != NULL) {
            vSemaphoreDelete(pxNewRingbuffer->xRecvSemHandle);
        }
        goto err;
    }
#endif
    prvInitializeNewRingbuffer(xBufferSize, xBufferType, pxNewRingbuffer, pucRingbufferStorage);
    return (RingbufHandle_t)pxNewRingbuffer;

err:
    //An error has occurred; free memory and return NULL
    free(pxNewRingbuffer);
    free(pucRingbufferStorage);
    return NULL;
}
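
/*
 * Illustrative sketch (not part of this driver; guarded out of the build):
 * typical creation of a no-split ring buffer from application code. The
 * 1024-byte size, function name, and error handling are examples only.
 */
#if 0
void vExampleCreateRingbuffer(void)
{
    //Create a no-split ring buffer with a 1024-byte storage area
    RingbufHandle_t xHandle = xRingbufferCreate(1024, RINGBUF_TYPE_NOSPLIT);
    if (xHandle == NULL) {
        //Creation failed (out of memory); handle the error here
    }
}
#endif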

RingbufHandle_t xRingbufferCreateNoSplit(size_t xItemSize, size_t xItemNum)
{
    return xRingbufferCreate((rbALIGN_SIZE(xItemSize) + rbHEADER_SIZE) * xItemNum, RINGBUF_TYPE_NOSPLIT);
}

#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
RingbufHandle_t xRingbufferCreateStatic(size_t xBufferSize,
                                        RingbufferType_t xBufferType,
                                        uint8_t *pucRingbufferStorage,
                                        StaticRingbuffer_t *pxStaticRingbuffer)
{
    //Check arguments
    configASSERT(xBufferSize > 0);
    configASSERT(xBufferType < RINGBUF_TYPE_MAX);
    configASSERT(pucRingbufferStorage != NULL && pxStaticRingbuffer != NULL);
    if (xBufferType != RINGBUF_TYPE_BYTEBUF) {
        //No-split/allow-split buffer sizes must be 32-bit aligned
        configASSERT(rbCHECK_ALIGNED(xBufferSize));
    }

    Ringbuffer_t *pxNewRingbuffer = (Ringbuffer_t *)pxStaticRingbuffer;
    xSemaphoreCreateBinaryStatic(&(pxNewRingbuffer->xTransSemStatic));
    xSemaphoreCreateBinaryStatic(&(pxNewRingbuffer->xRecvSemStatic));
    prvInitializeNewRingbuffer(xBufferSize, xBufferType, pxNewRingbuffer, pucRingbufferStorage);
    pxNewRingbuffer->uxRingbufferFlags |= rbBUFFER_STATIC_FLAG;
    return (RingbufHandle_t)pxNewRingbuffer;
}
#endif

BaseType_t xRingbufferSendAcquire(RingbufHandle_t xRingbuffer, void **ppvItem, size_t xItemSize, TickType_t xTicksToWait)
{
    //Check arguments
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);
    configASSERT(ppvItem != NULL || xItemSize == 0);
    //Acquiring is currently only supported in no-split buffers
    configASSERT((pxRingbuffer->uxRingbufferFlags & (rbBYTE_BUFFER_FLAG | rbALLOW_SPLIT_FLAG)) == 0);

    *ppvItem = NULL;
    if (xItemSize > pxRingbuffer->xMaxItemSize) {
        return pdFALSE;     //Item will never fit in the ring buffer
    }
    if ((pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG) && xItemSize == 0) {
        return pdTRUE;      //Sending 0 bytes to a byte buffer has no effect
    }

    //Attempt to acquire space for an item
    BaseType_t xReturn = pdFALSE;
    BaseType_t xReturnSemaphore = pdFALSE;
    TickType_t xTicksEnd = xTaskGetTickCount() + xTicksToWait;
    TickType_t xTicksRemaining = xTicksToWait;
    while (xTicksRemaining <= xTicksToWait) {   //xTicksRemaining will underflow once xTaskGetTickCount() > xTicksEnd
        //Block until more free space becomes available or timeout
        if (xSemaphoreTake(rbGET_TX_SEM_HANDLE(pxRingbuffer), xTicksRemaining) != pdTRUE) {
            xReturn = pdFALSE;
            break;
        }
        //Semaphore obtained, check if item can fit
        portENTER_CRITICAL(&pxRingbuffer->mux);
        if (pxRingbuffer->xCheckItemFits(pxRingbuffer, xItemSize) == pdTRUE) {
            //Item will fit, acquire space for it
            *ppvItem = prvAcquireItemNoSplit(pxRingbuffer, xItemSize);
            xReturn = pdTRUE;
            //Check if the free semaphore should be returned to allow other tasks to send
            if (prvGetFreeSize(pxRingbuffer) > 0) {
                xReturnSemaphore = pdTRUE;
            }
            portEXIT_CRITICAL(&pxRingbuffer->mux);
            break;
        }
        //Item doesn't fit, adjust ticks and take the semaphore again
        if (xTicksToWait != portMAX_DELAY) {
            xTicksRemaining = xTicksEnd - xTaskGetTickCount();
        }
        portEXIT_CRITICAL(&pxRingbuffer->mux);
        /*
         * Gap between critical section and re-acquiring of the semaphore. If the
         * semaphore is given now, priority inversion might occur (see docs)
         */
    }
    if (xReturnSemaphore == pdTRUE) {
        xSemaphoreGive(rbGET_TX_SEM_HANDLE(pxRingbuffer));      //Give back semaphore so other tasks can acquire
    }
    return xReturn;
}

BaseType_t xRingbufferSendComplete(RingbufHandle_t xRingbuffer, void *pvItem)
{
    //Check arguments
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);
    configASSERT(pvItem != NULL);
    configASSERT((pxRingbuffer->uxRingbufferFlags & (rbBYTE_BUFFER_FLAG | rbALLOW_SPLIT_FLAG)) == 0);

    portENTER_CRITICAL(&pxRingbuffer->mux);
    prvSendItemDoneNoSplit(pxRingbuffer, pvItem);
    portEXIT_CRITICAL(&pxRingbuffer->mux);
    xSemaphoreGive(rbGET_RX_SEM_HANDLE(pxRingbuffer));
    return pdTRUE;
}
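
/*
 * Illustrative sketch (not part of this driver; guarded out of the build): the
 * acquire/complete pair above gives zero-copy sending on a no-split buffer:
 * reserve space, fill it in place, then mark it ready for readers. The handle,
 * payload size, and timeout below are hypothetical.
 */
#if 0
void vExampleZeroCopySend(RingbufHandle_t xHandle)
{
    void *pvItem;
    //Reserve 32 bytes inside the ring buffer, waiting up to 100 ms for space
    if (xRingbufferSendAcquire(xHandle, &pvItem, 32, pdMS_TO_TICKS(100)) == pdTRUE) {
        memset(pvItem, 0xAA, 32);                   //Fill the acquired region in place (no intermediate copy)
        xRingbufferSendComplete(xHandle, pvItem);   //Mark the item as written so receivers can retrieve it
    }
}
#endif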

BaseType_t xRingbufferSend(RingbufHandle_t xRingbuffer,
                           const void *pvItem,
                           size_t xItemSize,
                           TickType_t xTicksToWait)
{
    //Check arguments
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);
    configASSERT(pvItem != NULL || xItemSize == 0);
    if (xItemSize > pxRingbuffer->xMaxItemSize) {
        return pdFALSE;     //Data will never fit in the buffer
    }
    if ((pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG) && xItemSize == 0) {
        return pdTRUE;      //Sending 0 bytes to byte buffer has no effect
    }

    //Attempt to send an item
    BaseType_t xReturn = pdFALSE;
    BaseType_t xReturnSemaphore = pdFALSE;
    TickType_t xTicksEnd = xTaskGetTickCount() + xTicksToWait;
    TickType_t xTicksRemaining = xTicksToWait;
    while (xTicksRemaining <= xTicksToWait) {   //xTicksRemaining will underflow once xTaskGetTickCount() exceeds xTicksEnd
        //Block until more free space becomes available or timeout
        if (xSemaphoreTake(rbGET_TX_SEM_HANDLE(pxRingbuffer), xTicksRemaining) != pdTRUE) {
            xReturn = pdFALSE;
            break;
        }
        //Semaphore obtained, check if item can fit
        portENTER_CRITICAL(&pxRingbuffer->mux);
        if (pxRingbuffer->xCheckItemFits(pxRingbuffer, xItemSize) == pdTRUE) {
            //Item will fit, copy item
            pxRingbuffer->vCopyItem(pxRingbuffer, pvItem, xItemSize);
            xReturn = pdTRUE;
            //Check if the free semaphore should be returned to allow other tasks to send
            if (prvGetFreeSize(pxRingbuffer) > 0) {
                xReturnSemaphore = pdTRUE;
            }
            portEXIT_CRITICAL(&pxRingbuffer->mux);
            break;
        }
        //Item doesn't fit, adjust ticks and take the semaphore again
        if (xTicksToWait != portMAX_DELAY) {
            xTicksRemaining = xTicksEnd - xTaskGetTickCount();
        }
        portEXIT_CRITICAL(&pxRingbuffer->mux);
        /*
         * Gap between critical section and re-acquiring of the semaphore. If
         * semaphore is given now, priority inversion might occur (see docs)
         */
    }

    if (xReturnSemaphore == pdTRUE) {
        xSemaphoreGive(rbGET_TX_SEM_HANDLE(pxRingbuffer));  //Give back semaphore so other tasks can send
    }
    if (xReturn == pdTRUE) {
        //Indicate item was successfully sent
        xSemaphoreGive(rbGET_RX_SEM_HANDLE(pxRingbuffer));
    }
    return xReturn;
}
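
/*
 * Usage sketch (illustrative only): sending a fixed-size item from a task,
 * blocking up to a chosen timeout when the buffer is full. The handle, payload
 * and timeout are assumptions chosen for the example.
 *
 *     const char pcData[] = "hello";
 *     if (xRingbufferSend(xRingbuf, pcData, sizeof(pcData), pdMS_TO_TICKS(50)) != pdTRUE) {
 *         //Buffer stayed full for 50 ms; handle the failure (drop, retry, log, ...)
 *     }
 */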

BaseType_t xRingbufferSendFromISR(RingbufHandle_t xRingbuffer,
                                  const void *pvItem,
                                  size_t xItemSize,
                                  BaseType_t *pxHigherPriorityTaskWoken)
{
    //Check arguments
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);
    configASSERT(pvItem != NULL || xItemSize == 0);
    if (xItemSize > pxRingbuffer->xMaxItemSize) {
        return pdFALSE;     //Data will never fit in the buffer
    }
    if ((pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG) && xItemSize == 0) {
        return pdTRUE;      //Sending 0 bytes to byte buffer has no effect
    }

    //Attempt to send an item
    BaseType_t xReturn;
    BaseType_t xReturnSemaphore = pdFALSE;
    portENTER_CRITICAL_ISR(&pxRingbuffer->mux);
    if (pxRingbuffer->xCheckItemFits(pxRingbuffer, xItemSize) == pdTRUE) {
        pxRingbuffer->vCopyItem(pxRingbuffer, pvItem, xItemSize);
        xReturn = pdTRUE;
        //Check if the free semaphore should be returned to allow other tasks to send
        if (prvGetFreeSize(pxRingbuffer) > 0) {
            xReturnSemaphore = pdTRUE;
        }
    } else {
        xReturn = pdFALSE;
    }
    portEXIT_CRITICAL_ISR(&pxRingbuffer->mux);

    if (xReturnSemaphore == pdTRUE) {
        xSemaphoreGiveFromISR(rbGET_TX_SEM_HANDLE(pxRingbuffer), pxHigherPriorityTaskWoken);    //Give back semaphore so other tasks can send
    }
    if (xReturn == pdTRUE) {
        //Indicate item was successfully sent
        xSemaphoreGiveFromISR(rbGET_RX_SEM_HANDLE(pxRingbuffer), pxHigherPriorityTaskWoken);
    }
    return xReturn;
}
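
/*
 * Usage sketch (illustrative only): sending from an ISR. The woken flag should
 * be passed to the port yield macro so that a higher priority receiver runs as
 * soon as the interrupt returns. The handle and payload are assumptions chosen
 * for the example.
 *
 *     void IRAM_ATTR example_isr(void *arg)
 *     {
 *         RingbufHandle_t xRingbuf = (RingbufHandle_t)arg;
 *         BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *         uint32_t ulSample = 42;
 *         xRingbufferSendFromISR(xRingbuf, &ulSample, sizeof(ulSample), &xHigherPriorityTaskWoken);
 *         if (xHigherPriorityTaskWoken == pdTRUE) {
 *             portYIELD_FROM_ISR();
 *         }
 *     }
 */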

void *xRingbufferReceive(RingbufHandle_t xRingbuffer, size_t *pxItemSize, TickType_t xTicksToWait)
{
    //Check arguments
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);

    //Attempt to retrieve an item
    void *pvTempItem;
    size_t xTempSize;
    if (prvReceiveGeneric(pxRingbuffer, &pvTempItem, NULL, &xTempSize, NULL, 0, xTicksToWait) == pdTRUE) {
        if (pxItemSize != NULL) {
            *pxItemSize = xTempSize;
        }
        return pvTempItem;
    } else {
        return NULL;
    }
}
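
/*
 * Usage sketch (illustrative only): every pointer handed out by
 * xRingbufferReceive() must be handed back with vRingbufferReturnItem() once
 * the data has been consumed, otherwise the space is never reclaimed. The
 * handle, timeout and process_data() consumer are assumptions for the example.
 *
 *     size_t xItemSize;
 *     void *pvItem = xRingbufferReceive(xRingbuf, &xItemSize, pdMS_TO_TICKS(1000));
 *     if (pvItem != NULL) {
 *         process_data(pvItem, xItemSize);          //Hypothetical consumer of the payload
 *         vRingbufferReturnItem(xRingbuf, pvItem);  //Free the slot for future sends
 *     }
 */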

void *xRingbufferReceiveFromISR(RingbufHandle_t xRingbuffer, size_t *pxItemSize)
{
    //Check arguments
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);

    //Attempt to retrieve an item
    void *pvTempItem;
    size_t xTempSize;
    if (prvReceiveGenericFromISR(pxRingbuffer, &pvTempItem, NULL, &xTempSize, NULL, 0) == pdTRUE) {
        if (pxItemSize != NULL) {
            *pxItemSize = xTempSize;
        }
        return pvTempItem;
    } else {
        return NULL;
    }
}

BaseType_t xRingbufferReceiveSplit(RingbufHandle_t xRingbuffer,
                                   void **ppvHeadItem,
                                   void **ppvTailItem,
                                   size_t *pxHeadItemSize,
                                   size_t *pxTailItemSize,
                                   TickType_t xTicksToWait)
{
    //Check arguments
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);
    configASSERT(pxRingbuffer->uxRingbufferFlags & rbALLOW_SPLIT_FLAG);
    configASSERT(ppvHeadItem != NULL && ppvTailItem != NULL);

    //Attempt to retrieve an item, which may have been split into two parts
    void *pvTempHeadItem, *pvTempTailItem;
    size_t xTempHeadSize, xTempTailSize;
    if (prvReceiveGeneric(pxRingbuffer, &pvTempHeadItem, &pvTempTailItem, &xTempHeadSize, &xTempTailSize, 0, xTicksToWait) == pdTRUE) {
        //At least the head part was retrieved
        *ppvHeadItem = pvTempHeadItem;
        if (pxHeadItemSize != NULL) {
            *pxHeadItemSize = xTempHeadSize;
        }
        //Check to see if a tail part was also retrieved
        if (pvTempTailItem != NULL) {
            *ppvTailItem = pvTempTailItem;
            if (pxTailItemSize != NULL) {
                *pxTailItemSize = xTempTailSize;
            }
        } else {
            *ppvTailItem = NULL;
        }
        return pdTRUE;
    } else {
        //No items retrieved
        *ppvHeadItem = NULL;
        *ppvTailItem = NULL;
        return pdFALSE;
    }
}
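
/*
 * Usage sketch (illustrative only): on allow-split buffers a wrapped item comes
 * back in up to two parts, and each non-NULL part must be returned separately.
 * The handle, timeout and process_data() consumer are assumptions for the example.
 *
 *     void *pvHead, *pvTail;
 *     size_t xHeadSize, xTailSize;
 *     if (xRingbufferReceiveSplit(xRingbuf, &pvHead, &pvTail, &xHeadSize, &xTailSize,
 *                                 pdMS_TO_TICKS(1000)) == pdTRUE) {
 *         process_data(pvHead, xHeadSize);              //Hypothetical consumer
 *         vRingbufferReturnItem(xRingbuf, pvHead);
 *         if (pvTail != NULL) {                         //Tail part only exists if the item wrapped
 *             process_data(pvTail, xTailSize);
 *             vRingbufferReturnItem(xRingbuf, pvTail);
 *         }
 *     }
 */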

BaseType_t xRingbufferReceiveSplitFromISR(RingbufHandle_t xRingbuffer,
                                          void **ppvHeadItem,
                                          void **ppvTailItem,
                                          size_t *pxHeadItemSize,
                                          size_t *pxTailItemSize)
{
    //Check arguments
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);
    configASSERT(pxRingbuffer->uxRingbufferFlags & rbALLOW_SPLIT_FLAG);
    configASSERT(ppvHeadItem != NULL && ppvTailItem != NULL);

    //Attempt to retrieve an item, which may have been split into two parts
    void *pvTempHeadItem = NULL, *pvTempTailItem = NULL;
    size_t xTempHeadSize, xTempTailSize;
    if (prvReceiveGenericFromISR(pxRingbuffer, &pvTempHeadItem, &pvTempTailItem, &xTempHeadSize, &xTempTailSize, 0) == pdTRUE) {
        //At least the head part was retrieved
        *ppvHeadItem = pvTempHeadItem;
        if (pxHeadItemSize != NULL) {
            *pxHeadItemSize = xTempHeadSize;
        }
        //Check to see if a tail part was also retrieved
        if (pvTempTailItem != NULL) {
            *ppvTailItem = pvTempTailItem;
            if (pxTailItemSize != NULL) {
                *pxTailItemSize = xTempTailSize;
            }
        } else {
            *ppvTailItem = NULL;
        }
        return pdTRUE;
    } else {
        *ppvHeadItem = NULL;
        *ppvTailItem = NULL;
        return pdFALSE;
    }
}

void *xRingbufferReceiveUpTo(RingbufHandle_t xRingbuffer,
                             size_t *pxItemSize,
                             TickType_t xTicksToWait,
                             size_t xMaxSize)
{
    //Check arguments
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);
    configASSERT(pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG);     //This function should only be called for byte buffers
    if (xMaxSize == 0) {
        return NULL;
    }

    //Attempt to retrieve up to xMaxSize bytes
    void *pvTempItem;
    size_t xTempSize;
    if (prvReceiveGeneric(pxRingbuffer, &pvTempItem, NULL, &xTempSize, NULL, xMaxSize, xTicksToWait) == pdTRUE) {
        if (pxItemSize != NULL) {
            *pxItemSize = xTempSize;
        }
        return pvTempItem;
    } else {
        return NULL;
    }
}
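
/*
 * Usage sketch (illustrative only): byte buffers hand back a contiguous run of
 * bytes, so a read that wraps around the end of the storage area may need a
 * second call to drain the remainder. The handle, sizes, timeout and
 * process_bytes() consumer are assumptions for the example.
 *
 *     size_t xReceived;
 *     uint8_t *pucData = (uint8_t *)xRingbufferReceiveUpTo(xRingbuf, &xReceived,
 *                                                          pdMS_TO_TICKS(100), 128);
 *     if (pucData != NULL) {
 *         process_bytes(pucData, xReceived);        //May be fewer than 128 bytes
 *         vRingbufferReturnItem(xRingbuf, pucData);
 *     }
 */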

void *xRingbufferReceiveUpToFromISR(RingbufHandle_t xRingbuffer, size_t *pxItemSize, size_t xMaxSize)
{
    //Check arguments
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);
    configASSERT(pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG);     //This function should only be called for byte buffers
    if (xMaxSize == 0) {
        return NULL;
    }

    //Attempt to retrieve up to xMaxSize bytes
    void *pvTempItem;
    size_t xTempSize;
    if (prvReceiveGenericFromISR(pxRingbuffer, &pvTempItem, NULL, &xTempSize, NULL, xMaxSize) == pdTRUE) {
        if (pxItemSize != NULL) {
            *pxItemSize = xTempSize;
        }
        return pvTempItem;
    } else {
        return NULL;
    }
}

void vRingbufferReturnItem(RingbufHandle_t xRingbuffer, void *pvItem)
{
    //Check arguments
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);
    configASSERT(pvItem != NULL);

    portENTER_CRITICAL(&pxRingbuffer->mux);
    pxRingbuffer->vReturnItem(pxRingbuffer, (uint8_t *)pvItem);
    portEXIT_CRITICAL(&pxRingbuffer->mux);
    xSemaphoreGive(rbGET_TX_SEM_HANDLE(pxRingbuffer));  //Space was freed, unblock tasks waiting to send
}

void vRingbufferReturnItemFromISR(RingbufHandle_t xRingbuffer, void *pvItem, BaseType_t *pxHigherPriorityTaskWoken)
{
    //Check arguments
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);
    configASSERT(pvItem != NULL);

    portENTER_CRITICAL_ISR(&pxRingbuffer->mux);
    pxRingbuffer->vReturnItem(pxRingbuffer, (uint8_t *)pvItem);
    portEXIT_CRITICAL_ISR(&pxRingbuffer->mux);
    xSemaphoreGiveFromISR(rbGET_TX_SEM_HANDLE(pxRingbuffer), pxHigherPriorityTaskWoken);    //Space was freed, unblock tasks waiting to send
}

void vRingbufferDelete(RingbufHandle_t xRingbuffer)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);

    vSemaphoreDelete(rbGET_TX_SEM_HANDLE(pxRingbuffer));
    vSemaphoreDelete(rbGET_RX_SEM_HANDLE(pxRingbuffer));

#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
    if (pxRingbuffer->uxRingbufferFlags & rbBUFFER_STATIC_FLAG) {
        //Ring buffer was statically allocated, no need to free
        return;
    }
#endif
    //Ring buffer was dynamically allocated, free the storage area and the control structure
    free(pxRingbuffer->pucHead);
    free(pxRingbuffer);
}
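
/*
 * Usage sketch (illustrative only): deleting a dynamically created buffer frees
 * both the storage area and the control structure; for a statically created
 * buffer only the semaphores are cleaned up and the caller keeps ownership of
 * the memory it originally supplied.
 *
 *     vRingbufferDelete(xRingbuf);    //xRingbuf must not be used after this call
 */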

size_t xRingbufferGetMaxItemSize(RingbufHandle_t xRingbuffer)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);
    return pxRingbuffer->xMaxItemSize;
}

size_t xRingbufferGetCurFreeSize(RingbufHandle_t xRingbuffer)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);
    size_t xFreeSize;
    portENTER_CRITICAL(&pxRingbuffer->mux);
    xFreeSize = pxRingbuffer->xGetCurMaxSize(pxRingbuffer);
    portEXIT_CRITICAL(&pxRingbuffer->mux);
    return xFreeSize;
}

BaseType_t xRingbufferAddToQueueSetRead(RingbufHandle_t xRingbuffer, QueueSetHandle_t xQueueSet)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);

    BaseType_t xReturn;
    portENTER_CRITICAL(&pxRingbuffer->mux);
    //Cannot add semaphore to queue set if semaphore is not empty. Temporarily hold semaphore
    BaseType_t xHoldSemaphore = xSemaphoreTake(rbGET_RX_SEM_HANDLE(pxRingbuffer), 0);
    xReturn = xQueueAddToSet(rbGET_RX_SEM_HANDLE(pxRingbuffer), xQueueSet);
    if (xHoldSemaphore == pdTRUE) {
        //Return semaphore if temporarily held
        configASSERT(xSemaphoreGive(rbGET_RX_SEM_HANDLE(pxRingbuffer)) == pdTRUE);
    }
    portEXIT_CRITICAL(&pxRingbuffer->mux);
    return xReturn;
}

BaseType_t xRingbufferCanRead(RingbufHandle_t xRingbuffer, QueueSetMemberHandle_t xMember)
{
    //Check if the selected queue set member is the ring buffer's read semaphore
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);
    return (rbGET_RX_SEM_HANDLE(pxRingbuffer) == xMember) ? pdTRUE : pdFALSE;
}

BaseType_t xRingbufferRemoveFromQueueSetRead(RingbufHandle_t xRingbuffer, QueueSetHandle_t xQueueSet)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);

    BaseType_t xReturn;
    portENTER_CRITICAL(&pxRingbuffer->mux);
    //Cannot remove semaphore from queue set if semaphore is not empty. Temporarily hold semaphore
    BaseType_t xHoldSemaphore = xSemaphoreTake(rbGET_RX_SEM_HANDLE(pxRingbuffer), 0);
    xReturn = xQueueRemoveFromSet(rbGET_RX_SEM_HANDLE(pxRingbuffer), xQueueSet);
    if (xHoldSemaphore == pdTRUE) {
        //Return semaphore if temporarily held
        configASSERT(xSemaphoreGive(rbGET_RX_SEM_HANDLE(pxRingbuffer)) == pdTRUE);
    }
    portEXIT_CRITICAL(&pxRingbuffer->mux);
    return xReturn;
}
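
/*
 * Usage sketch (illustrative only): a task can block on a queue set that mixes
 * queues and ring buffers, then use xRingbufferCanRead() to tell whether the
 * ready member was this buffer. The set length and timeouts are assumptions
 * chosen for the example.
 *
 *     QueueSetHandle_t xQueueSet = xQueueCreateSet(3);
 *     xRingbufferAddToQueueSetRead(xRingbuf, xQueueSet);
 *     QueueSetMemberHandle_t xMember = xQueueSelectFromSet(xQueueSet, portMAX_DELAY);
 *     if (xRingbufferCanRead(xRingbuf, xMember) == pdTRUE) {
 *         size_t xItemSize;
 *         void *pvItem = xRingbufferReceive(xRingbuf, &xItemSize, 0);
 *         if (pvItem != NULL) {
 *             vRingbufferReturnItem(xRingbuf, pvItem);
 *         }
 *     }
 */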

void vRingbufferGetInfo(RingbufHandle_t xRingbuffer,
                        UBaseType_t *uxFree,
                        UBaseType_t *uxRead,
                        UBaseType_t *uxWrite,
                        UBaseType_t *uxAcquire,
                        UBaseType_t *uxItemsWaiting)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);

    portENTER_CRITICAL(&pxRingbuffer->mux);
    if (uxFree != NULL) {
        *uxFree = (UBaseType_t)(pxRingbuffer->pucFree - pxRingbuffer->pucHead);
    }
    if (uxRead != NULL) {
        *uxRead = (UBaseType_t)(pxRingbuffer->pucRead - pxRingbuffer->pucHead);
    }
    if (uxWrite != NULL) {
        *uxWrite = (UBaseType_t)(pxRingbuffer->pucWrite - pxRingbuffer->pucHead);
    }
    if (uxAcquire != NULL) {
        *uxAcquire = (UBaseType_t)(pxRingbuffer->pucAcquire - pxRingbuffer->pucHead);
    }
    if (uxItemsWaiting != NULL) {
        *uxItemsWaiting = (UBaseType_t)(pxRingbuffer->xItemsWaiting);
    }
    portEXIT_CRITICAL(&pxRingbuffer->mux);
}
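
/*
 * Usage sketch (illustrative only): snapshotting the buffer state, e.g. for
 * debugging or runtime statistics. Any output pointer that is not needed can
 * be passed as NULL.
 *
 *     UBaseType_t uxFree, uxRead, uxWrite, uxAcquire, uxItemsWaiting;
 *     vRingbufferGetInfo(xRingbuf, &uxFree, &uxRead, &uxWrite, &uxAcquire, &uxItemsWaiting);
 *     printf("items waiting: %u\n", (unsigned)uxItemsWaiting);
 */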

void xRingbufferPrintInfo(RingbufHandle_t xRingbuffer)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);
    printf("Rb size:%d\tfree: %d\trptr: %d\tfreeptr: %d\twptr: %d, aptr: %d\n",
           (int)pxRingbuffer->xSize, (int)prvGetFreeSize(pxRingbuffer),
           (int)(pxRingbuffer->pucRead - pxRingbuffer->pucHead),
           (int)(pxRingbuffer->pucFree - pxRingbuffer->pucHead),
           (int)(pxRingbuffer->pucWrite - pxRingbuffer->pucHead),
           (int)(pxRingbuffer->pucAcquire - pxRingbuffer->pucHead));
}