|
|
@@ -1,4 +1,4 @@
|
|
|
-// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
|
|
|
+// Copyright 2015-2018 Espressif Systems (Shanghai) PTE LTD
|
|
|
//
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
@@ -12,776 +12,1173 @@
|
|
|
// See the License for the specific language governing permissions and
|
|
|
// limitations under the License.
|
|
|
|
|
|
-#include "freertos/FreeRTOS.h"
|
|
|
-#include "freertos/task.h"
|
|
|
-#include "freertos/semphr.h"
|
|
|
-#include "freertos/queue.h"
|
|
|
-#include "freertos/xtensa_api.h"
|
|
|
-#include "freertos/ringbuf.h"
|
|
|
-#include "esp_attr.h"
|
|
|
-#include <stdint.h>
|
|
|
-#include <string.h>
|
|
|
#include <stdlib.h>
|
|
|
-#include <stdio.h>
|
|
|
-
|
|
|
-typedef enum {
|
|
|
- flag_allowsplit = 1,
|
|
|
- flag_bytebuf = 2,
|
|
|
-} rbflag_t;
|
|
|
-
|
|
|
-typedef enum {
|
|
|
- iflag_free = 1, //Buffer is not read and given back by application, free to overwrite
|
|
|
- iflag_dummydata = 2, //Data from here to end of ringbuffer is dummy. Restart reading at start of ringbuffer.
|
|
|
- iflag_wrap = 4, //Valid for RINGBUF_TYPE_ALLOWSPLIT, indicating that rest of the data is wrapped around
|
|
|
-} itemflag_t;
|
|
|
-
|
|
|
-
|
|
|
-typedef struct ringbuf_t ringbuf_t;
|
|
|
-
|
|
|
-//The ringbuffer structure
|
|
|
-struct ringbuf_t {
|
|
|
- SemaphoreHandle_t free_space_sem; //Binary semaphore, wakes up writing threads when there's more free space
|
|
|
- SemaphoreHandle_t items_buffered_sem; //Binary semaphore, indicates there are new packets in the circular buffer. See remark.
|
|
|
- size_t size; //Size of the data storage
|
|
|
- uint8_t *write_ptr; //Pointer where the next item is written
|
|
|
- uint8_t *read_ptr; //Pointer from where the next item is read
|
|
|
- uint8_t *free_ptr; //Pointer to the last block that hasn't been given back to the ringbuffer yet
|
|
|
- uint8_t *data; //Data storage
|
|
|
- portMUX_TYPE mux; //Spinlock for actual data/ptr/struct modification
|
|
|
- rbflag_t flags;
|
|
|
- size_t maxItemSize;
|
|
|
- //The following keep function pointers to hold different implementations for ringbuffer management.
|
|
|
- BaseType_t (*copyItemToRingbufImpl)(ringbuf_t *rb, uint8_t *buffer, size_t buffer_size);
|
|
|
- uint8_t *(*getItemFromRingbufImpl)(ringbuf_t *rb, size_t *length, int wanted_length);
|
|
|
- void (*returnItemToRingbufImpl)(ringbuf_t *rb, void *item);
|
|
|
- size_t (*getFreeSizeImpl)(ringbuf_t *rb);
|
|
|
-};
|
|
|
+#include <string.h>
|
|
|
+#include "FreeRTOS.h"
|
|
|
+#include "task.h"
|
|
|
+#include "semphr.h"
|
|
|
+#include "ringbuf.h"
|
|
|
+
|
|
|
+//32-bit alignment macros
|
|
|
+#define rbALIGN_SIZE( xSize ) ( ( xSize + portBYTE_ALIGNMENT_MASK ) & ~portBYTE_ALIGNMENT_MASK )
|
|
|
+#define rbCHECK_ALIGNED( pvPtr ) ( ( ( UBaseType_t ) pvPtr & portBYTE_ALIGNMENT_MASK ) == 0 )
|
|
|
|
|
|
+//Ring buffer flags
|
|
|
+#define rbALLOW_SPLIT_FLAG ( ( UBaseType_t ) 1 ) //The ring buffer allows items to be split
|
|
|
+#define rbBYTE_BUFFER_FLAG ( ( UBaseType_t ) 2 ) //The ring buffer is a byte buffer
|
|
|
+#define rbBUFFER_FULL_FLAG ( ( UBaseType_t ) 4 ) //The ring buffer is currently full (write pointer == free pointer)
|
|
|
|
|
|
+//Item flags
|
|
|
+#define rbITEM_FREE_FLAG ( ( UBaseType_t ) 1 ) //Item has been retrieved and returned by application, free to overwrite
|
|
|
+#define rbITEM_DUMMY_DATA_FLAG ( ( UBaseType_t ) 2 ) //Data from here to end of the ring buffer is dummy data. Restart reading at start of head of the buffer
|
|
|
+#define rbITEM_SPLIT_FLAG ( ( UBaseType_t ) 4 ) //Valid for RINGBUF_TYPE_ALLOWSPLIT, indicating that rest of the data is wrapped around
|
|
|
+
|
|
|
+typedef struct {
|
|
|
+    //The size of this structure must be 32-bit aligned
|
|
|
+ size_t xItemLen;
|
|
|
+ UBaseType_t uxItemFlags;
|
|
|
+} ItemHeader_t;
|
|
|
+
|
|
|
+#define rbHEADER_SIZE sizeof(ItemHeader_t)
|
|
|
+typedef struct Ringbuffer_t Ringbuffer_t;
|
|
|
+typedef BaseType_t (*CheckItemFitsFunction_t)(Ringbuffer_t *pxRingbuffer, size_t xItemSize);
|
|
|
+typedef void (*CopyItemFunction_t)(Ringbuffer_t *pxRingbuffer, const uint8_t *pcItem, size_t xItemSize);
|
|
|
+typedef BaseType_t (*CheckItemAvailFunction_t) (Ringbuffer_t *pxRingbuffer);
|
|
|
+typedef void *(*GetItemFunction_t)(Ringbuffer_t *pxRingbuffer, BaseType_t *pxIsSplit, size_t xMaxSize, size_t *pxItemSize);
|
|
|
+typedef void (*ReturnItemFunction_t)(Ringbuffer_t *pxRingbuffer, uint8_t *pvItem);
|
|
|
+typedef size_t (*GetCurMaxSizeFunction_t)(Ringbuffer_t *pxRingbuffer);
|
|
|
+
|
|
|
+struct Ringbuffer_t {
|
|
|
+ size_t xSize; //Size of the data storage
|
|
|
+ UBaseType_t uxRingbufferFlags; //Flags to indicate the type and status of ring buffer
|
|
|
+ size_t xMaxItemSize; //Maximum item size
|
|
|
+
|
|
|
+ CheckItemFitsFunction_t xCheckItemFits; //Function to check if item can currently fit in ring buffer
|
|
|
+ CopyItemFunction_t vCopyItem; //Function to copy item to ring buffer
|
|
|
+ GetItemFunction_t pvGetItem; //Function to get item from ring buffer
|
|
|
+ ReturnItemFunction_t vReturnItem; //Function to return item to ring buffer
|
|
|
+ GetCurMaxSizeFunction_t xGetCurMaxSize; //Function to get current free size
|
|
|
+
|
|
|
+ uint8_t *pucWrite; //Write Pointer. Points to where the next item should be written
|
|
|
+ uint8_t *pucRead; //Read Pointer. Points to where the next item should be read from
|
|
|
+ uint8_t *pucFree; //Free Pointer. Points to the last item that has yet to be returned to the ring buffer
|
|
|
+ uint8_t *pucHead; //Pointer to the start of the ring buffer storage area
|
|
|
+ uint8_t *pucTail; //Pointer to the end of the ring buffer storage area
|
|
|
+
|
|
|
+ BaseType_t xItemsWaiting; //Number of items/bytes(for byte buffers) currently in ring buffer that have not yet been read
|
|
|
+ SemaphoreHandle_t xFreeSpaceSemaphore; //Binary semaphore, wakes up writing threads when more free space becomes available or when another thread times out attempting to write
|
|
|
+ SemaphoreHandle_t xItemsBufferedSemaphore; //Binary semaphore, indicates there are new packets in the circular buffer. See remark.
|
|
|
+ portMUX_TYPE mux; //Spinlock required for SMP
|
|
|
+};
|
|
|
|
|
|
/*
|
|
|
Remark: A counting semaphore for items_buffered_sem would be more logical, but counting semaphores in
|
|
|
FreeRTOS need a maximum count, and allocate more memory the larger the maximum count is. Here, we
|
|
|
-would need to set the maximum to the maximum amount of times a null-byte unit firs in the buffer,
|
|
|
+would need to set the maximum to the maximum amount of times a null-byte unit fits in the buffer,
|
|
|
which is quite high and so would waste a fair amount of memory.
|
|
|
*/
|
|
|
|
|
|
+/* ------------------------------------------------ Static Declarations ------------------------------------------ */
|
|
|
+/*
|
|
|
+ * WARNING: All of the following static functions (except generic functions)
|
|
|
+ * ARE NOT THREAD SAFE. Therefore they should only be called within a critical
|
|
|
+ * section (using spin locks)
|
|
|
+ */
|
|
|
|
|
|
-//The header prepended to each ringbuffer entry. Size is assumed to be a multiple of 32bits.
|
|
|
-typedef struct {
|
|
|
- size_t len;
|
|
|
- itemflag_t flags;
|
|
|
-} buf_entry_hdr_t;
|
|
|
-
|
|
|
-
|
|
|
-//Calculate space free in the buffer
|
|
|
-static int ringbufferFreeMem(ringbuf_t *rb)
|
|
|
-{
|
|
|
- int free_size = rb->free_ptr-rb->write_ptr;
|
|
|
- if (free_size <= 0) free_size += rb->size;
|
|
|
- //Reserve one byte. If we do not do this and the entire buffer is filled, we get a situation
|
|
|
- //where read_ptr == free_ptr, messing up the next calculation.
|
|
|
- return free_size-1;
|
|
|
-}
|
|
|
-
|
|
|
-//Copies a single item to the ring buffer; refuses to split items. Assumes there is space in the ringbuffer and
|
|
|
-//the ringbuffer is locked. Increases write_ptr to the next item. Returns pdTRUE on
|
|
|
-//success, pdFALSE if it can't make the item fit and the calling routine needs to retry
|
|
|
-//later or fail.
|
|
|
-//This function by itself is not threadsafe, always call from within a muxed section.
|
|
|
-static BaseType_t copyItemToRingbufNoSplit(ringbuf_t *rb, uint8_t *buffer, size_t buffer_size)
|
|
|
-{
|
|
|
- size_t rbuffer_size;
|
|
|
- rbuffer_size=(buffer_size+3)&~3; //Payload length, rounded to next 32-bit value
|
|
|
- configASSERT(((int)rb->write_ptr&3)==0); //write_ptr needs to be 32-bit aligned
|
|
|
- configASSERT(rb->write_ptr-(rb->data+rb->size) >= sizeof(buf_entry_hdr_t)); //need to have at least the size
|
|
|
- //of a header to the end of the ringbuff
|
|
|
- size_t rem_len=(rb->data + rb->size) - rb->write_ptr; //length remaining until end of ringbuffer
|
|
|
-
|
|
|
- //See if we have enough contiguous space to write the buffer.
|
|
|
- if (rem_len < rbuffer_size + sizeof(buf_entry_hdr_t)) {
|
|
|
- //Buffer plus header is not going to fit in the room from wr_pos to the end of the
|
|
|
- //ringbuffer... but we're not allowed to split the buffer. We need to fill the
|
|
|
- //rest of the ringbuffer with a dummy item so we can place the data at the _start_ of
|
|
|
- //the ringbuffer..
|
|
|
- //First, find out if we actually have enough space at the start of the ringbuffer to
|
|
|
- //make this work (Again, we need 4 bytes extra because otherwise read_ptr==free_ptr)
|
|
|
- if (rb->free_ptr-rb->data < rbuffer_size+sizeof(buf_entry_hdr_t)+4) {
|
|
|
- //Will not fit.
|
|
|
- return pdFALSE;
|
|
|
- }
|
|
|
- //If the read buffer hasn't wrapped around yet, there's no way this will work either.
|
|
|
- if (rb->free_ptr > rb->write_ptr) {
|
|
|
- //No luck.
|
|
|
- return pdFALSE;
|
|
|
- }
|
|
|
+//Calculate current amount of free space (in bytes) in the ring buffer
|
|
|
+static size_t prvGetFreeSize(Ringbuffer_t *pxRingbuffer);
|
|
|
+
|
|
|
+//Checks if an item/data is currently available for retrieval
|
|
|
+static BaseType_t prvCheckItemAvail(Ringbuffer_t *pxRingbuffer);
|
|
|
+
|
|
|
+//Checks if an item will currently fit in a no-split/allow-split ring buffer
|
|
|
+static BaseType_t prvCheckItemFitsDefault( Ringbuffer_t *pxRingbuffer, size_t xItemSize);
|
|
|
+
|
|
|
+//Checks if an item will currently fit in a byte buffer
|
|
|
+static BaseType_t prvCheckItemFitsByteBuffer( Ringbuffer_t *pxRingbuffer, size_t xItemSize);
|
|
|
+
|
|
|
+//Copies an item to a no-split ring buffer. Only call this function after calling prvCheckItemFitsDefault()
|
|
|
+static void prvCopyItemNoSplit(Ringbuffer_t *pxRingbuffer, const uint8_t *pucItem, size_t xItemSize);
|
|
|
+
|
|
|
+//Copies an item to an allow-split ring buffer. Only call this function after calling prvCheckItemFitsDefault()
|
|
|
+static void prvCopyItemAllowSplit(Ringbuffer_t *pxRingbuffer, const uint8_t *pucItem, size_t xItemSize);
|
|
|
+
|
|
|
+//Copies an item to a byte buffer. Only call this function after calling prvCheckItemFitsByteBuffer()
|
|
|
+static void prvCopyItemByteBuf(Ringbuffer_t *pxRingbuffer, const uint8_t *pucItem, size_t xItemSize);
|
|
|
+
|
|
|
+//Retrieve item from no-split/allow-split ring buffer. *pxIsSplit is set to pdTRUE if the retrieved item is split
|
|
|
+static void *prvGetItemDefault(Ringbuffer_t *pxRingbuffer, BaseType_t *pxIsSplit, size_t xUnusedParam, size_t *pxItemSize);
|
|
|
+
|
|
|
+//Retrieve data from byte buffer. If xMaxSize is 0, all continuous data is retrieved
|
|
|
+static void *prvGetItemByteBuf(Ringbuffer_t *pxRingbuffer, BaseType_t *pxUnusedParam ,size_t xMaxSize, size_t *pxItemSize);
|
|
|
+
|
|
|
+//Return an item to a split/no-split ring buffer
|
|
|
+static void prvReturnItemDefault(Ringbuffer_t *pxRingbuffer, uint8_t *pucItem);
|
|
|
+
|
|
|
+//Return data to a byte buffer
|
|
|
+static void prvReturnItemByteBuf(Ringbuffer_t *pxRingbuffer, uint8_t *pucItem);
|
|
|
+
|
|
|
+//Get the maximum size an item can currently have if sent to a no-split ring buffer
|
|
|
+static size_t prvGetCurMaxSizeNoSplit(Ringbuffer_t *pxRingbuffer);
|
|
|
+
|
|
|
+//Get the maximum size an item can currently have if sent to an allow-split ring buffer
|
|
|
+static size_t prvGetCurMaxSizeAllowSplit(Ringbuffer_t *pxRingbuffer);
|
|
|
+
|
|
|
+//Get the maximum size an item can currently have if sent to a byte buffer
|
|
|
+static size_t prvGetCurMaxSizeByteBuf(Ringbuffer_t *pxRingbuffer);
|
|
|
+
|
|
|
+/**
|
|
|
+ * Generic function used to retrieve an item/data from ring buffers. If called on
|
|
|
+ * an allow-split buffer, and pvItem2 and xItemSize2 are not NULL, both parts of
|
|
|
+ * a split item will be retrieved. xMaxSize will only take effect if called on
|
|
|
+ * byte buffers.
|
|
|
+ */
|
|
|
+static BaseType_t prvReceiveGeneric(Ringbuffer_t *pxRingbuffer, void **pvItem1, void **pvItem2, size_t *xItemSize1, size_t *xItemSize2, size_t xMaxSize, TickType_t xTicksToWait);
|
|
|
+
|
|
|
+//Generic function used to retrieve an item/data from ring buffers in an ISR
|
|
|
+static BaseType_t prvReceiveGenericFromISR(Ringbuffer_t *pxRingbuffer, void **pvItem1, void **pvItem2, size_t *xItemSize1, size_t *xItemSize2, size_t xMaxSize);
|
|
|
+
|
|
|
+/* ------------------------------------------------ Static Definitions ------------------------------------------- */
|
|
|
|
|
|
- //Okay, it will fit. Mark the rest of the ringbuffer space with a dummy packet.
|
|
|
- buf_entry_hdr_t *hdr=(buf_entry_hdr_t *)rb->write_ptr;
|
|
|
- hdr->flags=iflag_dummydata;
|
|
|
- //Reset the write pointer to the start of the ringbuffer so the code later on can
|
|
|
- //happily write the data.
|
|
|
- rb->write_ptr=rb->data;
|
|
|
+static size_t prvGetFreeSize(Ringbuffer_t *pxRingbuffer)
|
|
|
+{
|
|
|
+ size_t xReturn;
|
|
|
+ if (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) {
|
|
|
+ xReturn = 0;
|
|
|
} else {
|
|
|
- //No special handling needed. Checking if it's gonna fit probably still is a good idea.
|
|
|
- if (ringbufferFreeMem(rb) < sizeof(buf_entry_hdr_t)+rbuffer_size) {
|
|
|
- //Buffer is not going to fit, period.
|
|
|
- return pdFALSE;
|
|
|
+ BaseType_t xFreeSize = pxRingbuffer->pucFree - pxRingbuffer->pucWrite;
|
|
|
+ //Check if xFreeSize has underflowed
|
|
|
+ if (xFreeSize <= 0) {
|
|
|
+ xFreeSize += pxRingbuffer->xSize;
|
|
|
}
|
|
|
+ xReturn = xFreeSize;
|
|
|
}
|
|
|
+ configASSERT(xReturn <= pxRingbuffer->xSize);
|
|
|
+ return xReturn;
|
|
|
+}
|
|
|
|
|
|
- //If we are here, the buffer is guaranteed to fit in the space starting at the write pointer.
|
|
|
- buf_entry_hdr_t *hdr=(buf_entry_hdr_t *)rb->write_ptr;
|
|
|
- hdr->len=buffer_size;
|
|
|
- hdr->flags=0;
|
|
|
- rb->write_ptr+=sizeof(buf_entry_hdr_t);
|
|
|
- memcpy(rb->write_ptr, buffer, buffer_size);
|
|
|
- rb->write_ptr+=rbuffer_size;
|
|
|
-
|
|
|
- //The buffer will wrap around if we don't have room for a header anymore.
|
|
|
- if ((rb->data+rb->size)-rb->write_ptr < sizeof(buf_entry_hdr_t)) {
|
|
|
- //'Forward' the write buffer until we are at the start of the ringbuffer.
|
|
|
- //The read pointer will always be at the start of a full header, which cannot
|
|
|
- //exist at the point of the current write pointer, so there's no chance of overtaking
|
|
|
- //that.
|
|
|
- rb->write_ptr=rb->data;
|
|
|
- }
|
|
|
- return pdTRUE;
|
|
|
-}
|
|
|
-
|
|
|
-//Copies a single item to the ring buffer; allows split items. Assumes there is space in the ringbuffer and
|
|
|
-//the ringbuffer is locked. Increases write_ptr to the next item. Returns pdTRUE on
|
|
|
-//success, pdFALSE if it can't make the item fit and the calling routine needs to retry
|
|
|
-//later or fail.
|
|
|
-//This function by itself is not threadsafe, always call from within a muxed section.
|
|
|
-static BaseType_t copyItemToRingbufAllowSplit(ringbuf_t *rb, uint8_t *buffer, size_t buffer_size)
|
|
|
-{
|
|
|
- size_t rbuffer_size;
|
|
|
- rbuffer_size=(buffer_size+3)&~3; //Payload length, rounded to next 32-bit value
|
|
|
- configASSERT(((int)rb->write_ptr&3)==0); //write_ptr needs to be 32-bit aligned
|
|
|
- configASSERT(rb->write_ptr-(rb->data+rb->size) >= sizeof(buf_entry_hdr_t)); //need to have at least the size
|
|
|
- //of a header to the end of the ringbuff
|
|
|
- size_t rem_len=(rb->data + rb->size) - rb->write_ptr; //length remaining until end of ringbuffer
|
|
|
-
|
|
|
- //See if we have enough contiguous space to write the buffer.
|
|
|
- if (rem_len < rbuffer_size + sizeof(buf_entry_hdr_t)) {
|
|
|
- //The buffer can't be contiguously written to the ringbuffer, but needs special handling. Do
|
|
|
- //that depending on how the ringbuffer is configured.
|
|
|
- //The code here is also expected to check if the buffer, mangled in whatever way is implemented,
|
|
|
- //will still fit, and return pdFALSE if that is not the case.
|
|
|
- //Buffer plus header is not going to fit in the room from wr_pos to the end of the
|
|
|
- //ringbuffer... we need to split the write in two.
|
|
|
- //First, see if this will fit at all.
|
|
|
- if (ringbufferFreeMem(rb) < (sizeof(buf_entry_hdr_t)*2)+rbuffer_size) {
|
|
|
- //Will not fit.
|
|
|
- return pdFALSE;
|
|
|
- }
|
|
|
- //Because the code at the end of the function makes sure we always have
|
|
|
- //room for a header, this should never assert.
|
|
|
- configASSERT(rem_len>=sizeof(buf_entry_hdr_t));
|
|
|
- //Okay, it should fit. Write everything.
|
|
|
- //First, place bit of buffer that does fit. Write header first...
|
|
|
- buf_entry_hdr_t *hdr=(buf_entry_hdr_t *)rb->write_ptr;
|
|
|
- hdr->flags=0;
|
|
|
- hdr->len=rem_len-sizeof(buf_entry_hdr_t);
|
|
|
- rb->write_ptr+=sizeof(buf_entry_hdr_t);
|
|
|
- rem_len-=sizeof(buf_entry_hdr_t);
|
|
|
- if (rem_len!=0) {
|
|
|
- //..then write the data bit that fits.
|
|
|
- memcpy(rb->write_ptr, buffer, rem_len);
|
|
|
- //Update vars so the code later on will write the rest of the data.
|
|
|
- buffer+=rem_len;
|
|
|
- buffer_size-=rem_len;
|
|
|
- //Re-adjust the rbuffer value to be 4 byte aligned
|
|
|
- rbuffer_size=(buffer_size+3)&~3;
|
|
|
- //It is possible that we are here because we checked for 4byte aligned
|
|
|
- //size, but actual data was smaller.
|
|
|
- //Eg. For buffer_size = 34, rbuffer_size will be 36. Suppose we had only
|
|
|
- //42 bytes of memory available, the top level check will fail, as it will
|
|
|
- //check for availability of 36 + 8 = 44 bytes.
|
|
|
- //However, the 42 bytes available memory is sufficient for 34 + 8 bytes data
|
|
|
- //and so, we can return after writing the data. Hence, this check
|
|
|
- if (buffer_size == 0) {
|
|
|
- rb->write_ptr=rb->data;
|
|
|
- return pdTRUE;
|
|
|
- } else {
|
|
|
- /* Indicate the wrapping */
|
|
|
- hdr->flags|=iflag_wrap;
|
|
|
- }
|
|
|
- } else {
|
|
|
- //Huh, only the header fit. Mark as dummy so the receive function doesn't receive
|
|
|
- //an useless zero-byte packet.
|
|
|
- hdr->flags|=iflag_dummydata;
|
|
|
- }
|
|
|
- rb->write_ptr=rb->data;
|
|
|
+static BaseType_t prvCheckItemFitsDefault( Ringbuffer_t *pxRingbuffer, size_t xItemSize)
|
|
|
+{
|
|
|
+ //Check arguments and buffer state
|
|
|
+ configASSERT(rbCHECK_ALIGNED(pxRingbuffer->pucWrite)); //pucWrite is always aligned in no-split ring buffers
|
|
|
+ configASSERT(pxRingbuffer->pucWrite >= pxRingbuffer->pucHead && pxRingbuffer->pucWrite < pxRingbuffer->pucTail); //Check write pointer is within bounds
|
|
|
+
|
|
|
+ size_t xTotalItemSize = rbALIGN_SIZE(xItemSize) + rbHEADER_SIZE; //Rounded up aligned item size with header
|
|
|
+ if (pxRingbuffer->pucWrite == pxRingbuffer->pucFree) {
|
|
|
+        //Buffer is either completely empty or completely full
|
|
|
+ return (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) ? pdFALSE : pdTRUE;
|
|
|
+ }
|
|
|
+ if (pxRingbuffer->pucFree > pxRingbuffer->pucWrite) {
|
|
|
+ //Free space does not wrap around
|
|
|
+ return (xTotalItemSize <= pxRingbuffer->pucFree - pxRingbuffer->pucWrite) ? pdTRUE : pdFALSE;
|
|
|
+ }
|
|
|
+ //Free space wraps around
|
|
|
+ if (xTotalItemSize <= pxRingbuffer->pucTail - pxRingbuffer->pucWrite) {
|
|
|
+ return pdTRUE; //Item fits without wrapping around
|
|
|
+ }
|
|
|
+ //Check if item fits by wrapping
|
|
|
+ if (pxRingbuffer->uxRingbufferFlags & rbALLOW_SPLIT_FLAG) {
|
|
|
+ //Allow split wrapping incurs an extra header
|
|
|
+ return (xTotalItemSize + rbHEADER_SIZE <= pxRingbuffer->xSize - (pxRingbuffer->pucWrite - pxRingbuffer->pucFree)) ? pdTRUE : pdFALSE;
|
|
|
} else {
|
|
|
- //No special handling needed. Checking if it's gonna fit probably still is a good idea.
|
|
|
- if (ringbufferFreeMem(rb) < sizeof(buf_entry_hdr_t)+rbuffer_size) {
|
|
|
- //Buffer is not going to fit, period.
|
|
|
- return pdFALSE;
|
|
|
- }
|
|
|
+ return (xTotalItemSize <= pxRingbuffer->pucFree - pxRingbuffer->pucHead) ? pdTRUE : pdFALSE;
|
|
|
}
|
|
|
+}
|
|
|
|
|
|
- //If we are here, the buffer is guaranteed to fit in the space starting at the write pointer.
|
|
|
- buf_entry_hdr_t *hdr=(buf_entry_hdr_t *)rb->write_ptr;
|
|
|
- hdr->len=buffer_size;
|
|
|
- hdr->flags=0;
|
|
|
- rb->write_ptr+=sizeof(buf_entry_hdr_t);
|
|
|
- memcpy(rb->write_ptr, buffer, buffer_size);
|
|
|
- rb->write_ptr+=rbuffer_size;
|
|
|
+static BaseType_t prvCheckItemFitsByteBuffer( Ringbuffer_t *pxRingbuffer, size_t xItemSize)
|
|
|
+{
|
|
|
+ //Check arguments and buffer state
|
|
|
+ configASSERT(pxRingbuffer->pucWrite >= pxRingbuffer->pucHead && pxRingbuffer->pucWrite < pxRingbuffer->pucTail); //Check write pointer is within bounds
|
|
|
|
|
|
- //The buffer will wrap around if we don't have room for a header anymore.
|
|
|
- if ((rb->data+rb->size)-rb->write_ptr < sizeof(buf_entry_hdr_t)) {
|
|
|
- //'Forward' the write buffer until we are at the start of the ringbuffer.
|
|
|
- //The read pointer will always be at the start of a full header, which cannot
|
|
|
- //exist at the point of the current write pointer, so there's no chance of overtaking
|
|
|
- //that.
|
|
|
- rb->write_ptr=rb->data;
|
|
|
+ if (pxRingbuffer->pucWrite == pxRingbuffer->pucFree) {
|
|
|
+        //Buffer is either completely empty or completely full
|
|
|
+ return (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) ? pdFALSE : pdTRUE;
|
|
|
}
|
|
|
- return pdTRUE;
|
|
|
+ if (pxRingbuffer->pucFree > pxRingbuffer->pucWrite) {
|
|
|
+ //Free space does not wrap around
|
|
|
+ return (xItemSize <= pxRingbuffer->pucFree - pxRingbuffer->pucWrite) ? pdTRUE : pdFALSE;
|
|
|
+ }
|
|
|
+ //Free space wraps around
|
|
|
+ return (xItemSize <= pxRingbuffer->xSize - (pxRingbuffer->pucWrite - pxRingbuffer->pucFree)) ? pdTRUE : pdFALSE;
|
|
|
}
|
|
|
|
|
|
+static void prvCopyItemNoSplit(Ringbuffer_t *pxRingbuffer, const uint8_t *pucItem, size_t xItemSize)
|
|
|
+{
|
|
|
+ //Check arguments and buffer state
|
|
|
+ size_t xAlignedItemSize = rbALIGN_SIZE(xItemSize); //Rounded up aligned item size
|
|
|
+ size_t xRemLen = pxRingbuffer->pucTail - pxRingbuffer->pucWrite; //Length from pucWrite until end of buffer
|
|
|
+ configASSERT(rbCHECK_ALIGNED(pxRingbuffer->pucWrite)); //pucWrite is always aligned in no-split ring buffers
|
|
|
+ configASSERT(pxRingbuffer->pucWrite >= pxRingbuffer->pucHead && pxRingbuffer->pucWrite < pxRingbuffer->pucTail); //Check write pointer is within bounds
|
|
|
+ configASSERT(xRemLen >= rbHEADER_SIZE); //Remaining length must be able to at least fit an item header
|
|
|
+
|
|
|
+ //If remaining length can't fit item, set as dummy data and wrap around
|
|
|
+ if (xRemLen < xAlignedItemSize + rbHEADER_SIZE) {
|
|
|
+ ItemHeader_t *pxDummy = (ItemHeader_t *)pxRingbuffer->pucWrite;
|
|
|
+ pxDummy->uxItemFlags = rbITEM_DUMMY_DATA_FLAG; //Set remaining length as dummy data
|
|
|
+ pxDummy->xItemLen = 0; //Dummy data should have no length
|
|
|
+ pxRingbuffer->pucWrite = pxRingbuffer->pucHead; //Reset write pointer to wrap around
|
|
|
+ }
|
|
|
+
|
|
|
+ //Item should be guaranteed to fit at this point. Set item header and copy data
|
|
|
+ ItemHeader_t *pxHeader = (ItemHeader_t *)pxRingbuffer->pucWrite;
|
|
|
+ pxHeader->xItemLen = xItemSize;
|
|
|
+ pxHeader->uxItemFlags = 0;
|
|
|
+ pxRingbuffer->pucWrite += rbHEADER_SIZE; //Advance pucWrite past header
|
|
|
+ memcpy(pxRingbuffer->pucWrite, pucItem, xItemSize);
|
|
|
+ pxRingbuffer->xItemsWaiting++;
|
|
|
+ pxRingbuffer->pucWrite += xAlignedItemSize; //Advance pucWrite past item to next aligned address
|
|
|
+
|
|
|
+ //If current remaining length can't fit a header, wrap around write pointer
|
|
|
+ if (pxRingbuffer->pucTail - pxRingbuffer->pucWrite < rbHEADER_SIZE) {
|
|
|
+ pxRingbuffer->pucWrite = pxRingbuffer->pucHead; //Wrap around pucWrite
|
|
|
+ }
|
|
|
+ //Check if buffer is full
|
|
|
+ if (pxRingbuffer->pucWrite == pxRingbuffer->pucFree) {
|
|
|
+        //Mark the buffer as full to distinguish it from an empty buffer
|
|
|
+ pxRingbuffer->uxRingbufferFlags |= rbBUFFER_FULL_FLAG;
|
|
|
+ }
|
|
|
+}
|
|
|
|
|
|
-//Copies a bunch of daya to the ring bytebuffer. Assumes there is space in the ringbuffer and
|
|
|
-//the ringbuffer is locked. Increases write_ptr to the next item. Returns pdTRUE on
|
|
|
-//success, pdFALSE if it can't make the item fit and the calling routine needs to retry
|
|
|
-//later or fail.
|
|
|
-//This function by itself is not threadsafe, always call from within a muxed section.
|
|
|
-static BaseType_t copyItemToRingbufByteBuf(ringbuf_t *rb, uint8_t *buffer, size_t buffer_size)
|
|
|
+static void prvCopyItemAllowSplit(Ringbuffer_t *pxRingbuffer, const uint8_t *pucItem, size_t xItemSize)
|
|
|
{
|
|
|
- size_t rem_len=(rb->data + rb->size) - rb->write_ptr; //length remaining until end of ringbuffer
|
|
|
+ //Check arguments and buffer state
|
|
|
+ size_t xAlignedItemSize = rbALIGN_SIZE(xItemSize); //Rounded up aligned item size
|
|
|
+ size_t xRemLen = pxRingbuffer->pucTail - pxRingbuffer->pucWrite; //Length from pucWrite until end of buffer
|
|
|
+ configASSERT(rbCHECK_ALIGNED(pxRingbuffer->pucWrite)); //pucWrite is always aligned in split ring buffers
|
|
|
+ configASSERT(pxRingbuffer->pucWrite >= pxRingbuffer->pucHead && pxRingbuffer->pucWrite < pxRingbuffer->pucTail); //Check write pointer is within bounds
|
|
|
+ configASSERT(xRemLen >= rbHEADER_SIZE); //Remaining length must be able to at least fit an item header
|
|
|
+
|
|
|
+ //Split item if necessary
|
|
|
+ if (xRemLen < xAlignedItemSize + rbHEADER_SIZE) {
|
|
|
+ //Write first part of the item
|
|
|
+ ItemHeader_t *pxFirstHeader = (ItemHeader_t *)pxRingbuffer->pucWrite;
|
|
|
+ pxFirstHeader->uxItemFlags = 0;
|
|
|
+ pxFirstHeader->xItemLen = xRemLen - rbHEADER_SIZE; //Fill remaining length with first part
|
|
|
+ pxRingbuffer->pucWrite += rbHEADER_SIZE; //Advance pucWrite past header
|
|
|
+ xRemLen -= rbHEADER_SIZE;
|
|
|
+ if (xRemLen > 0) {
|
|
|
+ memcpy(pxRingbuffer->pucWrite, pucItem, xRemLen);
|
|
|
+ pxRingbuffer->xItemsWaiting++;
|
|
|
+ //Update item arguments to account for data already copied
|
|
|
+ pucItem += xRemLen;
|
|
|
+ xItemSize -= xRemLen;
|
|
|
+ xAlignedItemSize -= xRemLen;
|
|
|
+ pxFirstHeader->uxItemFlags |= rbITEM_SPLIT_FLAG; //There must be more data
|
|
|
+ } else {
|
|
|
+ //Remaining length was only large enough to fit header
|
|
|
+ pxFirstHeader->uxItemFlags |= rbITEM_DUMMY_DATA_FLAG; //Item will completely be stored in 2nd part
|
|
|
+ }
|
|
|
+ pxRingbuffer->pucWrite = pxRingbuffer->pucHead; //Reset write pointer to start of buffer
|
|
|
+ }
|
|
|
+
|
|
|
+ //Item (whole or second part) should be guaranteed to fit at this point
|
|
|
+ ItemHeader_t *pxSecondHeader = (ItemHeader_t *)pxRingbuffer->pucWrite;
|
|
|
+ pxSecondHeader->xItemLen = xItemSize;
|
|
|
+ pxSecondHeader->uxItemFlags = 0;
|
|
|
+ pxRingbuffer->pucWrite += rbHEADER_SIZE; //Advance write pointer past header
|
|
|
+ memcpy(pxRingbuffer->pucWrite, pucItem, xItemSize);
|
|
|
+ pxRingbuffer->xItemsWaiting++;
|
|
|
+ pxRingbuffer->pucWrite += xAlignedItemSize; //Advance pucWrite past item to next aligned address
|
|
|
+
|
|
|
+ //If current remaining length can't fit a header, wrap around write pointer
|
|
|
+ if (pxRingbuffer->pucTail - pxRingbuffer->pucWrite < rbHEADER_SIZE) {
|
|
|
+ pxRingbuffer->pucWrite = pxRingbuffer->pucHead; //Wrap around pucWrite
|
|
|
+ }
|
|
|
+ //Check if buffer is full
|
|
|
+ if (pxRingbuffer->pucWrite == pxRingbuffer->pucFree) {
|
|
|
+        //Mark the buffer as full to distinguish it from an empty buffer
|
|
|
+ pxRingbuffer->uxRingbufferFlags |= rbBUFFER_FULL_FLAG;
|
|
|
+ }
|
|
|
+}
|
|
|
|
|
|
- //See if we have enough contiguous space to write the buffer.
|
|
|
- if (rem_len < buffer_size) {
|
|
|
- //...Nope. Write the data bit that fits.
|
|
|
- memcpy(rb->write_ptr, buffer, rem_len);
|
|
|
- //Update vars so the code later on will write the rest of the data.
|
|
|
- buffer+=rem_len;
|
|
|
- buffer_size-=rem_len;
|
|
|
- rb->write_ptr=rb->data;
|
|
|
+static void prvCopyItemByteBuf(Ringbuffer_t *pxRingbuffer, const uint8_t *pucItem, size_t xItemSize)
|
|
|
+{
|
|
|
+ //Check arguments and buffer state
|
|
|
+ configASSERT(pxRingbuffer->pucWrite >= pxRingbuffer->pucHead && pxRingbuffer->pucWrite < pxRingbuffer->pucTail); //Check write pointer is within bounds
|
|
|
+
|
|
|
+ size_t xRemLen = pxRingbuffer->pucTail - pxRingbuffer->pucWrite; //Length from pucWrite until end of buffer
|
|
|
+ if (xRemLen < xItemSize) {
|
|
|
+ //Copy as much as possible into remaining length
|
|
|
+ memcpy(pxRingbuffer->pucWrite, pucItem, xRemLen);
|
|
|
+ pxRingbuffer->xItemsWaiting += xRemLen;
|
|
|
+ //Update item arguments to account for data already written
|
|
|
+ pucItem += xRemLen;
|
|
|
+ xItemSize -= xRemLen;
|
|
|
+ pxRingbuffer->pucWrite = pxRingbuffer->pucHead; //Reset write pointer to start of buffer
|
|
|
+ }
|
|
|
+ //Copy all or remaining portion of the item
|
|
|
+ memcpy(pxRingbuffer->pucWrite, pucItem, xItemSize);
|
|
|
+ pxRingbuffer->xItemsWaiting += xItemSize;
|
|
|
+ pxRingbuffer->pucWrite += xItemSize;
|
|
|
+
|
|
|
+ //Wrap around pucWrite if it reaches the end
|
|
|
+ if (pxRingbuffer->pucWrite == pxRingbuffer->pucTail) {
|
|
|
+ pxRingbuffer->pucWrite = pxRingbuffer->pucHead;
|
|
|
+ }
|
|
|
+ //Check if buffer is full
|
|
|
+ if (pxRingbuffer->pucWrite == pxRingbuffer->pucFree) {
|
|
|
+ pxRingbuffer->uxRingbufferFlags |= rbBUFFER_FULL_FLAG; //Mark the buffer as full to avoid confusion with an empty buffer
|
|
|
}
|
|
|
+}
|
|
|
|
|
|
- //If we are here, the buffer is guaranteed to fit in the space starting at the write pointer.
|
|
|
- memcpy(rb->write_ptr, buffer, buffer_size);
|
|
|
- rb->write_ptr+=buffer_size;
|
|
|
- //The buffer will wrap around if we're at the end.
|
|
|
- if ((rb->data+rb->size)==rb->write_ptr) {
|
|
|
- rb->write_ptr=rb->data;
|
|
|
+static BaseType_t prvCheckItemAvail(Ringbuffer_t *pxRingbuffer)
|
|
|
+{
|
|
|
+ if ((pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG) && pxRingbuffer->pucRead != pxRingbuffer->pucFree) {
|
|
|
+ return pdFALSE; //Byte buffers do not allow multiple retrievals before return
|
|
|
+ }
|
|
|
+ if ((pxRingbuffer->xItemsWaiting > 0) && ((pxRingbuffer->pucRead != pxRingbuffer->pucWrite) || (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG))) {
|
|
|
+ return pdTRUE; //Items/data available for retrieval
|
|
|
+ } else {
|
|
|
+ return pdFALSE; //No items/data available for retrieval
|
|
|
}
|
|
|
- return pdTRUE;
|
|
|
}
|
|
|
|
|
|
-//Retrieves a pointer to the data of the next item, or NULL if this is not possible.
|
|
|
-//This function by itself is not threadsafe, always call from within a muxed section.
|
|
|
-//Because we always return one item, this function ignores the wanted_length variable.
|
|
|
-static uint8_t *getItemFromRingbufDefault(ringbuf_t *rb, size_t *length, int wanted_length)
|
|
|
+static void *prvGetItemDefault(Ringbuffer_t *pxRingbuffer, BaseType_t *pxIsSplit, size_t xUnusedParam, size_t *pxItemSize)
|
|
|
{
|
|
|
- uint8_t *ret;
|
|
|
- configASSERT(((int)rb->read_ptr&3)==0);
|
|
|
- if (rb->read_ptr == rb->write_ptr) {
|
|
|
- //No data available.
|
|
|
- return NULL;
|
|
|
+ //Check arguments and buffer state
|
|
|
+ ItemHeader_t *pxHeader = (ItemHeader_t *)pxRingbuffer->pucRead;
|
|
|
+ configASSERT(pxIsSplit != NULL);
|
|
|
+ configASSERT((pxRingbuffer->xItemsWaiting > 0) && ((pxRingbuffer->pucRead != pxRingbuffer->pucWrite) || (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG))); //Check there are items to be read
|
|
|
+ configASSERT(rbCHECK_ALIGNED(pxRingbuffer->pucRead)); //pucRead is always aligned in split ring buffers
|
|
|
+ configASSERT(pxRingbuffer->pucRead >= pxRingbuffer->pucHead && pxRingbuffer->pucRead < pxRingbuffer->pucTail); //Check read pointer is within bounds
|
|
|
+ configASSERT((pxHeader->xItemLen <= pxRingbuffer->xMaxItemSize) || (pxHeader->uxItemFlags & rbITEM_DUMMY_DATA_FLAG));
|
|
|
+
|
|
|
+ uint8_t *pcReturn;
|
|
|
+ //Wrap around if dummy data (dummy data indicates wrap around in no-split buffers)
|
|
|
+ if (pxHeader->uxItemFlags & rbITEM_DUMMY_DATA_FLAG) {
|
|
|
+ pxRingbuffer->pucRead = pxRingbuffer->pucHead;
|
|
|
+ //Check for errors with the next item
|
|
|
+ pxHeader = (ItemHeader_t *)pxRingbuffer->pucRead;
|
|
|
+ configASSERT(pxHeader->xItemLen <= pxRingbuffer->xMaxItemSize);
|
|
|
}
|
|
|
- //The item written at the point of the read pointer may be a dummy item.
|
|
|
- //We need to skip past it first, if that's the case.
|
|
|
- buf_entry_hdr_t *hdr=(buf_entry_hdr_t *)rb->read_ptr;
|
|
|
- configASSERT((hdr->len < rb->size) || (hdr->flags & iflag_dummydata));
|
|
|
- if (hdr->flags & iflag_dummydata) {
|
|
|
- //Hdr is dummy data. Reset to start of ringbuffer.
|
|
|
- rb->read_ptr=rb->data;
|
|
|
- //Get real header
|
|
|
- hdr=(buf_entry_hdr_t *)rb->read_ptr;
|
|
|
- configASSERT(hdr->len < rb->size);
|
|
|
- //No need to re-check if the ringbuffer is empty: the write routine will
|
|
|
- //always write a dummy item plus the real data item in one go, so now we must
|
|
|
- //be at the real data item by definition.
|
|
|
- }
|
|
|
- //Okay, pass the data back.
|
|
|
- ret=rb->read_ptr+sizeof(buf_entry_hdr_t);
|
|
|
- *length=hdr->len;
|
|
|
- //...and move the read pointer past the data.
|
|
|
- rb->read_ptr+=sizeof(buf_entry_hdr_t)+((hdr->len+3)&~3);
|
|
|
- //The buffer will wrap around if we don't have room for a header anymore.
|
|
|
- //Integer typecasting is used because the first operand can result into a -ve
|
|
|
- //value for cases wherein the ringbuffer size is not a multiple of 4, but the
|
|
|
- //implementation logic aligns read_ptr to 4-byte boundary
|
|
|
- if ((int)((rb->data + rb->size) - rb->read_ptr) < (int)sizeof(buf_entry_hdr_t)) {
|
|
|
- rb->read_ptr=rb->data;
|
|
|
- }
|
|
|
- return ret;
|
|
|
-}
|
|
|
-
|
|
|
-//Retrieves a pointer to the data in the buffer, or NULL if this is not possible.
|
|
|
-//This function by itself is not threadsafe, always call from within a muxed section.
|
|
|
-//This function honours the wanted_length and will never return more data than this.
|
|
|
-static uint8_t *getItemFromRingbufByteBuf(ringbuf_t *rb, size_t *length, int wanted_length)
|
|
|
-{
|
|
|
- uint8_t *ret;
|
|
|
- if (rb->read_ptr != rb->free_ptr) {
|
|
|
- //This type of ringbuff does not support multiple outstanding buffers.
|
|
|
- return NULL;
|
|
|
+ pcReturn = pxRingbuffer->pucRead + rbHEADER_SIZE; //Get pointer to part of item containing data (point past the header)
|
|
|
+ if (pxHeader->xItemLen == 0) {
|
|
|
+ //Inclusive of pucTail for special case where item of zero length just fits at the end of the buffer
|
|
|
+ configASSERT(pcReturn >= pxRingbuffer->pucHead && pcReturn <= pxRingbuffer->pucTail);
|
|
|
+ } else {
|
|
|
+ //Exclusive of pucTail if length is larger than zero, pcReturn should never point to pucTail
|
|
|
+ configASSERT(pcReturn >= pxRingbuffer->pucHead && pcReturn < pxRingbuffer->pucTail);
|
|
|
}
|
|
|
- if (rb->read_ptr == rb->write_ptr) {
|
|
|
- //No data available.
|
|
|
- return NULL;
|
|
|
+ *pxItemSize = pxHeader->xItemLen; //Get length of item
|
|
|
+ pxRingbuffer->xItemsWaiting --; //Update item count
|
|
|
+ *pxIsSplit = (pxHeader->uxItemFlags & rbITEM_SPLIT_FLAG) ? pdTRUE : pdFALSE;
|
|
|
+
|
|
|
+ pxRingbuffer->pucRead += rbHEADER_SIZE + rbALIGN_SIZE(pxHeader->xItemLen); //Update pucRead
|
|
|
+ //Check if pucRead requires wrap around
|
|
|
+ if ((pxRingbuffer->pucTail - pxRingbuffer->pucRead) < rbHEADER_SIZE) {
|
|
|
+ pxRingbuffer->pucRead = pxRingbuffer->pucHead;
|
|
|
}
|
|
|
- ret=rb->read_ptr;
|
|
|
- if (rb->read_ptr > rb->write_ptr) {
|
|
|
- //Available data wraps around. Give data until the end of the buffer.
|
|
|
- *length=rb->size-(rb->read_ptr - rb->data);
|
|
|
- if (wanted_length != 0 && *length > wanted_length) {
|
|
|
- *length=wanted_length;
|
|
|
- rb->read_ptr+=wanted_length;
|
|
|
+ return (void *)pcReturn;
|
|
|
+}
|
|
|
+
|
|
|
+static void *prvGetItemByteBuf(Ringbuffer_t *pxRingbuffer, BaseType_t *pxUnusedParam ,size_t xMaxSize, size_t *pxItemSize)
|
|
|
+{
|
|
|
+ //Check arguments and buffer state
|
|
|
+ configASSERT((pxRingbuffer->xItemsWaiting > 0) && ((pxRingbuffer->pucRead != pxRingbuffer->pucWrite) || (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG))); //Check there are items to be read
|
|
|
+ configASSERT(pxRingbuffer->pucRead >= pxRingbuffer->pucHead && pxRingbuffer->pucRead < pxRingbuffer->pucTail); //Check read pointer is within bounds
|
|
|
+ configASSERT(pxRingbuffer->pucRead == pxRingbuffer->pucFree);
|
|
|
+
|
|
|
+ uint8_t *ret = pxRingbuffer->pucRead;
|
|
|
+ if ((pxRingbuffer->pucRead > pxRingbuffer->pucWrite) || (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG)) { //Available data wraps around
|
|
|
+ //Return contiguous piece from read pointer until buffer tail, or xMaxSize
|
|
|
+ if (xMaxSize == 0 || pxRingbuffer->pucTail - pxRingbuffer->pucRead <= xMaxSize) {
|
|
|
+ //All contiguous data from read pointer to tail
|
|
|
+ *pxItemSize = pxRingbuffer->pucTail - pxRingbuffer->pucRead;
|
|
|
+ pxRingbuffer->xItemsWaiting -= pxRingbuffer->pucTail - pxRingbuffer->pucRead;
|
|
|
+ pxRingbuffer->pucRead = pxRingbuffer->pucHead; //Wrap around read pointer
|
|
|
} else {
|
|
|
- rb->read_ptr=rb->data;
|
|
|
+ //Return xMaxSize amount of data
|
|
|
+ *pxItemSize = xMaxSize;
|
|
|
+ pxRingbuffer->xItemsWaiting -= xMaxSize;
|
|
|
+ pxRingbuffer->pucRead += xMaxSize; //Advance read pointer past retrieved data
|
|
|
}
|
|
|
- } else {
|
|
|
- //Return data up to write pointer.
|
|
|
- *length=rb->write_ptr -rb->read_ptr;
|
|
|
- if (wanted_length != 0 && *length > wanted_length) {
|
|
|
- *length=wanted_length;
|
|
|
- rb->read_ptr+=wanted_length;
|
|
|
+ } else { //Available data is contiguous between read and write pointer
|
|
|
+ if (xMaxSize == 0 || pxRingbuffer->pucWrite - pxRingbuffer->pucRead <= xMaxSize) {
|
|
|
+ //Return all contiguous data from read to write pointer
|
|
|
+ *pxItemSize = pxRingbuffer->pucWrite - pxRingbuffer->pucRead;
|
|
|
+ pxRingbuffer->xItemsWaiting -= pxRingbuffer->pucWrite - pxRingbuffer->pucRead;
|
|
|
+ pxRingbuffer->pucRead = pxRingbuffer->pucWrite;
|
|
|
} else {
|
|
|
- rb->read_ptr=rb->write_ptr;
|
|
|
+ //Return xMaxSize data from read pointer
|
|
|
+ *pxItemSize = xMaxSize;
|
|
|
+ pxRingbuffer->xItemsWaiting -= xMaxSize;
|
|
|
+ pxRingbuffer->pucRead += xMaxSize; //Advance read pointer past retrieved data
|
|
|
+
|
|
|
}
|
|
|
}
|
|
|
- return ret;
|
|
|
-}
|
|
|
-
|
|
|
-
|
|
|
-//Returns an item to the ringbuffer. Will mark the item as free, and will see if the free pointer
|
|
|
-//can be increase.
|
|
|
-//This function by itself is not threadsafe, always call from within a muxed section.
|
|
|
-static void returnItemToRingbufDefault(ringbuf_t *rb, void *item) {
|
|
|
- uint8_t *data=(uint8_t*)item;
|
|
|
- configASSERT(((int)rb->free_ptr&3)==0);
|
|
|
- configASSERT(data >= rb->data);
|
|
|
- configASSERT(data <= rb->data+rb->size);
|
|
|
- //Grab the buffer entry that preceeds the buffer
|
|
|
- buf_entry_hdr_t *hdr=(buf_entry_hdr_t*)(data-sizeof(buf_entry_hdr_t));
|
|
|
- configASSERT(hdr->len < rb->size);
|
|
|
- configASSERT((hdr->flags & iflag_dummydata)==0);
|
|
|
- configASSERT((hdr->flags & iflag_free)==0);
|
|
|
- //Mark the buffer as free.
|
|
|
- hdr->flags&=~iflag_wrap;
|
|
|
- hdr->flags|=iflag_free;
|
|
|
-
|
|
|
- //Do a cleanup pass.
|
|
|
- hdr=(buf_entry_hdr_t *)rb->free_ptr;
|
|
|
- //basically forward free_ptr until we run into either a block that is still in use or the write pointer.
|
|
|
- while (((hdr->flags & iflag_free) || (hdr->flags & iflag_dummydata)) && rb->free_ptr != rb->write_ptr) {
|
|
|
- if (hdr->flags & iflag_dummydata) {
|
|
|
- //Rest is dummy data. Reset to start of ringbuffer.
|
|
|
- rb->free_ptr=rb->data;
|
|
|
+ return (void *)ret;
|
|
|
+}
|
|
|
+
|
|
|
+static void prvReturnItemDefault(Ringbuffer_t *pxRingbuffer, uint8_t *pucItem)
|
|
|
+{
|
|
|
+ //Check arguments and buffer state
|
|
|
+ configASSERT(rbCHECK_ALIGNED(pucItem));
|
|
|
+ configASSERT(pucItem >= pxRingbuffer->pucHead);
|
|
|
+ configASSERT(pucItem <= pxRingbuffer->pucTail); //Inclusive of pucTail in the case of zero length item at the very end
|
|
|
+
|
|
|
+ //Get and check header of the item
|
|
|
+ ItemHeader_t *pxCurHeader = (ItemHeader_t *)(pucItem - rbHEADER_SIZE);
|
|
|
+ configASSERT(pxCurHeader->xItemLen <= pxRingbuffer->xMaxItemSize);
|
|
|
+ configASSERT((pxCurHeader->uxItemFlags & rbITEM_DUMMY_DATA_FLAG) == 0); //Dummy items should never have been read
|
|
|
+ configASSERT((pxCurHeader->uxItemFlags & rbITEM_FREE_FLAG) == 0); //Indicates item has already been returned before
|
|
|
+ pxCurHeader->uxItemFlags &= ~rbITEM_SPLIT_FLAG; //Clear split flag if set (not strictly necessary)
|
|
|
+ pxCurHeader->uxItemFlags |= rbITEM_FREE_FLAG; //Mark as free
|
|
|
+
|
|
|
+ /*
|
|
|
+ * Items might not be returned in the order they were retrieved. Move the free pointer
|
|
|
+ * up to the next item that has not been marked as free (by free flag) or up
|
|
|
+ * till the read pointer. When advancing the free pointer, items that have already been
|
|
|
+ * freed or items with dummy data should be skipped over
|
|
|
+ */
|
|
|
+ pxCurHeader = (ItemHeader_t *)pxRingbuffer->pucFree;
|
|
|
+ //Skip over Items that have already been freed or are dummy items
|
|
|
+ while (((pxCurHeader->uxItemFlags & rbITEM_FREE_FLAG) || (pxCurHeader->uxItemFlags & rbITEM_DUMMY_DATA_FLAG)) && pxRingbuffer->pucFree != pxRingbuffer->pucRead) {
|
|
|
+ if (pxCurHeader->uxItemFlags & rbITEM_DUMMY_DATA_FLAG) {
|
|
|
+ pxCurHeader->uxItemFlags |= rbITEM_FREE_FLAG; //Mark as freed (not strictly necessary but adds redundancy)
|
|
|
+ pxRingbuffer->pucFree = pxRingbuffer->pucHead; //Wrap around due to dummy data
|
|
|
} else {
|
|
|
- //Skip past item
|
|
|
- rb->free_ptr+=sizeof(buf_entry_hdr_t);
|
|
|
- //Check if the free_ptr overshoots the buffer.
|
|
|
- //Checking this before aligning free_ptr since it is possible that alignment
|
|
|
- //will cause pointer to overshoot, if the ringbuf size is not a multiple of 4
|
|
|
- configASSERT(rb->free_ptr+hdr->len<=rb->data+rb->size);
|
|
|
- //Align free_ptr to 4 byte boundary. Overshoot condition will result in wrap around below
|
|
|
- size_t len=(hdr->len+3)&~3;
|
|
|
- rb->free_ptr+=len;
|
|
|
+ //Item with data that has already been freed, advance free pointer past this item
|
|
|
+ size_t xAlignedItemSize = rbALIGN_SIZE(pxCurHeader->xItemLen);
|
|
|
+ pxRingbuffer->pucFree += xAlignedItemSize + rbHEADER_SIZE;
|
|
|
+ //Redundancy check to ensure free pointer has not overshot buffer bounds
|
|
|
+ configASSERT(pxRingbuffer->pucFree <= pxRingbuffer->pucHead + pxRingbuffer->xSize);
|
|
|
}
|
|
|
- //The buffer will wrap around if we don't have room for a header anymore.
|
|
|
- //Integer typecasting is used because the first operand can result into a -ve
|
|
|
- //value for cases wherein the ringbuffer size is not a multiple of 4, but the
|
|
|
- //implementation logic aligns free_ptr to 4-byte boundary
|
|
|
- if ((int)((rb->data+rb->size)-rb->free_ptr) < (int)sizeof(buf_entry_hdr_t)) {
|
|
|
- rb->free_ptr=rb->data;
|
|
|
+ //Check if pucFree requires wrap around
|
|
|
+ if ((pxRingbuffer->pucTail - pxRingbuffer->pucFree) < rbHEADER_SIZE) {
|
|
|
+ pxRingbuffer->pucFree = pxRingbuffer->pucHead;
|
|
|
}
|
|
|
- //The free_ptr can not exceed read_ptr, otherwise write_ptr might overwrite read_ptr.
|
|
|
- //Read_ptr can not set to rb->data with free_ptr, otherwise write_ptr might wrap around to rb->data.
|
|
|
- if(rb->free_ptr == rb->read_ptr) break;
|
|
|
- //Next header
|
|
|
- hdr=(buf_entry_hdr_t *)rb->free_ptr;
|
|
|
+ pxCurHeader = (ItemHeader_t *)pxRingbuffer->pucFree; //Update header to point to item
|
|
|
}
|
|
|
-}
|
|
|
|
|
|
-
|
|
|
-//Returns an item to the ringbuffer. Will mark the item as free, and will see if the free pointer
|
|
|
-//can be increase.
|
|
|
-//This function by itself is not threadsafe, always call from within a muxed section.
|
|
|
-static void returnItemToRingbufBytebuf(ringbuf_t *rb, void *item) {
|
|
|
- configASSERT((uint8_t *)item >= rb->data);
|
|
|
- configASSERT((uint8_t *)item < rb->data+rb->size);
|
|
|
- //Free the read memory.
|
|
|
- rb->free_ptr=rb->read_ptr;
|
|
|
+ //Check if the buffer full flag should be reset
|
|
|
+ if (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) {
|
|
|
+ if (pxRingbuffer->pucFree != pxRingbuffer->pucWrite) {
|
|
|
+ pxRingbuffer->uxRingbufferFlags &= ~rbBUFFER_FULL_FLAG;
|
|
|
+ } else if (pxRingbuffer->pucFree == pxRingbuffer->pucWrite && pxRingbuffer->pucFree == pxRingbuffer->pucRead) {
|
|
|
+ //Special case where a full buffer is completely freed in one go
|
|
|
+ pxRingbuffer->uxRingbufferFlags &= ~rbBUFFER_FULL_FLAG;
|
|
|
+ }
|
|
|
+ }
|
|
|
}
|
|
|
-/*
|
|
|
- Check if the selected queue set member is the ringbuffer's read semaphore
|
|
|
-*/
|
|
|
-BaseType_t xRingbufferCanRead(RingbufHandle_t ringbuf, QueueSetMemberHandle_t member)
|
|
|
+
|
|
|
+static void prvReturnItemByteBuf(Ringbuffer_t *pxRingbuffer, uint8_t *pucItem)
|
|
|
{
|
|
|
- ringbuf_t *rb=(ringbuf_t *)ringbuf;
|
|
|
- configASSERT(rb);
|
|
|
- return (rb->items_buffered_sem == member)? pdTRUE : pdFALSE;
|
|
|
+ //Check pointer points to address inside buffer
|
|
|
+ configASSERT((uint8_t *)pucItem >= pxRingbuffer->pucHead);
|
|
|
+ configASSERT((uint8_t *)pucItem < pxRingbuffer->pucTail);
|
|
|
+ //Free the read memory. Simply moves free pointer to read pointer as byte buffers do not allow multiple outstanding reads
|
|
|
+ pxRingbuffer->pucFree = pxRingbuffer->pucRead;
|
|
|
+ //If buffer was full before, reset full flag as free pointer has moved
|
|
|
+ if (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) {
|
|
|
+ pxRingbuffer->uxRingbufferFlags &= ~rbBUFFER_FULL_FLAG;
|
|
|
+ }
|
|
|
}
|
|
|
-/*
|
|
|
- Check if the selected queue set member is the ringbuffer's write semaphore
|
|
|
-*/
|
|
|
-BaseType_t xRingbufferCanWrite(RingbufHandle_t ringbuf, QueueSetMemberHandle_t member)
|
|
|
+
|
|
|
+static size_t prvGetCurMaxSizeNoSplit(Ringbuffer_t *pxRingbuffer)
|
|
|
{
|
|
|
- ringbuf_t *rb=(ringbuf_t *)ringbuf;
|
|
|
- configASSERT(rb);
|
|
|
- return (rb->free_space_sem == member)? pdTRUE : pdFALSE;
|
|
|
+ BaseType_t xFreeSize;
|
|
|
+ //Check if buffer is full
|
|
|
+ if (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) {
|
|
|
+ return 0;
|
|
|
+ }
|
|
|
+ if (pxRingbuffer->pucWrite < pxRingbuffer->pucFree) {
|
|
|
+ //Free space is contiguous between pucWrite and pucFree
|
|
|
+ xFreeSize = pxRingbuffer->pucFree - pxRingbuffer->pucWrite;
|
|
|
+ } else {
|
|
|
+ //Free space wraps around (or overlapped at pucHead), select largest
|
|
|
+ //contiguous free space as no-split items require contiguous space
|
|
|
+ size_t xSize1 = pxRingbuffer->pucTail - pxRingbuffer->pucWrite;
|
|
|
+ size_t xSize2 = pxRingbuffer->pucFree - pxRingbuffer->pucHead;
|
|
|
+ xFreeSize = (xSize1 > xSize2) ? xSize1 : xSize2;
|
|
|
+ }
|
|
|
+
|
|
|
+ //No-split ring buffer items need space for a header
|
|
|
+ xFreeSize -= rbHEADER_SIZE;
|
|
|
+ //Limit free size to be within bounds
|
|
|
+ if (xFreeSize > pxRingbuffer->xMaxItemSize) {
|
|
|
+ xFreeSize = pxRingbuffer->xMaxItemSize;
|
|
|
+ } else if (xFreeSize < 0) {
|
|
|
+ //Occurs when free space is less than header size
|
|
|
+ xFreeSize = 0;
|
|
|
+ }
|
|
|
+ return xFreeSize;
|
|
|
}
|
|
|
|
|
|
-void xRingbufferPrintInfo(RingbufHandle_t ringbuf)
|
|
|
+static size_t prvGetCurMaxSizeAllowSplit(Ringbuffer_t *pxRingbuffer)
|
|
|
{
|
|
|
- ringbuf_t *rb=(ringbuf_t *)ringbuf;
|
|
|
- configASSERT(rb);
|
|
|
- ets_printf("Rb size %d free %d rptr %d freeptr %d wptr %d\n",
|
|
|
- rb->size, ringbufferFreeMem(rb), rb->read_ptr-rb->data, rb->free_ptr-rb->data, rb->write_ptr-rb->data);
|
|
|
-}
|
|
|
+ BaseType_t xFreeSize;
|
|
|
+ //Check if buffer is full
|
|
|
+ if (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) {
|
|
|
+ return 0;
|
|
|
+ }
|
|
|
+ if (pxRingbuffer->pucWrite == pxRingbuffer->pucHead && pxRingbuffer->pucFree == pxRingbuffer->pucHead) {
|
|
|
+ //Check for special case where pucWrite and pucFree are both at pucHead
|
|
|
+ xFreeSize = pxRingbuffer->xSize - rbHEADER_SIZE;
|
|
|
+ } else if (pxRingbuffer->pucWrite < pxRingbuffer->pucFree) {
|
|
|
+ //Free space is contiguous between pucWrite and pucFree, requires single header
|
|
|
+ xFreeSize = (pxRingbuffer->pucFree - pxRingbuffer->pucWrite) - rbHEADER_SIZE;
|
|
|
+ } else {
|
|
|
+ //Free space wraps around, requires two headers
|
|
|
+ xFreeSize = (pxRingbuffer->pucFree - pxRingbuffer->pucHead) +
|
|
|
+ (pxRingbuffer->pucTail - pxRingbuffer->pucWrite) -
|
|
|
+ (rbHEADER_SIZE * 2);
|
|
|
+ }
|
|
|
|
|
|
+ //Limit free size to be within bounds
|
|
|
+ if (xFreeSize > pxRingbuffer->xMaxItemSize) {
|
|
|
+ xFreeSize = pxRingbuffer->xMaxItemSize;
|
|
|
+ } else if (xFreeSize < 0) {
|
|
|
+ xFreeSize = 0;
|
|
|
+ }
|
|
|
+ return xFreeSize;
|
|
|
+}
|
|
|
|
|
|
-size_t xRingbufferGetCurFreeSize(RingbufHandle_t ringbuf)
|
|
|
+static size_t prvGetCurMaxSizeByteBuf(Ringbuffer_t *pxRingbuffer)
|
|
|
{
|
|
|
- ringbuf_t *rb=(ringbuf_t *)ringbuf;
|
|
|
- configASSERT(rb);
|
|
|
- configASSERT(rb->getFreeSizeImpl);
|
|
|
- int free_size = rb->getFreeSizeImpl(rb);
|
|
|
- //Reserve one byte. If we do not do this and the entire buffer is filled, we get a situation
|
|
|
- //where read_ptr == free_ptr, messing up the next calculation.
|
|
|
- return free_size - 1;
|
|
|
+ BaseType_t xFreeSize;
|
|
|
+ //Check if buffer is full
|
|
|
+ if (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) {
|
|
|
+ return 0;
|
|
|
+ }
|
|
|
+
|
|
|
+ /*
|
|
|
+ * Return whatever space is available depending on relative positions of the free
|
|
|
+ * pointer and write pointer. There is no overhead of headers in this mode
|
|
|
+ */
|
|
|
+ xFreeSize = pxRingbuffer->pucFree - pxRingbuffer->pucWrite;
|
|
|
+ if (xFreeSize <= 0) {
|
|
|
+ xFreeSize += pxRingbuffer->xSize;
|
|
|
+ }
|
|
|
+ return xFreeSize;
|
|
|
}
|
|
|
|
|
|
-static size_t getCurFreeSizeByteBuf(ringbuf_t *rb)
|
|
|
+static BaseType_t prvReceiveGeneric(Ringbuffer_t *pxRingbuffer, void **pvItem1, void **pvItem2, size_t *xItemSize1, size_t *xItemSize2, size_t xMaxSize, TickType_t xTicksToWait)
|
|
|
{
|
|
|
- //Return whatever space is available depending on relative positions of
|
|
|
- //the free pointer and write pointer. There is no overhead of headers in
|
|
|
- //this mode
|
|
|
- int free_size = rb->free_ptr-rb->write_ptr;
|
|
|
- if (free_size <= 0)
|
|
|
- free_size += rb->size;
|
|
|
- return free_size;
|
|
|
+ BaseType_t xReturn = pdFALSE;
|
|
|
+ BaseType_t xReturnSemaphore = pdFALSE;
|
|
|
+ TickType_t xTicksEnd = xTaskGetTickCount() + xTicksToWait;
|
|
|
+ TickType_t xTicksRemaining = xTicksToWait;
|
|
|
+ while (xTicksRemaining <= xTicksToWait) { //xTicksRemaining will underflow once xTaskGetTickCount() > xTicksEnd
|
|
|
+ //Block until more free space becomes available or timeout
|
|
|
+ if (xSemaphoreTake(pxRingbuffer->xItemsBufferedSemaphore, xTicksRemaining) != pdTRUE) {
|
|
|
+ xReturn = pdFALSE; //Timed out attempting to get semaphore
|
|
|
+ break;
|
|
|
+ }
|
|
|
+
|
|
|
+ //Semaphore obtained, check if item can be retrieved
|
|
|
+ taskENTER_CRITICAL(&pxRingbuffer->mux);
|
|
|
+ if (prvCheckItemAvail(pxRingbuffer) == pdTRUE) {
|
|
|
+ //Item is available for retrieval
|
|
|
+ BaseType_t xIsSplit;
|
|
|
+ if (pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG) {
|
|
|
+ //Second argument (pxIsSplit) is unused for byte buffers
|
|
|
+ *pvItem1 = pxRingbuffer->pvGetItem(pxRingbuffer, NULL, xMaxSize, xItemSize1);
|
|
|
+ } else {
|
|
|
+ //Third argument (xMaxSize) is unused for no-split/allow-split buffers
|
|
|
+ *pvItem1 = pxRingbuffer->pvGetItem(pxRingbuffer, &xIsSplit, 0, xItemSize1);
|
|
|
+ }
|
|
|
+ //Check for item split if configured to do so
|
|
|
+ if ((pxRingbuffer->uxRingbufferFlags & rbALLOW_SPLIT_FLAG) && (pvItem2 != NULL) && (xItemSize2 != NULL)) {
|
|
|
+ if (xIsSplit == pdTRUE) {
|
|
|
+ *pvItem2 = pxRingbuffer->pvGetItem(pxRingbuffer, &xIsSplit, 0, xItemSize2);
|
|
|
+ configASSERT(*pvItem2 < *pvItem1); //Check wrap around has occurred
|
|
|
+ configASSERT(xIsSplit == pdFALSE); //Second part should not have wrapped flag
|
|
|
+ } else {
|
|
|
+ *pvItem2 = NULL;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ xReturn = pdTRUE;
|
|
|
+ if (pxRingbuffer->xItemsWaiting > 0) {
|
|
|
+ xReturnSemaphore = pdTRUE;
|
|
|
+ }
|
|
|
+ taskEXIT_CRITICAL(&pxRingbuffer->mux);
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ //No item available for retrieval, adjust ticks and take the semaphore again
|
|
|
+ if (xTicksToWait != portMAX_DELAY) {
|
|
|
+ xTicksRemaining = xTicksEnd - xTaskGetTickCount();
|
|
|
+ }
|
|
|
+ taskEXIT_CRITICAL(&pxRingbuffer->mux);
|
|
|
+ /*
|
|
|
+ * Gap between critical section and re-acquiring of the semaphore. If
|
|
|
+ * semaphore is given now, priority inversion might occur (see docs)
|
|
|
+ */
|
|
|
+ }
|
|
|
+
|
|
|
+ if (xReturnSemaphore == pdTRUE) {
|
|
|
+ xSemaphoreGive(pxRingbuffer->xItemsBufferedSemaphore); //Give semaphore back so other tasks can retrieve
|
|
|
+ }
|
|
|
+ return xReturn;
|
|
|
}
|
|
|
|
|
|
-static size_t getCurFreeSizeAllowSplit(ringbuf_t *rb)
|
|
|
+static BaseType_t prvReceiveGenericFromISR(Ringbuffer_t *pxRingbuffer, void **pvItem1, void **pvItem2, size_t *xItemSize1, size_t *xItemSize2, size_t xMaxSize)
|
|
|
{
|
|
|
- int free_size;
|
|
|
- //If Both, the write and free pointer are at the start. Hence, the entire buffer
|
|
|
- //is available (minus the space for the header)
|
|
|
- if (rb->write_ptr == rb->free_ptr && rb->write_ptr == rb->data) {
|
|
|
- free_size = rb->size - sizeof(buf_entry_hdr_t);
|
|
|
- } else if (rb->write_ptr < rb->free_ptr) {
|
|
|
- //Else if the free pointer is beyond the write pointer, only the space between
|
|
|
- //them would be available (minus the space for the header)
|
|
|
- free_size = rb->free_ptr - rb->write_ptr - sizeof(buf_entry_hdr_t);
|
|
|
- } else {
|
|
|
- //Else the data can wrap around and 2 headers will be required
|
|
|
- free_size = rb->free_ptr - rb->write_ptr + rb->size - (2 * sizeof(buf_entry_hdr_t));
|
|
|
+ BaseType_t xReturn = pdFALSE;
|
|
|
+ BaseType_t xReturnSemaphore = pdFALSE;
|
|
|
+
|
|
|
+ taskENTER_CRITICAL_ISR(&pxRingbuffer->mux);
|
|
|
+ if(prvCheckItemAvail(pxRingbuffer) == pdTRUE) {
|
|
|
+ BaseType_t xIsSplit;
|
|
|
+ if (pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG) {
|
|
|
+ //Second argument (pxIsSplit) is unused for byte buffers
|
|
|
+ *pvItem1 = pxRingbuffer->pvGetItem(pxRingbuffer, NULL, xMaxSize, xItemSize1);
|
|
|
+ } else {
|
|
|
+ //Third argument (xMaxSize) is unused for no-split/allow-split buffers
|
|
|
+ *pvItem1 = pxRingbuffer->pvGetItem(pxRingbuffer, &xIsSplit, 0, xItemSize1);
|
|
|
+ }
|
|
|
+ //Check for item split if configured to do so
|
|
|
+ if ((pxRingbuffer->uxRingbufferFlags & rbALLOW_SPLIT_FLAG) && pvItem2 != NULL && xItemSize2 != NULL) {
|
|
|
+ if (xIsSplit == pdTRUE) {
|
|
|
+ *pvItem2 = pxRingbuffer->pvGetItem(pxRingbuffer, &xIsSplit, 0, xItemSize2);
|
|
|
+ configASSERT(*pvItem2 < *pvItem1); //Check wrap around has occurred
|
|
|
+ configASSERT(xIsSplit == pdFALSE); //Second part should not have wrapped flag
|
|
|
+ } else {
|
|
|
+ *pvItem2 = NULL;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ xReturn = pdTRUE;
|
|
|
+ if (pxRingbuffer->xItemsWaiting > 0) {
|
|
|
+ xReturnSemaphore = pdTRUE;
|
|
|
+ }
|
|
|
}
|
|
|
- return free_size;
|
|
|
+ taskEXIT_CRITICAL_ISR(&pxRingbuffer->mux);
|
|
|
+
|
|
|
+ if (xReturnSemaphore == pdTRUE) {
|
|
|
+ xSemaphoreGiveFromISR(pxRingbuffer->xItemsBufferedSemaphore, NULL); //Give semaphore back so other tasks can retrieve
|
|
|
+ }
|
|
|
+ return xReturn;
|
|
|
}
|
|
|
|
|
|
-static size_t getCurFreeSizeNoSplit(ringbuf_t *rb)
|
|
|
+/* ------------------------------------------------- Public Definitions -------------------------------------------- */
|
|
|
+
|
|
|
+RingbufHandle_t xRingbufferCreate(size_t xBufferSize, ringbuf_type_t xBufferType)
|
|
|
{
|
|
|
- int free_size;
|
|
|
- //If the free pointer is beyond the write pointer, only the space between
|
|
|
- //them would be available
|
|
|
- if (rb->write_ptr < rb->free_ptr) {
|
|
|
- free_size = rb->free_ptr - rb->write_ptr;
|
|
|
- } else {
|
|
|
- //Else check which one is bigger amongst the below 2
|
|
|
- //1) Space from the write pointer to the end of buffer
|
|
|
- int size1 = rb->data + rb->size - rb->write_ptr;
|
|
|
- //2) Space from the start of buffer to the free pointer
|
|
|
- int size2 = rb->free_ptr - rb->data;
|
|
|
- //And then select the larger of the two
|
|
|
- free_size = size1 > size2 ? size1 : size2;
|
|
|
- }
|
|
|
- //In any case, a single header will be used, so subtracting the space that
|
|
|
- //would be required for it
|
|
|
- return free_size - sizeof(buf_entry_hdr_t);
|
|
|
-}
|
|
|
-
|
|
|
-
|
|
|
-RingbufHandle_t xRingbufferCreate(size_t buf_length, ringbuf_type_t type)
|
|
|
-{
|
|
|
- ringbuf_t *rb = malloc(sizeof(ringbuf_t));
|
|
|
- if (rb==NULL) goto err;
|
|
|
- memset(rb, 0, sizeof(ringbuf_t));
|
|
|
- rb->data = malloc(buf_length);
|
|
|
- if (rb->data == NULL) goto err;
|
|
|
- rb->size = buf_length;
|
|
|
- rb->free_ptr = rb->data;
|
|
|
- rb->read_ptr = rb->data;
|
|
|
- rb->write_ptr = rb->data;
|
|
|
- rb->free_space_sem = xSemaphoreCreateBinary();
|
|
|
- rb->items_buffered_sem = xSemaphoreCreateBinary();
|
|
|
- rb->flags=0;
|
|
|
- if (type==RINGBUF_TYPE_ALLOWSPLIT) {
|
|
|
- rb->flags|=flag_allowsplit;
|
|
|
- rb->copyItemToRingbufImpl=copyItemToRingbufAllowSplit;
|
|
|
- rb->getItemFromRingbufImpl=getItemFromRingbufDefault;
|
|
|
- rb->returnItemToRingbufImpl=returnItemToRingbufDefault;
|
|
|
- //Calculate max item size. Worst case, we need to split an item into two, which means two headers of overhead.
|
|
|
- rb->maxItemSize=rb->size-(sizeof(buf_entry_hdr_t)*2)-4;
|
|
|
- rb->getFreeSizeImpl=getCurFreeSizeAllowSplit;
|
|
|
- } else if (type==RINGBUF_TYPE_BYTEBUF) {
|
|
|
- rb->flags|=flag_bytebuf;
|
|
|
- rb->copyItemToRingbufImpl=copyItemToRingbufByteBuf;
|
|
|
- rb->getItemFromRingbufImpl=getItemFromRingbufByteBuf;
|
|
|
- rb->returnItemToRingbufImpl=returnItemToRingbufBytebuf;
|
|
|
- //Calculate max item size. We have no headers and can split anywhere -> size is total size minus one.
|
|
|
- rb->maxItemSize=rb->size-1;
|
|
|
- rb->getFreeSizeImpl=getCurFreeSizeByteBuf;
|
|
|
- } else if (type==RINGBUF_TYPE_NOSPLIT) {
|
|
|
- rb->copyItemToRingbufImpl=copyItemToRingbufNoSplit;
|
|
|
- rb->getItemFromRingbufImpl=getItemFromRingbufDefault;
|
|
|
- rb->returnItemToRingbufImpl=returnItemToRingbufDefault;
|
|
|
- //Calculate max item size. Worst case, we have the write ptr in such a position that we are lacking four bytes of free
|
|
|
- //memory to put an item into the rest of the memory. If this happens, we have to dummy-fill
|
|
|
- //(item_data-4) bytes of buffer, then we only have (size-(item_data-4) bytes left to fill
|
|
|
- //with the real item. (item size being header+data)
|
|
|
- rb->maxItemSize=(rb->size/2)-sizeof(buf_entry_hdr_t)-4;
|
|
|
- rb->getFreeSizeImpl=getCurFreeSizeNoSplit;
|
|
|
+ //Allocate memory
|
|
|
+ Ringbuffer_t *pxRingbuffer = calloc(1, sizeof(Ringbuffer_t));
|
|
|
+ if (pxRingbuffer == NULL) {
|
|
|
+ goto err;
|
|
|
+ }
|
|
|
+ if (xBufferType != RINGBUF_TYPE_BYTEBUF) {
|
|
|
+ xBufferSize = rbALIGN_SIZE(xBufferSize); //xBufferSize is rounded up for no-split/allow-split buffers
|
|
|
+ }
|
|
|
+ pxRingbuffer->pucHead = malloc(xBufferSize);
|
|
|
+ if (pxRingbuffer->pucHead == NULL) {
|
|
|
+ goto err;
|
|
|
+ }
|
|
|
+
|
|
|
+ //Initialize values
|
|
|
+ pxRingbuffer->xSize = xBufferSize;
|
|
|
+ pxRingbuffer->pucTail = pxRingbuffer->pucHead + xBufferSize;
|
|
|
+ pxRingbuffer->pucFree = pxRingbuffer->pucHead;
|
|
|
+ pxRingbuffer->pucRead = pxRingbuffer->pucHead;
|
|
|
+ pxRingbuffer->pucWrite = pxRingbuffer->pucHead;
|
|
|
+ pxRingbuffer->xItemsWaiting = 0;
|
|
|
+ pxRingbuffer->xFreeSpaceSemaphore = xSemaphoreCreateBinary();
|
|
|
+ pxRingbuffer->xItemsBufferedSemaphore = xSemaphoreCreateBinary();
|
|
|
+ pxRingbuffer->uxRingbufferFlags = 0;
|
|
|
+
|
|
|
+ //Initialize type dependent values and function pointers
|
|
|
+ if (xBufferType == RINGBUF_TYPE_NOSPLIT) {
|
|
|
+ pxRingbuffer->xCheckItemFits = prvCheckItemFitsDefault;
|
|
|
+ pxRingbuffer->vCopyItem = prvCopyItemNoSplit;
|
|
|
+ pxRingbuffer->pvGetItem = prvGetItemDefault;
|
|
|
+ pxRingbuffer->vReturnItem = prvReturnItemDefault;
|
|
|
+ /*
|
|
|
+ * Buffer lengths are always aligned. No-split buffer (read/write/free)
|
|
|
+ * pointers are also always aligned. Therefore worse case scenario is
|
|
|
+ * the write pointer is at the most aligned halfway point.
|
|
|
+ */
|
|
|
+ pxRingbuffer->xMaxItemSize = rbALIGN_SIZE(pxRingbuffer->xSize / 2) - rbHEADER_SIZE;
|
|
|
+ pxRingbuffer->xGetCurMaxSize = prvGetCurMaxSizeNoSplit;
|
|
|
+ } else if (xBufferType == RINGBUF_TYPE_ALLOWSPLIT) {
|
|
|
+ pxRingbuffer->uxRingbufferFlags |= rbALLOW_SPLIT_FLAG;
|
|
|
+ pxRingbuffer->xCheckItemFits = prvCheckItemFitsDefault;
|
|
|
+ pxRingbuffer->vCopyItem = prvCopyItemAllowSplit;
|
|
|
+ pxRingbuffer->pvGetItem = prvGetItemDefault;
|
|
|
+ pxRingbuffer->vReturnItem = prvReturnItemDefault;
|
|
|
+ //Worst case an item is split into two, incurring two headers of overhead
|
|
|
+ pxRingbuffer->xMaxItemSize = pxRingbuffer->xSize - (sizeof(ItemHeader_t) * 2);
|
|
|
+ pxRingbuffer->xGetCurMaxSize = prvGetCurMaxSizeAllowSplit;
|
|
|
+ } else if (xBufferType == RINGBUF_TYPE_BYTEBUF) {
|
|
|
+ pxRingbuffer->uxRingbufferFlags |= rbBYTE_BUFFER_FLAG;
|
|
|
+ pxRingbuffer->xCheckItemFits = prvCheckItemFitsByteBuffer;
|
|
|
+ pxRingbuffer->vCopyItem = prvCopyItemByteBuf;
|
|
|
+ pxRingbuffer->pvGetItem = prvGetItemByteBuf;
|
|
|
+ pxRingbuffer->vReturnItem = prvReturnItemByteBuf;
|
|
|
+ //Byte buffers do not incur any overhead
|
|
|
+ pxRingbuffer->xMaxItemSize = pxRingbuffer->xSize;
|
|
|
+ pxRingbuffer->xGetCurMaxSize = prvGetCurMaxSizeByteBuf;
|
|
|
} else {
|
|
|
+ //Unsupported type
|
|
|
configASSERT(0);
|
|
|
}
|
|
|
- if (rb->free_space_sem == NULL || rb->items_buffered_sem == NULL) goto err;
|
|
|
- vPortCPUInitializeMutex(&rb->mux);
|
|
|
|
|
|
- return (RingbufHandle_t)rb;
|
|
|
+ if (pxRingbuffer->xFreeSpaceSemaphore == NULL || pxRingbuffer->xItemsBufferedSemaphore == NULL) {
|
|
|
+ goto err;
|
|
|
+ }
|
|
|
+ xSemaphoreGive(pxRingbuffer->xFreeSpaceSemaphore);
|
|
|
+ vPortCPUInitializeMutex(&pxRingbuffer->mux);
|
|
|
+
|
|
|
+ return (RingbufHandle_t)pxRingbuffer;
|
|
|
|
|
|
err:
|
|
|
//Some error has happened. Free/destroy all allocated things and return NULL.
|
|
|
- if (rb) {
|
|
|
- free(rb->data);
|
|
|
- if (rb->free_space_sem) vSemaphoreDelete(rb->free_space_sem);
|
|
|
- if (rb->items_buffered_sem) vSemaphoreDelete(rb->items_buffered_sem);
|
|
|
+ if (pxRingbuffer) {
|
|
|
+ free(pxRingbuffer->pucHead);
|
|
|
+ if (pxRingbuffer->xFreeSpaceSemaphore) {
|
|
|
+ vSemaphoreDelete(pxRingbuffer->xFreeSpaceSemaphore);
|
|
|
+ }
|
|
|
+ if (pxRingbuffer->xItemsBufferedSemaphore) {
|
|
|
+ vSemaphoreDelete(pxRingbuffer->xItemsBufferedSemaphore);
|
|
|
+ }
|
|
|
}
|
|
|
- free(rb);
|
|
|
+ free(pxRingbuffer);
|
|
|
return NULL;
|
|
|
}
|
|
|
|
|
|
-RingbufHandle_t xRingbufferCreateNoSplit(size_t item_size, size_t num_item)
|
|
|
+RingbufHandle_t xRingbufferCreateNoSplit(size_t xItemSize, size_t xItemNum)
|
|
|
{
|
|
|
- size_t aligned_size = (item_size+3)&~3;
|
|
|
- return xRingbufferCreate((aligned_size + sizeof(buf_entry_hdr_t)) * num_item, RINGBUF_TYPE_NOSPLIT);
|
|
|
+ return xRingbufferCreate((rbALIGN_SIZE(xItemSize) + rbHEADER_SIZE) * xItemNum, RINGBUF_TYPE_NOSPLIT);
|
|
|
}
|
|
|
|
|
|
-void vRingbufferDelete(RingbufHandle_t ringbuf) {
|
|
|
- ringbuf_t *rb=(ringbuf_t *)ringbuf;
|
|
|
- if (rb) {
|
|
|
- free(rb->data);
|
|
|
- if (rb->free_space_sem) vSemaphoreDelete(rb->free_space_sem);
|
|
|
- if (rb->items_buffered_sem) vSemaphoreDelete(rb->items_buffered_sem);
|
|
|
+BaseType_t xRingbufferSend(RingbufHandle_t xRingbuffer, const void *pvItem, size_t xItemSize, TickType_t xTicksToWait)
|
|
|
+{
|
|
|
+ //Check arguments
|
|
|
+ Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
|
|
|
+ configASSERT(pxRingbuffer);
|
|
|
+ configASSERT(pvItem != NULL || xItemSize == 0);
|
|
|
+ if (xItemSize > pxRingbuffer->xMaxItemSize) {
|
|
|
+ return pdFALSE; //Data will never ever fit in the queue.
|
|
|
+ }
|
|
|
+ if ((pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG) && xItemSize == 0) {
|
|
|
+ return pdTRUE; //Sending 0 bytes to byte buffer has no effect
|
|
|
+ }
|
|
|
+
|
|
|
+ //Attempt to send an item
|
|
|
+ BaseType_t xReturn = pdFALSE;
|
|
|
+ BaseType_t xReturnSemaphore = pdFALSE;
|
|
|
+ TickType_t xTicksEnd = xTaskGetTickCount() + xTicksToWait;
|
|
|
+ TickType_t xTicksRemaining = xTicksToWait;
|
|
|
+ while (xTicksRemaining <= xTicksToWait) { //xTicksRemaining will underflow once xTaskGetTickCount() > xTicksEnd
|
|
|
+ //Block until more free space becomes available or timeout
|
|
|
+ if (xSemaphoreTake(pxRingbuffer->xFreeSpaceSemaphore, xTicksRemaining) != pdTRUE) {
|
|
|
+ xReturn = pdFALSE;
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ //Semaphore obtained, check if item can fit
|
|
|
+ taskENTER_CRITICAL(&pxRingbuffer->mux);
|
|
|
+ if(pxRingbuffer->xCheckItemFits(pxRingbuffer, xItemSize) == pdTRUE) {
|
|
|
+ //Item will fit, copy item
|
|
|
+ pxRingbuffer->vCopyItem(pxRingbuffer, pvItem, xItemSize);
|
|
|
+ xReturn = pdTRUE;
|
|
|
+ //Check if the free semaphore should be returned to allow other tasks to send
|
|
|
+ if (prvGetFreeSize(pxRingbuffer) > 0) {
|
|
|
+ xReturnSemaphore = pdTRUE;
|
|
|
+ }
|
|
|
+ taskEXIT_CRITICAL(&pxRingbuffer->mux);
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ //Item doesn't fit, adjust ticks and take the semaphore again
|
|
|
+ if (xTicksToWait != portMAX_DELAY) {
|
|
|
+ xTicksRemaining = xTicksEnd - xTaskGetTickCount();
|
|
|
+ }
|
|
|
+ taskEXIT_CRITICAL(&pxRingbuffer->mux);
|
|
|
+ /*
|
|
|
+ * Gap between critical section and re-acquiring of the semaphore. If
|
|
|
+ * semaphore is given now, priority inversion might occur (see docs)
|
|
|
+ */
|
|
|
+ }
|
|
|
+
|
|
|
+ if (xReturn == pdTRUE) {
|
|
|
+ //Indicate item was successfully sent
|
|
|
+ xSemaphoreGive(pxRingbuffer->xItemsBufferedSemaphore);
|
|
|
+ }
|
|
|
+ if (xReturnSemaphore == pdTRUE) {
|
|
|
+ xSemaphoreGive(pxRingbuffer->xFreeSpaceSemaphore); //Give back semaphore so other tasks can send
|
|
|
}
|
|
|
- free(rb);
|
|
|
+ return xReturn;
|
|
|
}
|
|
|
|
|
|
-size_t xRingbufferGetMaxItemSize(RingbufHandle_t ringbuf)
|
|
|
+BaseType_t xRingbufferSendFromISR(RingbufHandle_t xRingbuffer, const void *pvItem, size_t xItemSize, BaseType_t *pxHigherPriorityTaskWoken)
|
|
|
{
|
|
|
- ringbuf_t *rb=(ringbuf_t *)ringbuf;
|
|
|
- configASSERT(rb);
|
|
|
- return rb->maxItemSize;
|
|
|
+ //Check arguments
|
|
|
+ Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
|
|
|
+ configASSERT(pxRingbuffer);
|
|
|
+ configASSERT(pvItem != NULL || xItemSize == 0);
|
|
|
+ if (xItemSize > pxRingbuffer->xMaxItemSize) {
|
|
|
+ return pdFALSE; //Data will never ever fit in the queue.
|
|
|
+ }
|
|
|
+ if ((pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG) && xItemSize == 0) {
|
|
|
+ return pdTRUE; //Sending 0 bytes to byte buffer has no effect
|
|
|
+ }
|
|
|
+
|
|
|
+ //Attempt to send an item
|
|
|
+ BaseType_t xReturn;
|
|
|
+ BaseType_t xReturnSemaphore = pdFALSE;
|
|
|
+ taskENTER_CRITICAL_ISR(&pxRingbuffer->mux);
|
|
|
+ if (pxRingbuffer->xCheckItemFits(xRingbuffer, xItemSize) == pdTRUE) {
|
|
|
+ pxRingbuffer->vCopyItem(xRingbuffer, pvItem, xItemSize);
|
|
|
+ xReturn = pdTRUE;
|
|
|
+ //Check if the free semaphore should be returned to allow other tasks to send
|
|
|
+ if (prvGetFreeSize(pxRingbuffer) > 0) {
|
|
|
+ xReturnSemaphore = pdTRUE;
|
|
|
+ }
|
|
|
+ } else {
|
|
|
+ xReturn = pdFALSE;
|
|
|
+ }
|
|
|
+ taskEXIT_CRITICAL_ISR(&pxRingbuffer->mux);
|
|
|
+
|
|
|
+ if (xReturn == pdTRUE) {
|
|
|
+ //Indicate item was successfully sent
|
|
|
+ xSemaphoreGiveFromISR(pxRingbuffer->xItemsBufferedSemaphore, pxHigherPriorityTaskWoken);
|
|
|
+ }
|
|
|
+ if (xReturnSemaphore == pdTRUE) {
|
|
|
+ xSemaphoreGiveFromISR(pxRingbuffer->xFreeSpaceSemaphore, pxHigherPriorityTaskWoken); //Give back semaphore so other tasks can send
|
|
|
+ }
|
|
|
+ return xReturn;
|
|
|
}
|
|
|
|
|
|
-bool xRingbufferIsNextItemWrapped(RingbufHandle_t ringbuf)
|
|
|
+void *xRingbufferReceive(RingbufHandle_t xRingbuffer, size_t *pxItemSize, TickType_t xTicksToWait)
|
|
|
{
|
|
|
- ringbuf_t *rb=(ringbuf_t *)ringbuf;
|
|
|
- configASSERT(rb);
|
|
|
- buf_entry_hdr_t *hdr=(buf_entry_hdr_t *)rb->read_ptr;
|
|
|
- return hdr->flags & iflag_wrap;
|
|
|
+ //Check arguments
|
|
|
+ Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
|
|
|
+ configASSERT(pxRingbuffer);
|
|
|
+
|
|
|
+ //Attempt to retrieve an item
|
|
|
+ void *pvTempItem;
|
|
|
+ size_t xTempSize;
|
|
|
+ if (prvReceiveGeneric(pxRingbuffer, &pvTempItem, NULL, &xTempSize, NULL, 0, xTicksToWait) == pdTRUE) {
|
|
|
+ if (pxItemSize != NULL) {
|
|
|
+ *pxItemSize = xTempSize;
|
|
|
+ }
|
|
|
+ return pvTempItem;
|
|
|
+ } else {
|
|
|
+ return NULL;
|
|
|
+ }
|
|
|
}
|
|
|
|
|
|
-
|
|
|
-BaseType_t xRingbufferSend(RingbufHandle_t ringbuf, void *data, size_t dataSize, TickType_t ticks_to_wait)
|
|
|
+void *xRingbufferReceiveFromISR(RingbufHandle_t xRingbuffer, size_t *pxItemSize)
|
|
|
{
|
|
|
- ringbuf_t *rb=(ringbuf_t *)ringbuf;
|
|
|
- size_t needed_size=dataSize+sizeof(buf_entry_hdr_t);
|
|
|
- BaseType_t done=pdFALSE;
|
|
|
- TickType_t ticks_end = xTaskGetTickCount() + ticks_to_wait;
|
|
|
- TickType_t ticks_remaining = ticks_to_wait;
|
|
|
-
|
|
|
- configASSERT(rb);
|
|
|
+ //Check arguments
|
|
|
+ Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
|
|
|
+ configASSERT(pxRingbuffer);
|
|
|
+
|
|
|
+ //Attempt to retrieve an item
|
|
|
+ void *pvTempItem;
|
|
|
+ size_t xTempSize;
|
|
|
+ if (prvReceiveGenericFromISR(pxRingbuffer, &pvTempItem, NULL, &xTempSize, NULL, 0) == pdTRUE) {
|
|
|
+ if (pxItemSize != NULL) {
|
|
|
+ *pxItemSize = xTempSize;
|
|
|
+ }
|
|
|
+ return pvTempItem;
|
|
|
+ } else {
|
|
|
+ return NULL;
|
|
|
+ }
|
|
|
+}
|
|
|
|
|
|
- if (dataSize > xRingbufferGetMaxItemSize(ringbuf)) {
|
|
|
- //Data will never ever fit in the queue.
|
|
|
+BaseType_t xRingbufferReceiveSplit(RingbufHandle_t xRingbuffer, void **ppvHeadItem, void **ppvTailItem, size_t *pxHeadItemSize, size_t *pxTailItemSize, TickType_t xTicksToWait)
|
|
|
+{
|
|
|
+ //Check arguments
|
|
|
+ Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
|
|
|
+ configASSERT(pxRingbuffer);
|
|
|
+ configASSERT(pxRingbuffer->uxRingbufferFlags & rbALLOW_SPLIT_FLAG);
|
|
|
+ configASSERT(ppvHeadItem != NULL && ppvTailItem != NULL);
|
|
|
+
|
|
|
+ //Attempt to retrieve multiple items
|
|
|
+ void *pvTempHeadItem, *pvTempTailItem;
|
|
|
+ size_t xTempHeadSize, xTempTailSize;
|
|
|
+ if (prvReceiveGeneric(pxRingbuffer, &pvTempHeadItem, &pvTempTailItem, &xTempHeadSize, &xTempTailSize, 0, xTicksToWait) == pdTRUE) {
|
|
|
+ //At least one item was retrieved
|
|
|
+ *ppvHeadItem = pvTempHeadItem;
|
|
|
+ if(pxHeadItemSize != NULL){
|
|
|
+ *pxHeadItemSize = xTempHeadSize;
|
|
|
+ }
|
|
|
+ //Check to see if a second item was also retrieved
|
|
|
+ if (pvTempTailItem != NULL) {
|
|
|
+ *ppvTailItem = pvTempTailItem;
|
|
|
+ if (pxTailItemSize != NULL) {
|
|
|
+ *pxTailItemSize = xTempTailSize;
|
|
|
+ }
|
|
|
+ } else {
|
|
|
+ *ppvTailItem = NULL;
|
|
|
+ }
|
|
|
+ return pdTRUE;
|
|
|
+ } else {
|
|
|
+ //No items retrieved
|
|
|
+ *ppvHeadItem = NULL;
|
|
|
+ *ppvTailItem = NULL;
|
|
|
return pdFALSE;
|
|
|
}
|
|
|
+}
|
|
|
|
|
|
- while (!done) {
|
|
|
- //Check if there is enough room in the buffer. If not, wait until there is.
|
|
|
- do {
|
|
|
- if (ringbufferFreeMem(rb) < needed_size) {
|
|
|
- //Data does not fit yet. Wait until the free_space_sem is given, then re-evaluate.
|
|
|
-
|
|
|
- BaseType_t r = xSemaphoreTake(rb->free_space_sem, ticks_remaining);
|
|
|
- if (r == pdFALSE) {
|
|
|
- //Timeout.
|
|
|
- return pdFALSE;
|
|
|
- }
|
|
|
- //Adjust ticks_remaining; we may have waited less than that and in the case the free memory still is not enough,
|
|
|
- //we will need to wait some more.
|
|
|
- if (ticks_to_wait != portMAX_DELAY) {
|
|
|
- ticks_remaining = ticks_end - xTaskGetTickCount();
|
|
|
-
|
|
|
- // ticks_remaining will always be less than or equal to the original ticks_to_wait,
|
|
|
- // unless the timeout is reached - in which case it unsigned underflows to a much
|
|
|
- // higher value.
|
|
|
- //
|
|
|
- // (Check is written this non-intuitive way to allow for the case where xTaskGetTickCount()
|
|
|
- // has overflowed but the ticks_end value has not overflowed.)
|
|
|
- if(ticks_remaining > ticks_to_wait) {
|
|
|
- //Timeout, but there is not enough free space for the item that need to be sent.
|
|
|
- xSemaphoreGive(rb->free_space_sem);
|
|
|
- return pdFALSE;
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
+BaseType_t xRingbufferReceiveSplitFromISR(RingbufHandle_t xRingbuffer, void **ppvHeadItem, void **ppvTailItem, size_t *pxHeadItemSize, size_t *pxTailItemSize)
|
|
|
+{
|
|
|
+ //Check arguments
|
|
|
+ Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
|
|
|
+ configASSERT(pxRingbuffer);
|
|
|
+ configASSERT(pxRingbuffer->uxRingbufferFlags & rbALLOW_SPLIT_FLAG);
|
|
|
+ configASSERT(ppvHeadItem != NULL && ppvTailItem != NULL);
|
|
|
+
|
|
|
+ //Attempt to retrieve multiple items
|
|
|
+ void *pvTempHeadItem, *pvTempTailItem;
|
|
|
+ size_t xTempHeadSize, xTempTailSize;
|
|
|
+ if (prvReceiveGenericFromISR(pxRingbuffer, &pvTempHeadItem, &pvTempTailItem, &xTempHeadSize, &xTempTailSize, 0) == pdTRUE) {
|
|
|
+ //At least one item was received
|
|
|
+ *ppvHeadItem = pvTempHeadItem;
|
|
|
+ if (pxHeadItemSize != NULL) {
|
|
|
+ *pxHeadItemSize = xTempHeadSize;
|
|
|
+ }
|
|
|
+ //Check to see if a second item was also retrieved
|
|
|
+ if (pvTempTailItem != NULL) {
|
|
|
+ *ppvTailItem = pvTempTailItem;
|
|
|
+ if (pxTailItemSize != NULL) {
|
|
|
+ *pxTailItemSize = xTempTailSize;
|
|
|
}
|
|
|
- } while (ringbufferFreeMem(rb) < needed_size);
|
|
|
-
|
|
|
- //Lock the mux in order to make sure no one else is messing with the ringbuffer and do the copy.
|
|
|
- portENTER_CRITICAL(&rb->mux);
|
|
|
- //Another thread may have been able to sneak its write first. Check again now we locked the ringbuff, and retry
|
|
|
- //everything if this is the case. Otherwise, we can write and are done.
|
|
|
- done=rb->copyItemToRingbufImpl(rb, data, dataSize);
|
|
|
- portEXIT_CRITICAL(&rb->mux);
|
|
|
+ } else {
|
|
|
+ *ppvTailItem = NULL;
|
|
|
+ }
|
|
|
+ return pdTRUE;
|
|
|
+ } else {
|
|
|
+ *ppvHeadItem = NULL;
|
|
|
+ *ppvTailItem = NULL;
|
|
|
+ return pdFALSE;
|
|
|
}
|
|
|
- xSemaphoreGive(rb->items_buffered_sem);
|
|
|
- return pdTRUE;
|
|
|
}
|
|
|
|
|
|
-
|
|
|
-BaseType_t xRingbufferSendFromISR(RingbufHandle_t ringbuf, void *data, size_t dataSize, BaseType_t *higher_prio_task_awoken)
|
|
|
+void *xRingbufferReceiveUpTo(RingbufHandle_t xRingbuffer, size_t *pxItemSize, TickType_t xTicksToWait, size_t xMaxSize)
|
|
|
{
|
|
|
- ringbuf_t *rb=(ringbuf_t *)ringbuf;
|
|
|
- BaseType_t write_succeeded;
|
|
|
- configASSERT(rb);
|
|
|
- size_t needed_size=dataSize+sizeof(buf_entry_hdr_t);
|
|
|
- portENTER_CRITICAL_ISR(&rb->mux);
|
|
|
- if (needed_size>ringbufferFreeMem(rb)) {
|
|
|
- //Does not fit in the remaining space in the ringbuffer.
|
|
|
- write_succeeded=pdFALSE;
|
|
|
- } else {
|
|
|
- write_succeeded = rb->copyItemToRingbufImpl(rb, data, dataSize);
|
|
|
+ //Check arguments
|
|
|
+ Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
|
|
|
+ configASSERT(pxRingbuffer);
|
|
|
+ configASSERT(pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG); //This function should only be called for byte buffers
|
|
|
+ if (xMaxSize == 0) {
|
|
|
+ return NULL;
|
|
|
}
|
|
|
- portEXIT_CRITICAL_ISR(&rb->mux);
|
|
|
- if (write_succeeded) {
|
|
|
- xSemaphoreGiveFromISR(rb->items_buffered_sem, higher_prio_task_awoken);
|
|
|
+
|
|
|
+ //Attempt to retrieve up to xMaxSize bytes
|
|
|
+ void *pvTempItem;
|
|
|
+ size_t xTempSize;
|
|
|
+ if (prvReceiveGeneric(pxRingbuffer, &pvTempItem, NULL, &xTempSize, NULL, xMaxSize, xTicksToWait) == pdTRUE) {
|
|
|
+ if (pxItemSize != NULL) {
|
|
|
+ *pxItemSize = xTempSize;
|
|
|
+ }
|
|
|
+ return pvTempItem;
|
|
|
+ } else {
|
|
|
+ return NULL;
|
|
|
}
|
|
|
- return write_succeeded;
|
|
|
}
|
|
|
|
|
|
-
|
|
|
-static void *xRingbufferReceiveGeneric(RingbufHandle_t ringbuf, size_t *item_size, TickType_t ticks_to_wait, size_t wanted_size)
|
|
|
+void *xRingbufferReceiveUpToFromISR(RingbufHandle_t xRingbuffer, size_t *pxItemSize, size_t xMaxSize)
|
|
|
{
|
|
|
- ringbuf_t *rb=(ringbuf_t *)ringbuf;
|
|
|
- uint8_t *itemData;
|
|
|
- BaseType_t done=pdFALSE;
|
|
|
- configASSERT(rb);
|
|
|
- while(!done) {
|
|
|
- //See if there's any data available. If not, wait until there is.
|
|
|
- while (rb->read_ptr == rb->write_ptr) {
|
|
|
- BaseType_t r=xSemaphoreTake(rb->items_buffered_sem, ticks_to_wait);
|
|
|
- if (r == pdFALSE) {
|
|
|
- //Timeout.
|
|
|
- return NULL;
|
|
|
- }
|
|
|
- }
|
|
|
- //Okay, we seem to have data in the buffer. Grab the mux and copy it out if it's still there.
|
|
|
- portENTER_CRITICAL(&rb->mux);
|
|
|
- itemData=rb->getItemFromRingbufImpl(rb, item_size, wanted_size);
|
|
|
- portEXIT_CRITICAL(&rb->mux);
|
|
|
- if (itemData) {
|
|
|
- //We managed to get an item.
|
|
|
- done=pdTRUE;
|
|
|
+ //Check arguments
|
|
|
+ Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
|
|
|
+ configASSERT(pxRingbuffer);
|
|
|
+ configASSERT(pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG); //This function should only be called for byte buffers
|
|
|
+ if (xMaxSize == 0) {
|
|
|
+ return NULL;
|
|
|
+ }
|
|
|
+
|
|
|
+ //Attempt to retrieve up to xMaxSize bytes
|
|
|
+ void *pvTempItem;
|
|
|
+ size_t xTempSize;
|
|
|
+ if (prvReceiveGenericFromISR(pxRingbuffer, &pvTempItem, NULL, &xTempSize, NULL, xMaxSize) == pdTRUE) {
|
|
|
+ if (pxItemSize != NULL) {
|
|
|
+ *pxItemSize = xTempSize;
|
|
|
}
|
|
|
+ return pvTempItem;
|
|
|
+ } else {
|
|
|
+ return NULL;
|
|
|
}
|
|
|
- return (void*)itemData;
|
|
|
}
|
|
|
|
|
|
-void *xRingbufferReceive(RingbufHandle_t ringbuf, size_t *item_size, TickType_t ticks_to_wait)
|
|
|
+void vRingbufferReturnItem(RingbufHandle_t xRingbuffer, void *pvItem)
|
|
|
{
|
|
|
- return xRingbufferReceiveGeneric(ringbuf, item_size, ticks_to_wait, 0);
|
|
|
+ Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
|
|
|
+ configASSERT(pxRingbuffer);
|
|
|
+ configASSERT(pvItem != NULL);
|
|
|
+
|
|
|
+ taskENTER_CRITICAL(&pxRingbuffer->mux);
|
|
|
+ pxRingbuffer->vReturnItem(pxRingbuffer, (uint8_t *)pvItem);
|
|
|
+ taskEXIT_CRITICAL(&pxRingbuffer->mux);
|
|
|
+ xSemaphoreGive(pxRingbuffer->xFreeSpaceSemaphore);
|
|
|
}
|
|
|
|
|
|
-
|
|
|
-void *xRingbufferReceiveFromISR(RingbufHandle_t ringbuf, size_t *item_size)
|
|
|
+void vRingbufferReturnItemFromISR(RingbufHandle_t xRingbuffer, void *pvItem, BaseType_t *pxHigherPriorityTaskWoken)
|
|
|
{
|
|
|
- ringbuf_t *rb=(ringbuf_t *)ringbuf;
|
|
|
- uint8_t *itemData;
|
|
|
- configASSERT(rb);
|
|
|
- portENTER_CRITICAL_ISR(&rb->mux);
|
|
|
- itemData=rb->getItemFromRingbufImpl(rb, item_size, 0);
|
|
|
- portEXIT_CRITICAL_ISR(&rb->mux);
|
|
|
- return (void*)itemData;
|
|
|
+ Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
|
|
|
+ configASSERT(pxRingbuffer);
|
|
|
+ configASSERT(pvItem != NULL);
|
|
|
+
|
|
|
+ taskENTER_CRITICAL_ISR(&pxRingbuffer->mux);
|
|
|
+ pxRingbuffer->vReturnItem(pxRingbuffer, (uint8_t *)pvItem);
|
|
|
+ taskEXIT_CRITICAL_ISR(&pxRingbuffer->mux);
|
|
|
+ xSemaphoreGiveFromISR(pxRingbuffer->xFreeSpaceSemaphore, pxHigherPriorityTaskWoken);
|
|
|
}
|
|
|
|
|
|
-void *xRingbufferReceiveUpTo(RingbufHandle_t ringbuf, size_t *item_size, TickType_t ticks_to_wait, size_t wanted_size) {
|
|
|
- if (wanted_size == 0) return NULL;
|
|
|
- configASSERT(ringbuf);
|
|
|
- configASSERT(((ringbuf_t *)ringbuf)->flags & flag_bytebuf);
|
|
|
- return xRingbufferReceiveGeneric(ringbuf, item_size, ticks_to_wait, wanted_size);
|
|
|
+void vRingbufferDelete(RingbufHandle_t xRingbuffer)
|
|
|
+{
|
|
|
+ Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
|
|
|
+ configASSERT(pxRingbuffer);
|
|
|
+
|
|
|
+ if (pxRingbuffer) {
|
|
|
+ free(pxRingbuffer->pucHead);
|
|
|
+ if (pxRingbuffer->xFreeSpaceSemaphore) {
|
|
|
+ vSemaphoreDelete(pxRingbuffer->xFreeSpaceSemaphore);
|
|
|
+ }
|
|
|
+ if (pxRingbuffer->xItemsBufferedSemaphore) {
|
|
|
+ vSemaphoreDelete(pxRingbuffer->xItemsBufferedSemaphore);
|
|
|
+ }
|
|
|
+ }
|
|
|
+ free(pxRingbuffer);
|
|
|
}
|
|
|
|
|
|
-void *xRingbufferReceiveUpToFromISR(RingbufHandle_t ringbuf, size_t *item_size, size_t wanted_size)
|
|
|
+size_t xRingbufferGetMaxItemSize(RingbufHandle_t xRingbuffer)
|
|
|
{
|
|
|
- ringbuf_t *rb=(ringbuf_t *)ringbuf;
|
|
|
- uint8_t *itemData;
|
|
|
- if (wanted_size == 0) return NULL;
|
|
|
- configASSERT(rb);
|
|
|
- configASSERT(rb->flags & flag_bytebuf);
|
|
|
- portENTER_CRITICAL_ISR(&rb->mux);
|
|
|
- itemData=rb->getItemFromRingbufImpl(rb, item_size, wanted_size);
|
|
|
- portEXIT_CRITICAL_ISR(&rb->mux);
|
|
|
- return (void*)itemData;
|
|
|
+ Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
|
|
|
+ configASSERT(pxRingbuffer);
|
|
|
+ return pxRingbuffer->xMaxItemSize;
|
|
|
}
|
|
|
|
|
|
+size_t xRingbufferGetCurFreeSize(RingbufHandle_t xRingbuffer)
|
|
|
+{
|
|
|
+ Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
|
|
|
+ configASSERT(pxRingbuffer);
|
|
|
+
|
|
|
+ size_t xFreeSize;
|
|
|
+ taskENTER_CRITICAL(&pxRingbuffer->mux);
|
|
|
+ xFreeSize = pxRingbuffer->xGetCurMaxSize(pxRingbuffer);
|
|
|
+ taskEXIT_CRITICAL(&pxRingbuffer->mux);
|
|
|
+ return xFreeSize;
|
|
|
+}
|
|
|
|
|
|
-void vRingbufferReturnItem(RingbufHandle_t ringbuf, void *item)
|
|
|
+BaseType_t xRingbufferAddToQueueSetRead(RingbufHandle_t xRingbuffer, QueueSetHandle_t xQueueSet)
|
|
|
{
|
|
|
- ringbuf_t *rb=(ringbuf_t *)ringbuf;
|
|
|
- portENTER_CRITICAL(&rb->mux);
|
|
|
- rb->returnItemToRingbufImpl(rb, item);
|
|
|
- portEXIT_CRITICAL(&rb->mux);
|
|
|
- xSemaphoreGive(rb->free_space_sem);
|
|
|
+ Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
|
|
|
+ configASSERT(pxRingbuffer);
|
|
|
+
|
|
|
+ BaseType_t xReturn;
|
|
|
+ taskENTER_CRITICAL(&pxRingbuffer->mux);
|
|
|
+ //Cannot add semaphore to queue set if semaphore is not empty. Temporarily hold semaphore
|
|
|
+ BaseType_t xHoldSemaphore = xSemaphoreTake(pxRingbuffer->xItemsBufferedSemaphore, 0);
|
|
|
+ xReturn = xQueueAddToSet(pxRingbuffer->xItemsBufferedSemaphore, xQueueSet);
|
|
|
+ if (xHoldSemaphore == pdTRUE) {
|
|
|
+ //Return semaphore if temporarily held
|
|
|
+ configASSERT(xSemaphoreGive(pxRingbuffer->xItemsBufferedSemaphore) == pdTRUE);
|
|
|
+ }
|
|
|
+ taskEXIT_CRITICAL(&pxRingbuffer->mux);
|
|
|
+ return xReturn;
|
|
|
}
|
|
|
|
|
|
+BaseType_t xRingbufferCanRead(RingbufHandle_t xRingbuffer, QueueSetMemberHandle_t xMember)
|
|
|
+{
|
|
|
+ //Check if the selected queue set member is the ring buffer's read semaphore
|
|
|
+ Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
|
|
|
+ configASSERT(pxRingbuffer);
|
|
|
+ return (pxRingbuffer->xItemsBufferedSemaphore == xMember) ? pdTRUE : pdFALSE;
|
|
|
+}
|
|
|
|
|
|
-void vRingbufferReturnItemFromISR(RingbufHandle_t ringbuf, void *item, BaseType_t *higher_prio_task_awoken)
|
|
|
+BaseType_t xRingbufferRemoveFromQueueSetRead(RingbufHandle_t xRingbuffer, QueueSetHandle_t xQueueSet)
|
|
|
{
|
|
|
- ringbuf_t *rb=(ringbuf_t *)ringbuf;
|
|
|
- portENTER_CRITICAL_ISR(&rb->mux);
|
|
|
- rb->returnItemToRingbufImpl(rb, item);
|
|
|
- portEXIT_CRITICAL_ISR(&rb->mux);
|
|
|
- xSemaphoreGiveFromISR(rb->free_space_sem, higher_prio_task_awoken);
|
|
|
+ Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
|
|
|
+ configASSERT(pxRingbuffer);
|
|
|
+
|
|
|
+ BaseType_t xReturn;
|
|
|
+ taskENTER_CRITICAL(&pxRingbuffer->mux);
|
|
|
+ //Cannot remove semaphore from queue set if semaphore is not empty. Temporarily hold semaphore
|
|
|
+ BaseType_t xHoldSemaphore = xSemaphoreTake(pxRingbuffer->xItemsBufferedSemaphore, 0);
|
|
|
+ xReturn = xQueueRemoveFromSet(pxRingbuffer->xItemsBufferedSemaphore, xQueueSet);
|
|
|
+ if (xHoldSemaphore == pdTRUE) {
|
|
|
+ //Return semaphore if temporarily held
|
|
|
+ configASSERT(xSemaphoreGive(pxRingbuffer->xItemsBufferedSemaphore) == pdTRUE);
|
|
|
+ }
|
|
|
+ taskEXIT_CRITICAL(&pxRingbuffer->mux);
|
|
|
+ return xReturn;
|
|
|
}
|
|
|
|
|
|
+void vRingbufferGetInfo(RingbufHandle_t xRingbuffer, UBaseType_t *uxFree, UBaseType_t *uxRead, UBaseType_t *uxWrite, UBaseType_t *uxItemsWaiting)
|
|
|
+{
|
|
|
+ Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
|
|
|
+ configASSERT(pxRingbuffer);
|
|
|
|
|
|
-BaseType_t xRingbufferAddToQueueSetRead(RingbufHandle_t ringbuf, QueueSetHandle_t xQueueSet)
|
|
|
+ taskENTER_CRITICAL(&pxRingbuffer->mux);
|
|
|
+ if (uxFree != NULL) {
|
|
|
+ *uxFree = (UBaseType_t)(pxRingbuffer->pucFree - pxRingbuffer->pucHead);
|
|
|
+ }
|
|
|
+ if (uxRead != NULL) {
|
|
|
+ *uxRead = (UBaseType_t)(pxRingbuffer->pucRead - pxRingbuffer->pucHead);
|
|
|
+ }
|
|
|
+ if (uxWrite != NULL) {
|
|
|
+ *uxWrite = (UBaseType_t)(pxRingbuffer->pucWrite - pxRingbuffer->pucHead);
|
|
|
+ }
|
|
|
+ if (uxItemsWaiting != NULL) {
|
|
|
+ *uxItemsWaiting = (UBaseType_t)(pxRingbuffer->xItemsWaiting);
|
|
|
+ }
|
|
|
+ taskEXIT_CRITICAL(&pxRingbuffer->mux);
|
|
|
+}
|
|
|
+
|
|
|
+void xRingbufferPrintInfo(RingbufHandle_t xRingbuffer)
|
|
|
{
|
|
|
- ringbuf_t *rb=(ringbuf_t *)ringbuf;
|
|
|
- configASSERT(rb);
|
|
|
- return xQueueAddToSet(rb->items_buffered_sem, xQueueSet);
|
|
|
+ Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
|
|
|
+ configASSERT(pxRingbuffer);
|
|
|
+ printf("Rb size:%d\tfree: %d\trptr: %d\tfreeptr: %d\twptr: %d\n",
|
|
|
+ pxRingbuffer->xSize, prvGetFreeSize(pxRingbuffer),
|
|
|
+ pxRingbuffer->pucRead - pxRingbuffer->pucHead,
|
|
|
+ pxRingbuffer->pucFree - pxRingbuffer->pucHead,
|
|
|
+ pxRingbuffer->pucWrite - pxRingbuffer->pucHead);
|
|
|
}
|
|
|
|
|
|
+/* --------------------------------- Deprecated Functions ------------------------------ */
|
|
|
+//Todo: Remove the following deprecated functions in next release
|
|
|
|
|
|
-BaseType_t xRingbufferAddToQueueSetWrite(RingbufHandle_t ringbuf, QueueSetHandle_t xQueueSet)
|
|
|
+bool xRingbufferIsNextItemWrapped(RingbufHandle_t xRingbuffer)
|
|
|
{
|
|
|
- ringbuf_t *rb=(ringbuf_t *)ringbuf;
|
|
|
- configASSERT(rb);
|
|
|
- return xQueueAddToSet(rb->free_space_sem, xQueueSet);
|
|
|
+ //This function is deprecated, use xRingbufferReceiveSplit() instead
|
|
|
+ Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
|
|
|
+ configASSERT(pxRingbuffer);
|
|
|
+ bool is_wrapped;
|
|
|
+
|
|
|
+ portENTER_CRITICAL(&pxRingbuffer->mux);
|
|
|
+ ItemHeader_t *xHeader = (ItemHeader_t *)pxRingbuffer->pucRead;
|
|
|
+ is_wrapped = xHeader->uxItemFlags & rbITEM_SPLIT_FLAG;
|
|
|
+ portEXIT_CRITICAL(&pxRingbuffer->mux);
|
|
|
+ return is_wrapped;
|
|
|
}
|
|
|
|
|
|
-
|
|
|
-BaseType_t xRingbufferRemoveFromQueueSetRead(RingbufHandle_t ringbuf, QueueSetHandle_t xQueueSet)
|
|
|
+BaseType_t xRingbufferAddToQueueSetWrite(RingbufHandle_t xRingbuffer, QueueSetHandle_t xQueueSet)
|
|
|
{
|
|
|
- ringbuf_t *rb=(ringbuf_t *)ringbuf;
|
|
|
- configASSERT(rb);
|
|
|
- return xQueueRemoveFromSet(rb->items_buffered_sem, xQueueSet);
|
|
|
+ //This function is deprecated. QueueSetWrite no longer supported
|
|
|
+ Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
|
|
|
+ configASSERT(pxRingbuffer);
|
|
|
+
|
|
|
+ BaseType_t xReturn;
|
|
|
+ portENTER_CRITICAL(&pxRingbuffer->mux);
|
|
|
+ //Cannot add semaphore to queue set if semaphore is not empty. Temporarily hold semaphore
|
|
|
+ BaseType_t xHoldSemaphore = xSemaphoreTake(pxRingbuffer->xFreeSpaceSemaphore, 0);
|
|
|
+ xReturn = xQueueAddToSet(pxRingbuffer->xFreeSpaceSemaphore, xQueueSet);
|
|
|
+ if (xHoldSemaphore == pdTRUE) {
|
|
|
+ //Return semaphore if temporarily held
|
|
|
+ configASSERT(xSemaphoreGive(pxRingbuffer->xFreeSpaceSemaphore) == pdTRUE);
|
|
|
+ }
|
|
|
+ portEXIT_CRITICAL(&pxRingbuffer->mux);
|
|
|
+ return xReturn;
|
|
|
}
|
|
|
|
|
|
-BaseType_t xRingbufferRemoveFromQueueSetWrite(RingbufHandle_t ringbuf, QueueSetHandle_t xQueueSet)
|
|
|
+BaseType_t xRingbufferRemoveFromQueueSetWrite(RingbufHandle_t xRingbuffer, QueueSetHandle_t xQueueSet)
|
|
|
{
|
|
|
- ringbuf_t *rb=(ringbuf_t *)ringbuf;
|
|
|
- configASSERT(rb);
|
|
|
- return xQueueRemoveFromSet(rb->free_space_sem, xQueueSet);
|
|
|
+ //This function is deprecated. QueueSetWrite no longer supported
|
|
|
+ Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
|
|
|
+ configASSERT(pxRingbuffer);
|
|
|
+
|
|
|
+ BaseType_t xReturn;
|
|
|
+ portENTER_CRITICAL(&pxRingbuffer->mux);
|
|
|
+ //Cannot remove semaphore from queue set if semaphore is not empty. Temporarily hold semaphore
|
|
|
+ BaseType_t xHoldSemaphore = xSemaphoreTake(pxRingbuffer->xFreeSpaceSemaphore, 0);
|
|
|
+ xReturn = xQueueRemoveFromSet(pxRingbuffer->xFreeSpaceSemaphore, xQueueSet);
|
|
|
+ if (xHoldSemaphore == pdTRUE) {
|
|
|
+ //Return semaphore is temporarily held
|
|
|
+ configASSERT(xSemaphoreGive(pxRingbuffer->xFreeSpaceSemaphore) == pdTRUE);
|
|
|
+ }
|
|
|
+ portEXIT_CRITICAL(&pxRingbuffer->mux);
|
|
|
+ return xReturn;
|
|
|
}
|
|
|
|