// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include "freertos/queue.h"
#include "freertos/xtensa_api.h"
#include "freertos/ringbuf.h"
#include "esp_attr.h"
#include <stdint.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
typedef enum {
    flag_allowsplit = 1,
    flag_bytebuf = 2,
} rbflag_t;

typedef enum {
    iflag_free = 1,      //Buffer is not read and given back by application, free to overwrite
    iflag_dummydata = 2, //Data from here to end of ringbuffer is dummy. Restart reading at start of ringbuffer.
} itemflag_t;

typedef struct ringbuf_t ringbuf_t;

//The ringbuffer structure
struct ringbuf_t {
    SemaphoreHandle_t free_space_sem;     //Binary semaphore, wakes up writing threads when there's more free space
    SemaphoreHandle_t items_buffered_sem; //Binary semaphore, indicates there are new packets in the circular buffer. See remark.
    size_t size;                          //Size of the data storage
    uint8_t *write_ptr;                   //Pointer where the next item is written
    uint8_t *read_ptr;                    //Pointer from where the next item is read
    uint8_t *free_ptr;                    //Pointer to the last block that hasn't been given back to the ringbuffer yet
    uint8_t *data;                        //Data storage
    portMUX_TYPE mux;                     //Spinlock for actual data/ptr/struct modification
    rbflag_t flags;
    size_t maxItemSize;
    //The following function pointers hold the different implementations for ringbuffer management.
    BaseType_t (*copyItemToRingbufImpl)(ringbuf_t *rb, uint8_t *buffer, size_t buffer_size);
    uint8_t *(*getItemFromRingbufImpl)(ringbuf_t *rb, size_t *length, int wanted_length);
    void (*returnItemToRingbufImpl)(ringbuf_t *rb, void *item);
};
/*
Remark: A counting semaphore for items_buffered_sem would be more logical, but counting semaphores in
FreeRTOS need a maximum count, and allocate more memory the larger the maximum count is. Here, we
would need to set the maximum to the maximum amount of times a null-byte unit fits in the buffer,
which is quite high and so would waste a fair amount of memory.
*/
//The header prepended to each ringbuffer entry. Size is assumed to be a multiple of 32 bits.
typedef struct {
    size_t len;
    itemflag_t flags;
} buf_entry_hdr_t;
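
/*
Example (illustrative only, not part of the driver): on a 32-bit target where sizeof(buf_entry_hdr_t)
is 8, an item with a 5-byte payload occupies 8 + ((5+3)&~3) = 16 bytes of data storage:

    | len=5 | flags=0 | payload[5] | 3 bytes padding |

The padding keeps the next header 32-bit aligned, which is what the (x+3)&~3 rounding in the copy
routines below relies on.
*/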
//Calculate space free in the buffer
static int ringbufferFreeMem(ringbuf_t *rb)
{
    int free_size = rb->free_ptr-rb->write_ptr;
    if (free_size <= 0) free_size += rb->size;
    //Reserve one byte. If we do not do this and the entire buffer is filled, we get a situation
    //where read_ptr == free_ptr, messing up the next calculation.
    return free_size-1;
}
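
/*
Worked example (numbers made up for illustration): with size=32, write_ptr at offset 20 and free_ptr
at offset 8, free_size = 8-20 = -12, which wraps to -12+32 = 20; one byte is reserved, so 19 bytes
are reported free. If the entire buffer could be filled, write_ptr would end up equal to free_ptr and
this calculation would report a completely full buffer as completely free.
*/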
//Copies a single item to the ring buffer; refuses to split items. Assumes there is space in the ringbuffer and
//the ringbuffer is locked. Increases write_ptr to the next item. Returns pdTRUE on
//success, pdFALSE if it can't make the item fit and the calling routine needs to retry
//later or fail.
//This function by itself is not threadsafe, always call from within a muxed section.
static BaseType_t copyItemToRingbufNoSplit(ringbuf_t *rb, uint8_t *buffer, size_t buffer_size)
{
    size_t rbuffer_size;
    rbuffer_size=(buffer_size+3)&~3; //Payload length, rounded up to the next 32-bit value
    configASSERT(((int)rb->write_ptr&3)==0); //write_ptr needs to be 32-bit aligned
    configASSERT((rb->data+rb->size)-rb->write_ptr >= sizeof(buf_entry_hdr_t)); //need to have at least the size
                                                                                //of a header to the end of the ringbuff
    size_t rem_len=(rb->data + rb->size) - rb->write_ptr; //length remaining until end of ringbuffer
    //See if we have enough contiguous space to write the buffer.
    if (rem_len < rbuffer_size + sizeof(buf_entry_hdr_t)) {
        //Buffer plus header is not going to fit in the room from wr_pos to the end of the
        //ringbuffer... but we're not allowed to split the buffer. We need to fill the
        //rest of the ringbuffer with a dummy item so we can place the data at the _start_ of
        //the ringbuffer.
        //First, find out if we actually have enough space at the start of the ringbuffer to
        //make this work. (Again, we need 4 bytes extra because otherwise read_ptr==free_ptr.)
        if (rb->free_ptr-rb->data < rbuffer_size+sizeof(buf_entry_hdr_t)+4) {
            //Will not fit.
            return pdFALSE;
        }
        //If the read buffer hasn't wrapped around yet, there's no way this will work either.
        if (rb->free_ptr > rb->write_ptr) {
            //No luck.
            return pdFALSE;
        }
        //Okay, it will fit. Mark the rest of the ringbuffer space with a dummy packet.
        buf_entry_hdr_t *hdr=(buf_entry_hdr_t *)rb->write_ptr;
        hdr->flags=iflag_dummydata;
        //Reset the write pointer to the start of the ringbuffer so the code later on can
        //happily write the data.
        rb->write_ptr=rb->data;
    } else {
        //No special handling needed. Checking if it's going to fit is probably still a good idea, though.
        if (ringbufferFreeMem(rb) < sizeof(buf_entry_hdr_t)+rbuffer_size) {
            //Buffer is not going to fit, period.
            return pdFALSE;
        }
    }
    //If we are here, the buffer is guaranteed to fit in the space starting at the write pointer.
    buf_entry_hdr_t *hdr=(buf_entry_hdr_t *)rb->write_ptr;
    hdr->len=buffer_size;
    hdr->flags=0;
    rb->write_ptr+=sizeof(buf_entry_hdr_t);
    memcpy(rb->write_ptr, buffer, buffer_size);
    rb->write_ptr+=rbuffer_size;
    //The buffer will wrap around if we don't have room for a header anymore.
    if ((rb->data+rb->size)-rb->write_ptr < sizeof(buf_entry_hdr_t)) {
        //'Forward' the write pointer until we are at the start of the ringbuffer.
        //The read pointer will always be at the start of a full header, which cannot
        //exist at the point of the current write pointer, so there's no chance of overtaking
        //that.
        rb->write_ptr=rb->data;
    }
    return pdTRUE;
}
//Copies a single item to the ring buffer; allows split items. Assumes there is space in the ringbuffer and
//the ringbuffer is locked. Increases write_ptr to the next item. Returns pdTRUE on
//success, pdFALSE if it can't make the item fit and the calling routine needs to retry
//later or fail.
//This function by itself is not threadsafe, always call from within a muxed section.
static BaseType_t copyItemToRingbufAllowSplit(ringbuf_t *rb, uint8_t *buffer, size_t buffer_size)
{
    size_t rbuffer_size;
    rbuffer_size=(buffer_size+3)&~3; //Payload length, rounded up to the next 32-bit value
    configASSERT(((int)rb->write_ptr&3)==0); //write_ptr needs to be 32-bit aligned
    configASSERT((rb->data+rb->size)-rb->write_ptr >= sizeof(buf_entry_hdr_t)); //need to have at least the size
                                                                                //of a header to the end of the ringbuff
    size_t rem_len=(rb->data + rb->size) - rb->write_ptr; //length remaining until end of ringbuffer
    //See if we have enough contiguous space to write the buffer.
    if (rem_len < rbuffer_size + sizeof(buf_entry_hdr_t)) {
        //The buffer can't be written contiguously to the ringbuffer and needs special handling. Do
        //that depending on how the ringbuffer is configured.
        //The code here is also expected to check if the buffer, mangled in whatever way is implemented,
        //will still fit, and return pdFALSE if that is not the case.
        //Buffer plus header is not going to fit in the room from wr_pos to the end of the
        //ringbuffer... we need to split the write in two.
        //First, see if this will fit at all.
        if (ringbufferFreeMem(rb) < (sizeof(buf_entry_hdr_t)*2)+rbuffer_size) {
            //Will not fit.
            return pdFALSE;
        }
        //Because the code at the end of the function makes sure we always have
        //room for a header, this should never assert.
        configASSERT(rem_len>=sizeof(buf_entry_hdr_t));
        //Okay, it should fit. Write everything.
        //First, place the bit of the buffer that does fit. Write the header first...
        buf_entry_hdr_t *hdr=(buf_entry_hdr_t *)rb->write_ptr;
        hdr->flags=0;
        hdr->len=rem_len-sizeof(buf_entry_hdr_t);
        rb->write_ptr+=sizeof(buf_entry_hdr_t);
        rem_len-=sizeof(buf_entry_hdr_t);
        if (rem_len!=0) {
            //...then write the data bit that fits.
            memcpy(rb->write_ptr, buffer, rem_len);
            //Update vars so the code later on will write the rest of the data.
            buffer+=rem_len;
            rbuffer_size-=rem_len;
            buffer_size-=rem_len;
        } else {
            //Huh, only the header fit. Mark it as dummy data so the receive function doesn't return
            //a useless zero-byte packet.
            hdr->flags|=iflag_dummydata;
        }
        rb->write_ptr=rb->data;
    } else {
        //No special handling needed. Checking if it's going to fit is probably still a good idea, though.
        if (ringbufferFreeMem(rb) < sizeof(buf_entry_hdr_t)+rbuffer_size) {
            //Buffer is not going to fit, period.
            return pdFALSE;
        }
    }
    //If we are here, the buffer is guaranteed to fit in the space starting at the write pointer.
    buf_entry_hdr_t *hdr=(buf_entry_hdr_t *)rb->write_ptr;
    hdr->len=buffer_size;
    hdr->flags=0;
    rb->write_ptr+=sizeof(buf_entry_hdr_t);
    memcpy(rb->write_ptr, buffer, buffer_size);
    rb->write_ptr+=rbuffer_size;
    //The buffer will wrap around if we don't have room for a header anymore.
    if ((rb->data+rb->size)-rb->write_ptr < sizeof(buf_entry_hdr_t)) {
        //'Forward' the write pointer until we are at the start of the ringbuffer.
        //The read pointer will always be at the start of a full header, which cannot
        //exist at the point of the current write pointer, so there's no chance of overtaking
        //that.
        rb->write_ptr=rb->data;
    }
    return pdTRUE;
}
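
/*
Worked example (numbers made up for illustration, assuming an 8-byte buf_entry_hdr_t): if a 10-byte
item is written while only 12 bytes remain before the end of the data storage, the split path above
stores a first entry at the end with len = 12-8 = 4 holding the first 4 payload bytes, wraps
write_ptr to the start, and stores a second entry there with len = 6 holding the remaining 6 bytes.
The reader then receives these as two separate items.
*/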
//Copies a bunch of data to the ring bytebuffer. Assumes there is space in the ringbuffer and
//the ringbuffer is locked. Increases write_ptr to the next item. Returns pdTRUE on
//success, pdFALSE if it can't make the item fit and the calling routine needs to retry
//later or fail.
//This function by itself is not threadsafe, always call from within a muxed section.
static BaseType_t copyItemToRingbufByteBuf(ringbuf_t *rb, uint8_t *buffer, size_t buffer_size)
{
    size_t rem_len=(rb->data + rb->size) - rb->write_ptr; //length remaining until end of ringbuffer
    //See if we have enough contiguous space to write the buffer.
    if (rem_len < buffer_size) {
        //...Nope. Write the data bit that fits.
        memcpy(rb->write_ptr, buffer, rem_len);
        //Update vars so the code later on will write the rest of the data.
        buffer+=rem_len;
        buffer_size-=rem_len;
        rb->write_ptr=rb->data;
    }
    //If we are here, the buffer is guaranteed to fit in the space starting at the write pointer.
    memcpy(rb->write_ptr, buffer, buffer_size);
    rb->write_ptr+=buffer_size;
    //The buffer will wrap around if we're at the end.
    if ((rb->data+rb->size)==rb->write_ptr) {
        rb->write_ptr=rb->data;
    }
    return pdTRUE;
}
//Retrieves a pointer to the data of the next item, or NULL if this is not possible.
//This function by itself is not threadsafe, always call from within a muxed section.
//Because we always return one item, this function ignores the wanted_length variable.
static uint8_t *getItemFromRingbufDefault(ringbuf_t *rb, size_t *length, int wanted_length)
{
    uint8_t *ret;
    configASSERT(((int)rb->read_ptr&3)==0);
    if (rb->read_ptr == rb->write_ptr) {
        //No data available.
        return NULL;
    }
    //The item written at the point of the read pointer may be a dummy item.
    //We need to skip past it first, if that's the case.
    buf_entry_hdr_t *hdr=(buf_entry_hdr_t *)rb->read_ptr;
    configASSERT((hdr->len < rb->size) || (hdr->flags & iflag_dummydata));
    if (hdr->flags & iflag_dummydata) {
        //Hdr is dummy data. Reset to start of ringbuffer.
        rb->read_ptr=rb->data;
        //Get real header
        hdr=(buf_entry_hdr_t *)rb->read_ptr;
        configASSERT(hdr->len < rb->size);
        //No need to re-check if the ringbuffer is empty: the write routine will
        //always write a dummy item plus the real data item in one go, so now we must
        //be at the real data item by definition.
    }
    //Okay, pass the data back.
    ret=rb->read_ptr+sizeof(buf_entry_hdr_t);
    *length=hdr->len;
    //...and move the read pointer past the data.
    rb->read_ptr+=sizeof(buf_entry_hdr_t)+((hdr->len+3)&~3);
    //The buffer will wrap around if we don't have room for a header anymore.
    if ((rb->data + rb->size) - rb->read_ptr < sizeof(buf_entry_hdr_t)) {
        rb->read_ptr=rb->data;
    }
    return ret;
}
//Retrieves a pointer to the data in the buffer, or NULL if this is not possible.
//This function by itself is not threadsafe, always call from within a muxed section.
//This function honours the wanted_length and will never return more data than this.
static uint8_t *getItemFromRingbufByteBuf(ringbuf_t *rb, size_t *length, int wanted_length)
{
    uint8_t *ret;
    if (rb->read_ptr != rb->free_ptr) {
        //This type of ringbuff does not support multiple outstanding buffers.
        return NULL;
    }
    if (rb->read_ptr == rb->write_ptr) {
        //No data available.
        return NULL;
    }
    ret=rb->read_ptr;
    if (rb->read_ptr > rb->write_ptr) {
        //Available data wraps around. Give data until the end of the buffer.
        *length=rb->size-(rb->read_ptr - rb->data);
        if (wanted_length != 0 && *length > wanted_length) {
            *length=wanted_length;
            rb->read_ptr+=wanted_length;
        } else {
            rb->read_ptr=rb->data;
        }
    } else {
        //Return data up to the write pointer.
        *length=rb->write_ptr-rb->read_ptr;
        if (wanted_length != 0 && *length > wanted_length) {
            *length=wanted_length;
            rb->read_ptr+=wanted_length;
        } else {
            rb->read_ptr=rb->write_ptr;
        }
    }
    return ret;
}
//Returns an item to the ringbuffer. Will mark the item as free, and will see if the free pointer
//can be increased.
//This function by itself is not threadsafe, always call from within a muxed section.
static void returnItemToRingbufDefault(ringbuf_t *rb, void *item) {
    uint8_t *data=(uint8_t*)item;
    configASSERT(((int)rb->free_ptr&3)==0);
    configASSERT(data >= rb->data);
    configASSERT(data < rb->data+rb->size);
    //Grab the buffer entry that precedes the buffer
    buf_entry_hdr_t *hdr=(buf_entry_hdr_t*)(data-sizeof(buf_entry_hdr_t));
    configASSERT(hdr->len < rb->size);
    configASSERT((hdr->flags & iflag_dummydata)==0);
    configASSERT((hdr->flags & iflag_free)==0);
    //Mark the buffer as free.
    hdr->flags|=iflag_free;
    //Do a cleanup pass.
    hdr=(buf_entry_hdr_t *)rb->free_ptr;
    //Basically, forward free_ptr until we run into either a block that is still in use or the write pointer.
    while (((hdr->flags & iflag_free) || (hdr->flags & iflag_dummydata)) && rb->free_ptr != rb->write_ptr) {
        if (hdr->flags & iflag_dummydata) {
            //Rest is dummy data. Reset to start of ringbuffer.
            rb->free_ptr=rb->data;
        } else {
            //Skip past the item
            size_t len=(hdr->len+3)&~3;
            rb->free_ptr+=len+sizeof(buf_entry_hdr_t);
            configASSERT(rb->free_ptr<=rb->data+rb->size);
        }
        //The buffer will wrap around if we don't have room for a header anymore.
        if ((rb->data+rb->size)-rb->free_ptr < sizeof(buf_entry_hdr_t)) {
            rb->free_ptr=rb->data;
        }
        //The free_ptr may not advance past read_ptr, otherwise write_ptr might overwrite data that has
        //not been read yet. In particular, free_ptr must not wrap to rb->data ahead of read_ptr,
        //otherwise write_ptr might wrap around to rb->data as well.
        if (rb->free_ptr == rb->read_ptr) break;
        //Next header
        hdr=(buf_entry_hdr_t *)rb->free_ptr;
    }
}
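
/*
Illustration (not part of the driver): if two no-split items A and B are received and B is returned
first, only B's header gets iflag_free; free_ptr stays at A because A's header is not free yet. When
A is returned later, the cleanup loop above walks past both A and B in one pass and free_ptr
advances, making their space available to writers again.
*/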
//Returns an item to the ringbuffer. Will mark the item as free, and will see if the free pointer
//can be increased.
//This function by itself is not threadsafe, always call from within a muxed section.
static void returnItemToRingbufBytebuf(ringbuf_t *rb, void *item) {
    uint8_t *data=(uint8_t*)item;
    configASSERT(data >= rb->data);
    configASSERT(data < rb->data+rb->size);
    //Free the read memory.
    rb->free_ptr=rb->read_ptr;
}
void xRingbufferPrintInfo(RingbufHandle_t ringbuf)
{
    ringbuf_t *rb=(ringbuf_t *)ringbuf;
    configASSERT(rb);
    ets_printf("Rb size %d free %d rptr %d freeptr %d wptr %d\n",
               rb->size, ringbufferFreeMem(rb), rb->read_ptr-rb->data, rb->free_ptr-rb->data, rb->write_ptr-rb->data);
}
RingbufHandle_t xRingbufferCreate(size_t buf_length, ringbuf_type_t type)
{
    ringbuf_t *rb = malloc(sizeof(ringbuf_t));
    if (rb==NULL) goto err;
    memset(rb, 0, sizeof(ringbuf_t));
    rb->data = malloc(buf_length);
    if (rb->data == NULL) goto err;
    rb->size = buf_length;
    rb->free_ptr = rb->data;
    rb->read_ptr = rb->data;
    rb->write_ptr = rb->data;
    rb->free_space_sem = xSemaphoreCreateBinary();
    rb->items_buffered_sem = xSemaphoreCreateBinary();
    rb->flags=0;
    if (type==RINGBUF_TYPE_ALLOWSPLIT) {
        rb->flags|=flag_allowsplit;
        rb->copyItemToRingbufImpl=copyItemToRingbufAllowSplit;
        rb->getItemFromRingbufImpl=getItemFromRingbufDefault;
        rb->returnItemToRingbufImpl=returnItemToRingbufDefault;
        //Calculate max item size. Worst case, we need to split an item into two, which means two headers of overhead.
        rb->maxItemSize=rb->size-(sizeof(buf_entry_hdr_t)*2)-4;
    } else if (type==RINGBUF_TYPE_BYTEBUF) {
        rb->flags|=flag_bytebuf;
        rb->copyItemToRingbufImpl=copyItemToRingbufByteBuf;
        rb->getItemFromRingbufImpl=getItemFromRingbufByteBuf;
        rb->returnItemToRingbufImpl=returnItemToRingbufBytebuf;
        //Calculate max item size. We have no headers and can split anywhere -> size is total size minus one.
        rb->maxItemSize=rb->size-1;
    } else if (type==RINGBUF_TYPE_NOSPLIT) {
        rb->copyItemToRingbufImpl=copyItemToRingbufNoSplit;
        rb->getItemFromRingbufImpl=getItemFromRingbufDefault;
        rb->returnItemToRingbufImpl=returnItemToRingbufDefault;
        //Calculate max item size. Worst case, we have the write ptr in such a position that we are lacking four bytes of free
        //memory to put an item into the rest of the memory. If this happens, we have to dummy-fill
        //(item_data-4) bytes of buffer, then we only have (size-(item_data-4)) bytes left to fill
        //with the real item. (Item size being header+data.)
        rb->maxItemSize=(rb->size/2)-sizeof(buf_entry_hdr_t)-4;
    } else {
        configASSERT(0);
    }
    if (rb->free_space_sem == NULL || rb->items_buffered_sem == NULL) goto err;
    vPortCPUInitializeMutex(&rb->mux);
    return (RingbufHandle_t)rb;
err:
    //Some error has happened. Free/destroy all allocated things and return NULL.
    if (rb) {
        free(rb->data);
        if (rb->free_space_sem) vSemaphoreDelete(rb->free_space_sem);
        if (rb->items_buffered_sem) vSemaphoreDelete(rb->items_buffered_sem);
    }
    free(rb);
    return NULL;
}
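
/*
Example usage (illustrative sketch, not part of this driver; the 1024-byte size and the error
handling are made up for illustration):

    RingbufHandle_t handle = xRingbufferCreate(1024, RINGBUF_TYPE_NOSPLIT);
    if (handle == NULL) {
        //Not enough memory for the buffer or its semaphores.
        return;
    }
    //With a no-split buffer, items larger than xRingbufferGetMaxItemSize(handle), i.e.
    //(1024/2)-sizeof(buf_entry_hdr_t)-4 bytes here, are always rejected by xRingbufferSend.
*/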
void vRingbufferDelete(RingbufHandle_t ringbuf) {
    ringbuf_t *rb=(ringbuf_t *)ringbuf;
    if (rb) {
        free(rb->data);
        if (rb->free_space_sem) vSemaphoreDelete(rb->free_space_sem);
        if (rb->items_buffered_sem) vSemaphoreDelete(rb->items_buffered_sem);
    }
    free(rb);
}

size_t xRingbufferGetMaxItemSize(RingbufHandle_t ringbuf)
{
    ringbuf_t *rb=(ringbuf_t *)ringbuf;
    configASSERT(rb);
    return rb->maxItemSize;
}
BaseType_t xRingbufferSend(RingbufHandle_t ringbuf, void *data, size_t dataSize, TickType_t ticks_to_wait)
{
    ringbuf_t *rb=(ringbuf_t *)ringbuf;
    size_t needed_size=dataSize+sizeof(buf_entry_hdr_t);
    BaseType_t done=pdFALSE;
    TickType_t ticks_end = xTaskGetTickCount() + ticks_to_wait;
    TickType_t ticks_remaining = ticks_to_wait;
    configASSERT(rb);
    if (dataSize > xRingbufferGetMaxItemSize(ringbuf)) {
        //Data will never ever fit in the queue.
        return pdFALSE;
    }
    while (!done) {
        //Check if there is enough room in the buffer. If not, wait until there is.
        do {
            if (ringbufferFreeMem(rb) < needed_size) {
                //Data does not fit yet. Wait until the free_space_sem is given, then re-evaluate.
                BaseType_t r = xSemaphoreTake(rb->free_space_sem, ticks_remaining);
                if (r == pdFALSE) {
                    //Timeout.
                    return pdFALSE;
                }
                //Adjust ticks_remaining; we may have waited less than that, and in case the free memory
                //still is not enough, we will need to wait some more.
                if (ticks_to_wait != portMAX_DELAY) {
                    ticks_remaining = ticks_end - xTaskGetTickCount();
                }
                // ticks_remaining will always be less than or equal to the original ticks_to_wait,
                // unless the timeout is reached - in which case it unsigned underflows to a much
                // higher value.
                //
                // (The check is written this non-intuitive way to allow for the case where xTaskGetTickCount()
                // has overflowed but the ticks_end value has not overflowed.)
            }
        } while (ringbufferFreeMem(rb) < needed_size && ticks_remaining > 0 && ticks_remaining <= ticks_to_wait);
        //Lock the mux in order to make sure no one else is messing with the ringbuffer and do the copy.
        portENTER_CRITICAL(&rb->mux);
        //Another thread may have been able to sneak its write in first. Check again now that we locked the
        //ringbuff, and retry everything if this is the case. Otherwise, we can write and are done.
        done=rb->copyItemToRingbufImpl(rb, data, dataSize);
        portEXIT_CRITICAL(&rb->mux);
    }
    xSemaphoreGive(rb->items_buffered_sem);
    return pdTRUE;
}
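
/*
Example usage (illustrative sketch, not part of this driver; 'handle' is assumed to come from
xRingbufferCreate, and the payload and 100 ms timeout are made up for illustration):

    const char msg[] = "hello";
    if (xRingbufferSend(handle, (void *)msg, sizeof(msg), pdMS_TO_TICKS(100)) != pdTRUE) {
        //Either the item can never fit (larger than the max item size) or the buffer
        //did not free up enough space within the timeout.
    }
*/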
BaseType_t xRingbufferSendFromISR(RingbufHandle_t ringbuf, void *data, size_t dataSize, BaseType_t *higher_prio_task_awoken)
{
    ringbuf_t *rb=(ringbuf_t *)ringbuf;
    BaseType_t write_succeeded;
    configASSERT(rb);
    size_t needed_size=dataSize+sizeof(buf_entry_hdr_t);
    portENTER_CRITICAL_ISR(&rb->mux);
    if (needed_size>ringbufferFreeMem(rb)) {
        //Does not fit in the remaining space in the ringbuffer.
        write_succeeded=pdFALSE;
    } else {
        write_succeeded = rb->copyItemToRingbufImpl(rb, data, dataSize);
    }
    portEXIT_CRITICAL_ISR(&rb->mux);
    if (write_succeeded) {
        xSemaphoreGiveFromISR(rb->items_buffered_sem, higher_prio_task_awoken);
    }
    return write_succeeded;
}
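
/*
Example ISR usage (illustrative sketch, not part of this driver; 'handle' is assumed to come from
xRingbufferCreate, and 'isr_data'/'isr_data_len' are hypothetical stand-ins for whatever the
interrupt produced):

    BaseType_t task_awoken = pdFALSE;
    xRingbufferSendFromISR(handle, isr_data, isr_data_len, &task_awoken);
    if (task_awoken == pdTRUE) {
        portYIELD_FROM_ISR();
    }
*/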
static void *xRingbufferReceiveGeneric(RingbufHandle_t ringbuf, size_t *item_size, TickType_t ticks_to_wait, size_t wanted_size)
{
    ringbuf_t *rb=(ringbuf_t *)ringbuf;
    uint8_t *itemData;
    BaseType_t done=pdFALSE;
    configASSERT(rb);
    while(!done) {
        //See if there's any data available. If not, wait until there is.
        while (rb->read_ptr == rb->write_ptr) {
            BaseType_t r=xSemaphoreTake(rb->items_buffered_sem, ticks_to_wait);
            if (r == pdFALSE) {
                //Timeout.
                return NULL;
            }
        }
        //Okay, we seem to have data in the buffer. Grab the mux and copy it out if it's still there.
        portENTER_CRITICAL(&rb->mux);
        itemData=rb->getItemFromRingbufImpl(rb, item_size, wanted_size);
        portEXIT_CRITICAL(&rb->mux);
        if (itemData) {
            //We managed to get an item.
            done=pdTRUE;
        }
    }
    return (void*)itemData;
}

void *xRingbufferReceive(RingbufHandle_t ringbuf, size_t *item_size, TickType_t ticks_to_wait)
{
    return xRingbufferReceiveGeneric(ringbuf, item_size, ticks_to_wait, 0);
}

void *xRingbufferReceiveFromISR(RingbufHandle_t ringbuf, size_t *item_size)
{
    ringbuf_t *rb=(ringbuf_t *)ringbuf;
    uint8_t *itemData;
    configASSERT(rb);
    portENTER_CRITICAL_ISR(&rb->mux);
    itemData=rb->getItemFromRingbufImpl(rb, item_size, 0);
    portEXIT_CRITICAL_ISR(&rb->mux);
    return (void*)itemData;
}

void *xRingbufferReceiveUpTo(RingbufHandle_t ringbuf, size_t *item_size, TickType_t ticks_to_wait, size_t wanted_size) {
    ringbuf_t *rb=(ringbuf_t *)ringbuf;
    if (wanted_size == 0) return NULL;
    configASSERT(rb);
    configASSERT(rb->flags & flag_bytebuf);
    return xRingbufferReceiveGeneric(ringbuf, item_size, ticks_to_wait, wanted_size);
}

void *xRingbufferReceiveUpToFromISR(RingbufHandle_t ringbuf, size_t *item_size, size_t wanted_size)
{
    ringbuf_t *rb=(ringbuf_t *)ringbuf;
    uint8_t *itemData;
    if (wanted_size == 0) return NULL;
    configASSERT(rb);
    configASSERT(rb->flags & flag_bytebuf);
    portENTER_CRITICAL_ISR(&rb->mux);
    itemData=rb->getItemFromRingbufImpl(rb, item_size, wanted_size);
    portEXIT_CRITICAL_ISR(&rb->mux);
    return (void*)itemData;
}
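
/*
Example usage for byte buffers (illustrative sketch, not part of this driver; 'bytebuf_handle' is
assumed to be a RINGBUF_TYPE_BYTEBUF buffer from xRingbufferCreate). Because stored bytes may wrap
around the end of the data storage, one call can return fewer bytes than requested even though more
are available; the remainder is picked up by the next call.

    size_t got;
    uint8_t *chunk = xRingbufferReceiveUpTo(bytebuf_handle, &got, pdMS_TO_TICKS(100), 64);
    if (chunk != NULL) {
        //'got' may be anywhere from 1 to 64 bytes; process the data, then return the chunk.
        vRingbufferReturnItem(bytebuf_handle, chunk);
    }
*/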
void vRingbufferReturnItem(RingbufHandle_t ringbuf, void *item)
{
    ringbuf_t *rb=(ringbuf_t *)ringbuf;
    portENTER_CRITICAL(&rb->mux);
    rb->returnItemToRingbufImpl(rb, item);
    portEXIT_CRITICAL(&rb->mux);
    xSemaphoreGive(rb->free_space_sem);
}
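
/*
Example usage (illustrative sketch, not part of this driver; 'handle' is assumed to come from
xRingbufferCreate): every pointer handed out by the receive functions must be given back with
vRingbufferReturnItem (or the FromISR variant), or its space is never reclaimed and writers
eventually block.

    size_t item_size;
    void *item = xRingbufferReceive(handle, &item_size, pdMS_TO_TICKS(1000));
    if (item != NULL) {
        //Process item_size bytes at 'item'...
        vRingbufferReturnItem(handle, item);
    }
*/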
void vRingbufferReturnItemFromISR(RingbufHandle_t ringbuf, void *item, BaseType_t *higher_prio_task_awoken)
{
    ringbuf_t *rb=(ringbuf_t *)ringbuf;
    portENTER_CRITICAL_ISR(&rb->mux);
    rb->returnItemToRingbufImpl(rb, item);
    portEXIT_CRITICAL_ISR(&rb->mux);
    xSemaphoreGiveFromISR(rb->free_space_sem, higher_prio_task_awoken);
}

BaseType_t xRingbufferAddToQueueSetRead(RingbufHandle_t ringbuf, QueueSetHandle_t xQueueSet)
{
    ringbuf_t *rb=(ringbuf_t *)ringbuf;
    configASSERT(rb);
    return xQueueAddToSet(rb->items_buffered_sem, xQueueSet);
}

BaseType_t xRingbufferAddToQueueSetWrite(RingbufHandle_t ringbuf, QueueSetHandle_t xQueueSet)
{
    ringbuf_t *rb=(ringbuf_t *)ringbuf;
    configASSERT(rb);
    return xQueueAddToSet(rb->free_space_sem, xQueueSet);
}

BaseType_t xRingbufferRemoveFromQueueSetRead(RingbufHandle_t ringbuf, QueueSetHandle_t xQueueSet)
{
    ringbuf_t *rb=(ringbuf_t *)ringbuf;
    configASSERT(rb);
    return xQueueRemoveFromSet(rb->items_buffered_sem, xQueueSet);
}

BaseType_t xRingbufferRemoveFromQueueSetWrite(RingbufHandle_t ringbuf, QueueSetHandle_t xQueueSet)
{
    ringbuf_t *rb=(ringbuf_t *)ringbuf;
    configASSERT(rb);
    return xQueueRemoveFromSet(rb->free_space_sem, xQueueSet);
}
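
/*
Example queue-set usage (illustrative sketch, not part of this driver; 'handle' is assumed to come
from xRingbufferCreate, and the sketch assumes the ring buffer is the only member of the set, so any
wake-up means new data was announced):

    QueueSetHandle_t set = xQueueCreateSet(4);
    xRingbufferAddToQueueSetRead(handle, set);
    for (;;) {
        if (xQueueSelectFromSet(set, portMAX_DELAY) != NULL) {
            size_t item_size;
            void *item = xRingbufferReceive(handle, &item_size, 0);
            if (item != NULL) {
                //Process and return the item.
                vRingbufferReturnItem(handle, item);
            }
        }
    }
*/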