/* ringbuf.c */
  1. // Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. // http://www.apache.org/licenses/LICENSE-2.0
  7. //
  8. // Unless required by applicable law or agreed to in writing, software
  9. // distributed under the License is distributed on an "AS IS" BASIS,
  10. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  11. // See the License for the specific language governing permissions and
  12. // limitations under the License.
  13. #include "freertos/FreeRTOS.h"
  14. #include "freertos/task.h"
  15. #include "freertos/semphr.h"
  16. #include "freertos/queue.h"
  17. #include "freertos/xtensa_api.h"
  18. #include "freertos/ringbuf.h"
  19. #include "esp_attr.h"
  20. #include <stdint.h>
  21. #include <string.h>
  22. #include <stdlib.h>
  23. #include <stdio.h>
//Flags stored in ringbuf_t::flags describing how the buffer was created.
typedef enum {
    flag_allowsplit = 1, //Items may be split across the wrap point (RINGBUF_TYPE_ALLOWSPLIT)
    flag_bytebuf = 2,    //Headerless byte stream buffer (RINGBUF_TYPE_BYTEBUF)
} rbflag_t;
//Per-item state bits stored in buf_entry_hdr_t::flags.
typedef enum {
    iflag_free = 1, //Buffer is not read and given back by application, free to overwrite
    iflag_dummydata = 2, //Data from here to end of ringbuffer is dummy. Restart reading at start of ringbuffer.
    iflag_wrap = 4, //Valid for RINGBUF_TYPE_ALLOWSPLIT, indicating that rest of the data is wrapped around
} itemflag_t;
typedef struct ringbuf_t ringbuf_t;
//The ringbuffer structure
struct ringbuf_t {
    SemaphoreHandle_t free_space_sem;     //Binary semaphore, wakes up writing threads when there's more free space
    SemaphoreHandle_t items_buffered_sem; //Binary semaphore, indicates there are new packets in the circular buffer. See remark.
    size_t size;                          //Size of the data storage
    uint8_t *write_ptr;                   //Pointer where the next item is written
    uint8_t *read_ptr;                    //Pointer from where the next item is read
    uint8_t *free_ptr;                    //Pointer to the last block that hasn't been given back to the ringbuffer yet
    uint8_t *data;                        //Data storage
    portMUX_TYPE mux;                     //Spinlock for actual data/ptr/struct modification
    rbflag_t flags;                       //Buffer-type flags (see rbflag_t)
    size_t maxItemSize;                   //Largest item that can ever fit; computed once at creation time
    //The following keep function pointers to hold different implementations for ringbuffer management.
    //They are selected in xRingbufferCreate() based on the requested buffer type; all of them
    //assume the caller holds rb->mux.
    BaseType_t (*copyItemToRingbufImpl)(ringbuf_t *rb, uint8_t *buffer, size_t buffer_size); //Write one item
    uint8_t *(*getItemFromRingbufImpl)(ringbuf_t *rb, size_t *length, int wanted_length);    //Fetch the next item
    void (*returnItemToRingbufImpl)(ringbuf_t *rb, void *item);                              //Give a fetched item back
    size_t (*getFreeSizeImpl)(ringbuf_t *rb);                                                //Free space, before the 1-byte reserve
};
  52. /*
  53. Remark: A counting semaphore for items_buffered_sem would be more logical, but counting semaphores in
  54. FreeRTOS need a maximum count, and allocate more memory the larger the maximum count is. Here, we
  55. would need to set the maximum to the maximum amount of times a null-byte unit fits in the buffer,
  56. which is quite high and so would waste a fair amount of memory.
  57. */
//The header prepended to each ringbuffer entry. Size is assumed to be a multiple of 32bits.
typedef struct {
    size_t len;       //Payload length in bytes (actual length, not rounded up to 32 bits)
    itemflag_t flags; //Item state bits (see itemflag_t)
} buf_entry_hdr_t;
  63. //Calculate space free in the buffer
  64. static int ringbufferFreeMem(ringbuf_t *rb)
  65. {
  66. int free_size = rb->free_ptr-rb->write_ptr;
  67. if (free_size <= 0) free_size += rb->size;
  68. //Reserve one byte. If we do not do this and the entire buffer is filled, we get a situation
  69. //where read_ptr == free_ptr, messing up the next calculation.
  70. return free_size-1;
  71. }
  72. //Copies a single item to the ring buffer; refuses to split items. Assumes there is space in the ringbuffer and
  73. //the ringbuffer is locked. Increases write_ptr to the next item. Returns pdTRUE on
  74. //success, pdFALSE if it can't make the item fit and the calling routine needs to retry
  75. //later or fail.
  76. //This function by itself is not threadsafe, always call from within a muxed section.
static BaseType_t copyItemToRingbufNoSplit(ringbuf_t *rb, uint8_t *buffer, size_t buffer_size)
{
    size_t rbuffer_size;
    rbuffer_size=(buffer_size+3)&~3; //Payload length, rounded to next 32-bit value
    configASSERT(((int)rb->write_ptr&3)==0); //write_ptr needs to be 32-bit aligned
    //NOTE(review): the difference below is <= 0 here; compared against an unsigned sizeof()
    //it converts to a huge positive value, so this assert only fires when write_ptr sits
    //exactly at the end of the storage. The operands look swapped relative to the stated
    //intent ("at least a header's worth of room to the end") -- verify upstream.
    configASSERT(rb->write_ptr-(rb->data+rb->size) >= sizeof(buf_entry_hdr_t)); //need to have at least the size
    //of a header to the end of the ringbuff
    size_t rem_len=(rb->data + rb->size) - rb->write_ptr; //length remaining until end of ringbuffer
    //See if we have enough contiguous space to write the buffer.
    if (rem_len < rbuffer_size + sizeof(buf_entry_hdr_t)) {
        //Buffer plus header is not going to fit in the room from wr_pos to the end of the
        //ringbuffer... but we're not allowed to split the buffer. We need to fill the
        //rest of the ringbuffer with a dummy item so we can place the data at the _start_ of
        //the ringbuffer..
        //First, find out if we actually have enough space at the start of the ringbuffer to
        //make this work (Again, we need 4 bytes extra because otherwise read_ptr==free_ptr)
        if (rb->free_ptr-rb->data < rbuffer_size+sizeof(buf_entry_hdr_t)+4) {
            //Will not fit.
            return pdFALSE;
        }
        //If the read buffer hasn't wrapped around yet, there's no way this will work either.
        if (rb->free_ptr > rb->write_ptr) {
            //No luck.
            return pdFALSE;
        }
        //Okay, it will fit. Mark the rest of the ringbuffer space with a dummy packet.
        //Only the flag is set; readers skip a dummy item without consulting its len field.
        buf_entry_hdr_t *hdr=(buf_entry_hdr_t *)rb->write_ptr;
        hdr->flags=iflag_dummydata;
        //Reset the write pointer to the start of the ringbuffer so the code later on can
        //happily write the data.
        rb->write_ptr=rb->data;
    } else {
        //No special handling needed. Checking if it's gonna fit probably still is a good idea.
        if (ringbufferFreeMem(rb) < sizeof(buf_entry_hdr_t)+rbuffer_size) {
            //Buffer is not going to fit, period.
            return pdFALSE;
        }
    }
    //If we are here, the buffer is guaranteed to fit in the space starting at the write pointer.
    //Write the header (real payload length, not the rounded-up one), then the payload.
    buf_entry_hdr_t *hdr=(buf_entry_hdr_t *)rb->write_ptr;
    hdr->len=buffer_size;
    hdr->flags=0;
    rb->write_ptr+=sizeof(buf_entry_hdr_t);
    memcpy(rb->write_ptr, buffer, buffer_size);
    rb->write_ptr+=rbuffer_size; //advance by the aligned size so write_ptr stays 32-bit aligned
    //The buffer will wrap around if we don't have room for a header anymore.
    if ((rb->data+rb->size)-rb->write_ptr < sizeof(buf_entry_hdr_t)) {
        //'Forward' the write buffer until we are at the start of the ringbuffer.
        //The read pointer will always be at the start of a full header, which cannot
        //exist at the point of the current write pointer, so there's no chance of overtaking
        //that.
        rb->write_ptr=rb->data;
    }
    return pdTRUE;
}
  132. //Copies a single item to the ring buffer; allows split items. Assumes there is space in the ringbuffer and
  133. //the ringbuffer is locked. Increases write_ptr to the next item. Returns pdTRUE on
  134. //success, pdFALSE if it can't make the item fit and the calling routine needs to retry
  135. //later or fail.
  136. //This function by itself is not threadsafe, always call from within a muxed section.
static BaseType_t copyItemToRingbufAllowSplit(ringbuf_t *rb, uint8_t *buffer, size_t buffer_size)
{
    size_t rbuffer_size;
    rbuffer_size=(buffer_size+3)&~3; //Payload length, rounded to next 32-bit value
    configASSERT(((int)rb->write_ptr&3)==0); //write_ptr needs to be 32-bit aligned
    //NOTE(review): the difference below is <= 0 here; compared against an unsigned sizeof()
    //it converts to a huge positive value, so this assert only fires when write_ptr sits
    //exactly at the end of the storage. Operands look swapped vs. the stated intent -- verify.
    configASSERT(rb->write_ptr-(rb->data+rb->size) >= sizeof(buf_entry_hdr_t)); //need to have at least the size
    //of a header to the end of the ringbuff
    size_t rem_len=(rb->data + rb->size) - rb->write_ptr; //length remaining until end of ringbuffer
    //See if we have enough contiguous space to write the buffer.
    if (rem_len < rbuffer_size + sizeof(buf_entry_hdr_t)) {
        //The buffer can't be contiguously written to the ringbuffer, but needs special handling. Do
        //that depending on how the ringbuffer is configured.
        //The code here is also expected to check if the buffer, mangled in whatever way is implemented,
        //will still fit, and return pdFALSE if that is not the case.
        //Buffer plus header is not going to fit in the room from wr_pos to the end of the
        //ringbuffer... we need to split the write in two.
        //First, see if this will fit at all. A split item needs two headers of overhead.
        if (ringbufferFreeMem(rb) < (sizeof(buf_entry_hdr_t)*2)+rbuffer_size) {
            //Will not fit.
            return pdFALSE;
        }
        //Because the code at the end of the function makes sure we always have
        //room for a header, this should never assert.
        configASSERT(rem_len>=sizeof(buf_entry_hdr_t));
        //Okay, it should fit. Write everything.
        //First, place bit of buffer that does fit. Write header first...
        buf_entry_hdr_t *hdr=(buf_entry_hdr_t *)rb->write_ptr;
        hdr->flags=0;
        hdr->len=rem_len-sizeof(buf_entry_hdr_t); //first fragment: everything up to the end of storage
        rb->write_ptr+=sizeof(buf_entry_hdr_t);
        rem_len-=sizeof(buf_entry_hdr_t);
        if (rem_len!=0) {
            //..then write the data bit that fits.
            memcpy(rb->write_ptr, buffer, rem_len);
            //Update vars so the code later on will write the rest of the data.
            buffer+=rem_len;
            buffer_size-=rem_len;
            //Re-adjust the rbuffer value to be 4 byte aligned
            rbuffer_size=(buffer_size+3)&~3;
            //It is possible that we are here because we checked for 4byte aligned
            //size, but actual data was smaller.
            //Eg. For buffer_size = 34, rbuffer_size will be 36. Suppose we had only
            //42 bytes of memory available, the top level check will fail, as it will
            //check for availability of 36 + 8 = 44 bytes.
            //However, the 42 bytes available memory is sufficient for 34 + 8 bytes data
            //and so, we can return after writing the data. Hence, this check
            if (buffer_size == 0) {
                //Everything fit in the first fragment after all; just wrap the write pointer.
                rb->write_ptr=rb->data;
                return pdTRUE;
            } else {
                /* Indicate the wrapping */
                hdr->flags|=iflag_wrap;
            }
        } else {
            //Huh, only the header fit. Mark as dummy so the receive function doesn't receive
            //an useless zero-byte packet.
            hdr->flags|=iflag_dummydata;
        }
        //Continue writing the second fragment at the start of the storage.
        rb->write_ptr=rb->data;
    } else {
        //No special handling needed. Checking if it's gonna fit probably still is a good idea.
        if (ringbufferFreeMem(rb) < sizeof(buf_entry_hdr_t)+rbuffer_size) {
            //Buffer is not going to fit, period.
            return pdFALSE;
        }
    }
    //If we are here, the buffer is guaranteed to fit in the space starting at the write pointer.
    //This writes either the whole item (no-wrap case) or the second fragment (wrap case).
    buf_entry_hdr_t *hdr=(buf_entry_hdr_t *)rb->write_ptr;
    hdr->len=buffer_size;
    hdr->flags=0;
    rb->write_ptr+=sizeof(buf_entry_hdr_t);
    memcpy(rb->write_ptr, buffer, buffer_size);
    rb->write_ptr+=rbuffer_size; //advance by the aligned size so write_ptr stays 32-bit aligned
    //The buffer will wrap around if we don't have room for a header anymore.
    if ((rb->data+rb->size)-rb->write_ptr < sizeof(buf_entry_hdr_t)) {
        //'Forward' the write buffer until we are at the start of the ringbuffer.
        //The read pointer will always be at the start of a full header, which cannot
        //exist at the point of the current write pointer, so there's no chance of overtaking
        //that.
        rb->write_ptr=rb->data;
    }
    return pdTRUE;
}
  220. //Copies a bunch of data to the ring byte buffer. Assumes there is space in the ringbuffer and
  221. //the ringbuffer is locked. Increases write_ptr to the next item. Returns pdTRUE on
  222. //success, pdFALSE if it can't make the item fit and the calling routine needs to retry
  223. //later or fail.
  224. //This function by itself is not threadsafe, always call from within a muxed section.
  225. static BaseType_t copyItemToRingbufByteBuf(ringbuf_t *rb, uint8_t *buffer, size_t buffer_size)
  226. {
  227. size_t rem_len=(rb->data + rb->size) - rb->write_ptr; //length remaining until end of ringbuffer
  228. //See if we have enough contiguous space to write the buffer.
  229. if (rem_len < buffer_size) {
  230. //...Nope. Write the data bit that fits.
  231. memcpy(rb->write_ptr, buffer, rem_len);
  232. //Update vars so the code later on will write the rest of the data.
  233. buffer+=rem_len;
  234. buffer_size-=rem_len;
  235. rb->write_ptr=rb->data;
  236. }
  237. //If we are here, the buffer is guaranteed to fit in the space starting at the write pointer.
  238. memcpy(rb->write_ptr, buffer, buffer_size);
  239. rb->write_ptr+=buffer_size;
  240. //The buffer will wrap around if we're at the end.
  241. if ((rb->data+rb->size)==rb->write_ptr) {
  242. rb->write_ptr=rb->data;
  243. }
  244. return pdTRUE;
  245. }
  246. //Retrieves a pointer to the data of the next item, or NULL if this is not possible.
  247. //This function by itself is not threadsafe, always call from within a muxed section.
  248. //Because we always return one item, this function ignores the wanted_length variable.
static uint8_t *getItemFromRingbufDefault(ringbuf_t *rb, size_t *length, int wanted_length)
{
    uint8_t *ret;
    configASSERT(((int)rb->read_ptr&3)==0); //read_ptr must stay 32-bit aligned
    if (rb->read_ptr == rb->write_ptr) {
        //No data available.
        return NULL;
    }
    //The item written at the point of the read pointer may be a dummy item.
    //We need to skip past it first, if that's the case.
    buf_entry_hdr_t *hdr=(buf_entry_hdr_t *)rb->read_ptr;
    //Sanity check: a real header has a plausible length; a dummy one is allowed any len.
    configASSERT((hdr->len < rb->size) || (hdr->flags & iflag_dummydata));
    if (hdr->flags & iflag_dummydata) {
        //Hdr is dummy data. Reset to start of ringbuffer.
        rb->read_ptr=rb->data;
        //Get real header
        hdr=(buf_entry_hdr_t *)rb->read_ptr;
        configASSERT(hdr->len < rb->size);
        //No need to re-check if the ringbuffer is empty: the write routine will
        //always write a dummy item plus the real data item in one go, so now we must
        //be at the real data item by definition.
    }
    //Okay, pass the data back. The payload starts right after the header.
    ret=rb->read_ptr+sizeof(buf_entry_hdr_t);
    *length=hdr->len;
    //...and move the read pointer past the data (rounded up to keep 32-bit alignment).
    rb->read_ptr+=sizeof(buf_entry_hdr_t)+((hdr->len+3)&~3);
    //The buffer will wrap around if we don't have room for a header anymore.
    //Integer typecasting is used because the first operand can result into a -ve
    //value for cases wherein the ringbuffer size is not a multiple of 4, but the
    //implementation logic aligns read_ptr to 4-byte boundary
    if ((int)((rb->data + rb->size) - rb->read_ptr) < (int)sizeof(buf_entry_hdr_t)) {
        rb->read_ptr=rb->data;
    }
    return ret;
}
  285. //Retrieves a pointer to the data in the buffer, or NULL if this is not possible.
  286. //This function by itself is not threadsafe, always call from within a muxed section.
  287. //This function honours the wanted_length and will never return more data than this.
  288. static uint8_t *getItemFromRingbufByteBuf(ringbuf_t *rb, size_t *length, int wanted_length)
  289. {
  290. uint8_t *ret;
  291. if (rb->read_ptr != rb->free_ptr) {
  292. //This type of ringbuff does not support multiple outstanding buffers.
  293. return NULL;
  294. }
  295. if (rb->read_ptr == rb->write_ptr) {
  296. //No data available.
  297. return NULL;
  298. }
  299. ret=rb->read_ptr;
  300. if (rb->read_ptr > rb->write_ptr) {
  301. //Available data wraps around. Give data until the end of the buffer.
  302. *length=rb->size-(rb->read_ptr - rb->data);
  303. if (wanted_length != 0 && *length > wanted_length) {
  304. *length=wanted_length;
  305. rb->read_ptr+=wanted_length;
  306. } else {
  307. rb->read_ptr=rb->data;
  308. }
  309. } else {
  310. //Return data up to write pointer.
  311. *length=rb->write_ptr -rb->read_ptr;
  312. if (wanted_length != 0 && *length > wanted_length) {
  313. *length=wanted_length;
  314. rb->read_ptr+=wanted_length;
  315. } else {
  316. rb->read_ptr=rb->write_ptr;
  317. }
  318. }
  319. return ret;
  320. }
  321. //Returns an item to the ringbuffer. Will mark the item as free, and will see if the free pointer
  322. //can be increased.
  323. //This function by itself is not threadsafe, always call from within a muxed section.
static void returnItemToRingbufDefault(ringbuf_t *rb, void *item) {
    uint8_t *data=(uint8_t*)item;
    configASSERT(((int)rb->free_ptr&3)==0);
    configASSERT(data >= rb->data);
    configASSERT(data <= rb->data+rb->size);
    //Grab the buffer entry that preceeds the buffer
    buf_entry_hdr_t *hdr=(buf_entry_hdr_t*)(data-sizeof(buf_entry_hdr_t));
    configASSERT(hdr->len < rb->size);
    configASSERT((hdr->flags & iflag_dummydata)==0);
    configASSERT((hdr->flags & iflag_free)==0); //catch double-return of the same item
    //Mark the buffer as free.
    hdr->flags&=~iflag_wrap;
    hdr->flags|=iflag_free;
    //Do a cleanup pass: items may be returned out of order, so free_ptr can only
    //advance over a contiguous run of freed (or dummy) items.
    hdr=(buf_entry_hdr_t *)rb->free_ptr;
    //basically forward free_ptr until we run into either a block that is still in use or the write pointer.
    while (((hdr->flags & iflag_free) || (hdr->flags & iflag_dummydata)) && rb->free_ptr != rb->write_ptr) {
        if (hdr->flags & iflag_dummydata) {
            //Rest is dummy data. Reset to start of ringbuffer.
            rb->free_ptr=rb->data;
        } else {
            //Skip past item
            rb->free_ptr+=sizeof(buf_entry_hdr_t);
            //Check if the free_ptr overshoots the buffer.
            //Checking this before aligning free_ptr since it is possible that alignment
            //will cause pointer to overshoot, if the ringbuf size is not a multiple of 4
            configASSERT(rb->free_ptr+hdr->len<=rb->data+rb->size);
            //Align free_ptr to 4 byte boundary. Overshoot condition will result in wrap around below
            size_t len=(hdr->len+3)&~3;
            rb->free_ptr+=len;
        }
        //The buffer will wrap around if we don't have room for a header anymore.
        //Integer typecasting is used because the first operand can result into a -ve
        //value for cases wherein the ringbuffer size is not a multiple of 4, but the
        //implementation logic aligns free_ptr to 4-byte boundary
        if ((int)((rb->data+rb->size)-rb->free_ptr) < (int)sizeof(buf_entry_hdr_t)) {
            rb->free_ptr=rb->data;
        }
        //The free_ptr can not exceed read_ptr, otherwise write_ptr might overwrite read_ptr.
        //Read_ptr can not set to rb->data with free_ptr, otherwise write_ptr might wrap around to rb->data.
        if(rb->free_ptr == rb->read_ptr) break;
        //Next header
        hdr=(buf_entry_hdr_t *)rb->free_ptr;
    }
}
  369. //Returns an item to the ringbuffer. Will mark the item as free, and will see if the free pointer
  370. //can be increased.
  371. //This function by itself is not threadsafe, always call from within a muxed section.
  372. static void returnItemToRingbufBytebuf(ringbuf_t *rb, void *item) {
  373. configASSERT((uint8_t *)item >= rb->data);
  374. configASSERT((uint8_t *)item < rb->data+rb->size);
  375. //Free the read memory.
  376. rb->free_ptr=rb->read_ptr;
  377. }
  378. void xRingbufferPrintInfo(RingbufHandle_t ringbuf)
  379. {
  380. ringbuf_t *rb=(ringbuf_t *)ringbuf;
  381. configASSERT(rb);
  382. ets_printf("Rb size %d free %d rptr %d freeptr %d wptr %d\n",
  383. rb->size, ringbufferFreeMem(rb), rb->read_ptr-rb->data, rb->free_ptr-rb->data, rb->write_ptr-rb->data);
  384. }
  385. size_t xRingbufferGetCurFreeSize(RingbufHandle_t ringbuf)
  386. {
  387. ringbuf_t *rb=(ringbuf_t *)ringbuf;
  388. configASSERT(rb);
  389. configASSERT(rb->getFreeSizeImpl);
  390. int free_size = rb->getFreeSizeImpl(rb);
  391. //Reserve one byte. If we do not do this and the entire buffer is filled, we get a situation
  392. //where read_ptr == free_ptr, messing up the next calculation.
  393. return free_size - 1;
  394. }
  395. static size_t getCurFreeSizeByteBuf(ringbuf_t *rb)
  396. {
  397. //Return whatever space is available depending on relative positions of
  398. //the free pointer and write pointer. There is no overhead of headers in
  399. //this mode
  400. int free_size = rb->free_ptr-rb->write_ptr;
  401. if (free_size <= 0)
  402. free_size += rb->size;
  403. return free_size;
  404. }
  405. static size_t getCurFreeSizeAllowSplit(ringbuf_t *rb)
  406. {
  407. int free_size;
  408. //If Both, the write and free pointer are at the start. Hence, the entire buffer
  409. //is available (minus the space for the header)
  410. if (rb->write_ptr == rb->free_ptr && rb->write_ptr == rb->data) {
  411. free_size = rb->size - sizeof(buf_entry_hdr_t);
  412. } else if (rb->write_ptr < rb->free_ptr) {
  413. //Else if the free pointer is beyond the write pointer, only the space between
  414. //them would be available (minus the space for the header)
  415. free_size = rb->free_ptr - rb->write_ptr - sizeof(buf_entry_hdr_t);
  416. } else {
  417. //Else the data can wrap around and 2 headers will be required
  418. free_size = rb->free_ptr - rb->write_ptr + rb->size - (2 * sizeof(buf_entry_hdr_t));
  419. }
  420. return free_size;
  421. }
  422. static size_t getCurFreeSizeNoSplit(ringbuf_t *rb)
  423. {
  424. int free_size;
  425. //If the free pointer is beyond the write pointer, only the space between
  426. //them would be available
  427. if (rb->write_ptr < rb->free_ptr) {
  428. free_size = rb->free_ptr - rb->write_ptr;
  429. } else {
  430. //Else check which one is bigger amongst the below 2
  431. //1) Space from the write pointer to the end of buffer
  432. int size1 = rb->data + rb->size - rb->write_ptr;
  433. //2) Space from the start of buffer to the free pointer
  434. int size2 = rb->free_ptr - rb->data;
  435. //And then select the larger of the two
  436. free_size = size1 > size2 ? size1 : size2;
  437. }
  438. //In any case, a single header will be used, so subtracting the space that
  439. //would be required for it
  440. return free_size - sizeof(buf_entry_hdr_t);
  441. }
  442. RingbufHandle_t xRingbufferCreate(size_t buf_length, ringbuf_type_t type)
  443. {
  444. ringbuf_t *rb = malloc(sizeof(ringbuf_t));
  445. if (rb==NULL) goto err;
  446. memset(rb, 0, sizeof(ringbuf_t));
  447. rb->data = malloc(buf_length);
  448. if (rb->data == NULL) goto err;
  449. rb->size = buf_length;
  450. rb->free_ptr = rb->data;
  451. rb->read_ptr = rb->data;
  452. rb->write_ptr = rb->data;
  453. rb->free_space_sem = xSemaphoreCreateBinary();
  454. rb->items_buffered_sem = xSemaphoreCreateBinary();
  455. rb->flags=0;
  456. if (type==RINGBUF_TYPE_ALLOWSPLIT) {
  457. rb->flags|=flag_allowsplit;
  458. rb->copyItemToRingbufImpl=copyItemToRingbufAllowSplit;
  459. rb->getItemFromRingbufImpl=getItemFromRingbufDefault;
  460. rb->returnItemToRingbufImpl=returnItemToRingbufDefault;
  461. //Calculate max item size. Worst case, we need to split an item into two, which means two headers of overhead.
  462. rb->maxItemSize=rb->size-(sizeof(buf_entry_hdr_t)*2)-4;
  463. rb->getFreeSizeImpl=getCurFreeSizeAllowSplit;
  464. } else if (type==RINGBUF_TYPE_BYTEBUF) {
  465. rb->flags|=flag_bytebuf;
  466. rb->copyItemToRingbufImpl=copyItemToRingbufByteBuf;
  467. rb->getItemFromRingbufImpl=getItemFromRingbufByteBuf;
  468. rb->returnItemToRingbufImpl=returnItemToRingbufBytebuf;
  469. //Calculate max item size. We have no headers and can split anywhere -> size is total size minus one.
  470. rb->maxItemSize=rb->size-1;
  471. rb->getFreeSizeImpl=getCurFreeSizeByteBuf;
  472. } else if (type==RINGBUF_TYPE_NOSPLIT) {
  473. rb->copyItemToRingbufImpl=copyItemToRingbufNoSplit;
  474. rb->getItemFromRingbufImpl=getItemFromRingbufDefault;
  475. rb->returnItemToRingbufImpl=returnItemToRingbufDefault;
  476. //Calculate max item size. Worst case, we have the write ptr in such a position that we are lacking four bytes of free
  477. //memory to put an item into the rest of the memory. If this happens, we have to dummy-fill
  478. //(item_data-4) bytes of buffer, then we only have (size-(item_data-4) bytes left to fill
  479. //with the real item. (item size being header+data)
  480. rb->maxItemSize=(rb->size/2)-sizeof(buf_entry_hdr_t)-4;
  481. rb->getFreeSizeImpl=getCurFreeSizeNoSplit;
  482. } else {
  483. configASSERT(0);
  484. }
  485. if (rb->free_space_sem == NULL || rb->items_buffered_sem == NULL) goto err;
  486. vPortCPUInitializeMutex(&rb->mux);
  487. return (RingbufHandle_t)rb;
  488. err:
  489. //Some error has happened. Free/destroy all allocated things and return NULL.
  490. if (rb) {
  491. free(rb->data);
  492. if (rb->free_space_sem) vSemaphoreDelete(rb->free_space_sem);
  493. if (rb->items_buffered_sem) vSemaphoreDelete(rb->items_buffered_sem);
  494. }
  495. free(rb);
  496. return NULL;
  497. }
  498. RingbufHandle_t xRingbufferCreateNoSplit(size_t item_size, size_t num_item)
  499. {
  500. size_t aligned_size = (item_size+3)&~3;
  501. return xRingbufferCreate((aligned_size + sizeof(buf_entry_hdr_t)) * num_item, RINGBUF_TYPE_NOSPLIT);
  502. }
  503. void vRingbufferDelete(RingbufHandle_t ringbuf) {
  504. ringbuf_t *rb=(ringbuf_t *)ringbuf;
  505. if (rb) {
  506. free(rb->data);
  507. if (rb->free_space_sem) vSemaphoreDelete(rb->free_space_sem);
  508. if (rb->items_buffered_sem) vSemaphoreDelete(rb->items_buffered_sem);
  509. }
  510. free(rb);
  511. }
  512. size_t xRingbufferGetMaxItemSize(RingbufHandle_t ringbuf)
  513. {
  514. ringbuf_t *rb=(ringbuf_t *)ringbuf;
  515. configASSERT(rb);
  516. return rb->maxItemSize;
  517. }
//Reports whether the item at the current read position carries the wrap flag
//(i.e. its remainder continues at the start of the storage; ALLOWSPLIT only).
bool xRingbufferIsNextItemWrapped(RingbufHandle_t ringbuf)
{
    ringbuf_t *rb=(ringbuf_t *)ringbuf;
    configASSERT(rb);
    //NOTE(review): read_ptr is dereferenced here without taking rb->mux, so the
    //result is only meaningful if the caller serializes this with concurrent
    //receives -- verify against the callers.
    buf_entry_hdr_t *hdr=(buf_entry_hdr_t *)rb->read_ptr;
    return hdr->flags & iflag_wrap;
}
//Blocking send: waits up to ticks_to_wait for enough free space, then copies
//the item into the ringbuffer under the spinlock and wakes up readers.
//Returns pdTRUE on success, pdFALSE on timeout or if the item can never fit.
BaseType_t xRingbufferSend(RingbufHandle_t ringbuf, void *data, size_t dataSize, TickType_t ticks_to_wait)
{
    ringbuf_t *rb=(ringbuf_t *)ringbuf;
    //NOTE(review): a header is accounted for even in byte-buffer mode, which has no
    //headers -- presumably a conservative over-estimate there; verify.
    size_t needed_size=dataSize+sizeof(buf_entry_hdr_t);
    BaseType_t done=pdFALSE;
    //Absolute deadline; unsigned wrap of this value is handled below.
    TickType_t ticks_end = xTaskGetTickCount() + ticks_to_wait;
    TickType_t ticks_remaining = ticks_to_wait;
    configASSERT(rb);
    if (dataSize > xRingbufferGetMaxItemSize(ringbuf)) {
        //Data will never ever fit in the queue.
        return pdFALSE;
    }
    while (!done) {
        //Check if there is enough room in the buffer. If not, wait until there is.
        do {
            if (ringbufferFreeMem(rb) < needed_size) {
                //Data does not fit yet. Wait until the free_space_sem is given, then re-evaluate.
                BaseType_t r = xSemaphoreTake(rb->free_space_sem, ticks_remaining);
                if (r == pdFALSE) {
                    //Timeout.
                    return pdFALSE;
                }
                //Adjust ticks_remaining; we may have waited less than that and in the case the free memory still is not enough,
                //we will need to wait some more.
                if (ticks_to_wait != portMAX_DELAY) {
                    ticks_remaining = ticks_end - xTaskGetTickCount();
                    // ticks_remaining will always be less than or equal to the original ticks_to_wait,
                    // unless the timeout is reached - in which case it unsigned underflows to a much
                    // higher value.
                    //
                    // (Check is written this non-intuitive way to allow for the case where xTaskGetTickCount()
                    // has overflowed but the ticks_end value has not overflowed.)
                    if(ticks_remaining > ticks_to_wait) {
                        //Timeout, but there is not enough free space for the item that need to be sent.
                        //Hand the semaphore token back so other waiting writers are not starved.
                        xSemaphoreGive(rb->free_space_sem);
                        return pdFALSE;
                    }
                }
            }
        } while (ringbufferFreeMem(rb) < needed_size);
        //Lock the mux in order to make sure no one else is messing with the ringbuffer and do the copy.
        portENTER_CRITICAL(&rb->mux);
        //Another thread may have been able to sneak its write first. Check again now we locked the ringbuff, and retry
        //everything if this is the case. Otherwise, we can write and are done.
        done=rb->copyItemToRingbufImpl(rb, data, dataSize);
        portEXIT_CRITICAL(&rb->mux);
    }
    //Wake up any reader blocked on an empty buffer.
    xSemaphoreGive(rb->items_buffered_sem);
    return pdTRUE;
}
  575. BaseType_t xRingbufferSendFromISR(RingbufHandle_t ringbuf, void *data, size_t dataSize, BaseType_t *higher_prio_task_awoken)
  576. {
  577. ringbuf_t *rb=(ringbuf_t *)ringbuf;
  578. BaseType_t write_succeeded;
  579. configASSERT(rb);
  580. size_t needed_size=dataSize+sizeof(buf_entry_hdr_t);
  581. portENTER_CRITICAL_ISR(&rb->mux);
  582. if (needed_size>ringbufferFreeMem(rb)) {
  583. //Does not fit in the remaining space in the ringbuffer.
  584. write_succeeded=pdFALSE;
  585. } else {
  586. write_succeeded = rb->copyItemToRingbufImpl(rb, data, dataSize);
  587. }
  588. portEXIT_CRITICAL_ISR(&rb->mux);
  589. if (write_succeeded) {
  590. xSemaphoreGiveFromISR(rb->items_buffered_sem, higher_prio_task_awoken);
  591. }
  592. return write_succeeded;
  593. }
  594. static void *xRingbufferReceiveGeneric(RingbufHandle_t ringbuf, size_t *item_size, TickType_t ticks_to_wait, size_t wanted_size)
  595. {
  596. ringbuf_t *rb=(ringbuf_t *)ringbuf;
  597. uint8_t *itemData;
  598. BaseType_t done=pdFALSE;
  599. configASSERT(rb);
  600. while(!done) {
  601. //See if there's any data available. If not, wait until there is.
  602. while (rb->read_ptr == rb->write_ptr) {
  603. BaseType_t r=xSemaphoreTake(rb->items_buffered_sem, ticks_to_wait);
  604. if (r == pdFALSE) {
  605. //Timeout.
  606. return NULL;
  607. }
  608. }
  609. //Okay, we seem to have data in the buffer. Grab the mux and copy it out if it's still there.
  610. portENTER_CRITICAL(&rb->mux);
  611. itemData=rb->getItemFromRingbufImpl(rb, item_size, wanted_size);
  612. portEXIT_CRITICAL(&rb->mux);
  613. if (itemData) {
  614. //We managed to get an item.
  615. done=pdTRUE;
  616. }
  617. }
  618. return (void*)itemData;
  619. }
  620. void *xRingbufferReceive(RingbufHandle_t ringbuf, size_t *item_size, TickType_t ticks_to_wait)
  621. {
  622. return xRingbufferReceiveGeneric(ringbuf, item_size, ticks_to_wait, 0);
  623. }
  624. void *xRingbufferReceiveFromISR(RingbufHandle_t ringbuf, size_t *item_size)
  625. {
  626. ringbuf_t *rb=(ringbuf_t *)ringbuf;
  627. uint8_t *itemData;
  628. configASSERT(rb);
  629. portENTER_CRITICAL_ISR(&rb->mux);
  630. itemData=rb->getItemFromRingbufImpl(rb, item_size, 0);
  631. portEXIT_CRITICAL_ISR(&rb->mux);
  632. return (void*)itemData;
  633. }
  634. void *xRingbufferReceiveUpTo(RingbufHandle_t ringbuf, size_t *item_size, TickType_t ticks_to_wait, size_t wanted_size) {
  635. if (wanted_size == 0) return NULL;
  636. configASSERT(ringbuf);
  637. configASSERT(((ringbuf_t *)ringbuf)->flags & flag_bytebuf);
  638. return xRingbufferReceiveGeneric(ringbuf, item_size, ticks_to_wait, wanted_size);
  639. }
  640. void *xRingbufferReceiveUpToFromISR(RingbufHandle_t ringbuf, size_t *item_size, size_t wanted_size)
  641. {
  642. ringbuf_t *rb=(ringbuf_t *)ringbuf;
  643. uint8_t *itemData;
  644. if (wanted_size == 0) return NULL;
  645. configASSERT(rb);
  646. configASSERT(rb->flags & flag_bytebuf);
  647. portENTER_CRITICAL_ISR(&rb->mux);
  648. itemData=rb->getItemFromRingbufImpl(rb, item_size, wanted_size);
  649. portEXIT_CRITICAL_ISR(&rb->mux);
  650. return (void*)itemData;
  651. }
  652. void vRingbufferReturnItem(RingbufHandle_t ringbuf, void *item)
  653. {
  654. ringbuf_t *rb=(ringbuf_t *)ringbuf;
  655. portENTER_CRITICAL(&rb->mux);
  656. rb->returnItemToRingbufImpl(rb, item);
  657. portEXIT_CRITICAL(&rb->mux);
  658. xSemaphoreGive(rb->free_space_sem);
  659. }
  660. void vRingbufferReturnItemFromISR(RingbufHandle_t ringbuf, void *item, BaseType_t *higher_prio_task_awoken)
  661. {
  662. ringbuf_t *rb=(ringbuf_t *)ringbuf;
  663. portENTER_CRITICAL_ISR(&rb->mux);
  664. rb->returnItemToRingbufImpl(rb, item);
  665. portEXIT_CRITICAL_ISR(&rb->mux);
  666. xSemaphoreGiveFromISR(rb->free_space_sem, higher_prio_task_awoken);
  667. }
  668. BaseType_t xRingbufferAddToQueueSetRead(RingbufHandle_t ringbuf, QueueSetHandle_t xQueueSet)
  669. {
  670. ringbuf_t *rb=(ringbuf_t *)ringbuf;
  671. configASSERT(rb);
  672. return xQueueAddToSet(rb->items_buffered_sem, xQueueSet);
  673. }
  674. BaseType_t xRingbufferAddToQueueSetWrite(RingbufHandle_t ringbuf, QueueSetHandle_t xQueueSet)
  675. {
  676. ringbuf_t *rb=(ringbuf_t *)ringbuf;
  677. configASSERT(rb);
  678. return xQueueAddToSet(rb->free_space_sem, xQueueSet);
  679. }
  680. BaseType_t xRingbufferRemoveFromQueueSetRead(RingbufHandle_t ringbuf, QueueSetHandle_t xQueueSet)
  681. {
  682. ringbuf_t *rb=(ringbuf_t *)ringbuf;
  683. configASSERT(rb);
  684. return xQueueRemoveFromSet(rb->items_buffered_sem, xQueueSet);
  685. }
  686. BaseType_t xRingbufferRemoveFromQueueSetWrite(RingbufHandle_t ringbuf, QueueSetHandle_t xQueueSet)
  687. {
  688. ringbuf_t *rb=(ringbuf_t *)ringbuf;
  689. configASSERT(rb);
  690. return xQueueRemoveFromSet(rb->free_space_sem, xQueueSet);
  691. }