/* ringblk_buf.c - ring block buffer implementation */
  1. /*
  2. * Copyright (c) 2006-2018, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2018-08-25 armink the first version
  9. */
  10. #include <rthw.h>
  11. #include <rtthread.h>
  12. #include <rtdevice.h>
  13. /**
  14. * ring block buffer object initialization
  15. *
  16. * @param rbb ring block buffer object
  17. * @param buf buffer
  18. * @param buf_size buffer size
  19. * @param block_set block set
  20. * @param blk_max_num max block number
  21. *
  22. * @note When your application need align access, please make the buffer address is aligned.
  23. */
  24. void rt_rbb_init(rt_rbb_t rbb, rt_uint8_t *buf, rt_size_t buf_size, rt_rbb_blk_t block_set, rt_size_t blk_max_num)
  25. {
  26. rt_size_t i;
  27. RT_ASSERT(rbb);
  28. RT_ASSERT(buf);
  29. RT_ASSERT(block_set);
  30. rbb->buf = buf;
  31. rbb->buf_size = buf_size;
  32. rbb->blk_set = block_set;
  33. rbb->blk_max_num = blk_max_num;
  34. rt_slist_init(&rbb->blk_list);
  35. /* initialize block status */
  36. for (i = 0; i < blk_max_num; i++)
  37. {
  38. block_set[i].status = RT_RBB_BLK_UNUSED;
  39. }
  40. }
  41. RTM_EXPORT(rt_rbb_init);
  42. /**
  43. * ring block buffer object create
  44. *
  45. * @param buf_size buffer size
  46. * @param blk_max_num max block number
  47. *
  48. * @return != NULL: ring block buffer object
  49. * NULL: create failed
  50. */
  51. rt_rbb_t rt_rbb_create(rt_size_t buf_size, rt_size_t blk_max_num)
  52. {
  53. rt_rbb_t rbb = NULL;
  54. rt_uint8_t *buf;
  55. rt_rbb_blk_t blk_set;
  56. rbb = (rt_rbb_t)rt_malloc(sizeof(struct rt_rbb));
  57. if (!rbb)
  58. {
  59. return NULL;
  60. }
  61. buf = (rt_uint8_t *)rt_malloc(buf_size);
  62. if (!buf)
  63. {
  64. rt_free(rbb);
  65. return NULL;
  66. }
  67. blk_set = (rt_rbb_blk_t)rt_malloc(sizeof(struct rt_rbb_blk) * blk_max_num);
  68. if (!blk_set)
  69. {
  70. rt_free(buf);
  71. rt_free(rbb);
  72. return NULL;
  73. }
  74. rt_rbb_init(rbb, buf, buf_size, blk_set, blk_max_num);
  75. return rbb;
  76. }
  77. RTM_EXPORT(rt_rbb_create);
  78. /**
  79. * ring block buffer object destroy
  80. *
  81. * @param rbb ring block buffer object
  82. */
  83. void rt_rbb_destroy(rt_rbb_t rbb)
  84. {
  85. RT_ASSERT(rbb);
  86. rt_free(rbb->buf);
  87. rt_free(rbb->blk_set);
  88. rt_free(rbb);
  89. }
  90. RTM_EXPORT(rt_rbb_destroy);
  91. static rt_rbb_blk_t find_empty_blk_in_set(rt_rbb_t rbb)
  92. {
  93. rt_size_t i;
  94. RT_ASSERT(rbb);
  95. for (i = 0; i < rbb->blk_max_num; i ++)
  96. {
  97. if (rbb->blk_set[i].status == RT_RBB_BLK_UNUSED)
  98. {
  99. return &rbb->blk_set[i];
  100. }
  101. }
  102. return NULL;
  103. }
/**
 * Allocate a block by given size. The block will be added to blk_list when
 * allocation succeeds.
 *
 * @param rbb ring block buffer object
 * @param blk_size block size
 *
 * @note When your application needs aligned access, please make sure
 *       blk_size is aligned.
 *
 * @return != NULL: allocated block
 *          NULL: allocate failed
 */
rt_rbb_blk_t rt_rbb_blk_alloc(rt_rbb_t rbb, rt_size_t blk_size)
{
    rt_base_t level;
    rt_size_t empty1 = 0, empty2 = 0;
    rt_rbb_blk_t head, tail, new_rbb = NULL;

    RT_ASSERT(rbb);
    /* size limit presumably matches a 24-bit size field in struct
     * rt_rbb_blk — TODO confirm against the header */
    RT_ASSERT(blk_size < (1L << 24));

    /* the whole allocation (descriptor pick + geometry decision + list
     * insert) runs with interrupts disabled so list and buffer state stay
     * consistent */
    level = rt_hw_interrupt_disable();

    new_rbb = find_empty_blk_in_set(rbb);

    if (rt_slist_len(&rbb->blk_list) < rbb->blk_max_num && new_rbb)
    {
        if (rt_slist_len(&rbb->blk_list) > 0)
        {
            /* oldest block is at the list head, newest at the tail */
            head = rt_slist_first_entry(&rbb->blk_list, struct rt_rbb_blk, list);
            tail = rt_slist_tail_entry(&rbb->blk_list, struct rt_rbb_blk, list);
            if (head->buf <= tail->buf)
            {
                /* un-wrapped layout: free space is split in two runs,
                 * after the tail (empty1) and before the head (empty2)
                 *
                 *                      head                     tail
                 * +--------------------------------------+-----------------+------------------+
                 * |      empty2     | block1 |  block2   |      block3     |       empty1     |
                 * +--------------------------------------+-----------------+------------------+
                 * rbb->buf
                 */
                empty1 = (rbb->buf + rbb->buf_size) - (tail->buf + tail->size);
                empty2 = head->buf - rbb->buf;

                if (empty1 >= blk_size)
                {
                    /* append right after the tail block */
                    rt_slist_append(&rbb->blk_list, &new_rbb->list);
                    new_rbb->status = RT_RBB_BLK_INITED;
                    new_rbb->buf = tail->buf + tail->size;
                    new_rbb->size = blk_size;
                }
                else if (empty2 >= blk_size)
                {
                    /* wrap around: place the block at the buffer start */
                    rt_slist_append(&rbb->blk_list, &new_rbb->list);
                    new_rbb->status = RT_RBB_BLK_INITED;
                    new_rbb->buf = rbb->buf;
                    new_rbb->size = blk_size;
                }
                else
                {
                    /* no space */
                    new_rbb = NULL;
                }
            }
            else
            {
                /* wrapped layout: the only free run is between tail and head
                 *
                 *                 tail                                   head
                 * +----------------+-------------------------------------+--------+-----------+
                 * |     block3     |                empty1               | block1 |  block2   |
                 * +----------------+-------------------------------------+--------+-----------+
                 * rbb->buf
                 */
                empty1 = head->buf - (tail->buf + tail->size);

                if (empty1 >= blk_size)
                {
                    rt_slist_append(&rbb->blk_list, &new_rbb->list);
                    new_rbb->status = RT_RBB_BLK_INITED;
                    new_rbb->buf = tail->buf + tail->size;
                    new_rbb->size = blk_size;
                }
                else
                {
                    /* no space */
                    new_rbb = NULL;
                }
            }
        }
        else
        {
            /* the list is empty: take the whole buffer from its start */
            rt_slist_append(&rbb->blk_list, &new_rbb->list);
            new_rbb->status = RT_RBB_BLK_INITED;
            new_rbb->buf = rbb->buf;
            new_rbb->size = blk_size;
        }
    }
    else
    {
        /* no free descriptor or list already full */
        new_rbb = NULL;
    }

    rt_hw_interrupt_enable(level);

    return new_rbb;
}
RTM_EXPORT(rt_rbb_blk_alloc);
  202. /**
  203. * put a block to ring block buffer object
  204. *
  205. * @param block the block
  206. */
  207. void rt_rbb_blk_put(rt_rbb_blk_t block)
  208. {
  209. RT_ASSERT(block);
  210. RT_ASSERT(block->status == RT_RBB_BLK_INITED);
  211. block->status = RT_RBB_BLK_PUT;
  212. }
  213. RTM_EXPORT(rt_rbb_blk_put);
  214. /**
  215. * get a block from the ring block buffer object
  216. *
  217. * @param rbb ring block buffer object
  218. *
  219. * @return != NULL: block
  220. * NULL: get failed
  221. */
  222. rt_rbb_blk_t rt_rbb_blk_get(rt_rbb_t rbb)
  223. {
  224. rt_base_t level;
  225. rt_rbb_blk_t block = NULL;
  226. rt_slist_t *node;
  227. RT_ASSERT(rbb);
  228. if (rt_slist_isempty(&rbb->blk_list))
  229. return 0;
  230. level = rt_hw_interrupt_disable();
  231. for (node = rt_slist_first(&rbb->blk_list); node; node = rt_slist_next(node))
  232. {
  233. block = rt_slist_entry(node, struct rt_rbb_blk, list);
  234. if (block->status == RT_RBB_BLK_PUT)
  235. {
  236. block->status = RT_RBB_BLK_GET;
  237. goto __exit;
  238. }
  239. }
  240. /* not found */
  241. block = NULL;
  242. __exit:
  243. rt_hw_interrupt_enable(level);
  244. return block;
  245. }
  246. RTM_EXPORT(rt_rbb_blk_get);
  247. /**
  248. * return the block size
  249. *
  250. * @param block the block
  251. *
  252. * @return block size
  253. */
  254. rt_size_t rt_rbb_blk_size(rt_rbb_blk_t block)
  255. {
  256. RT_ASSERT(block);
  257. return block->size;
  258. }
  259. RTM_EXPORT(rt_rbb_blk_size);
  260. /**
  261. * return the block buffer
  262. *
  263. * @param block the block
  264. *
  265. * @return block buffer
  266. */
  267. rt_uint8_t *rt_rbb_blk_buf(rt_rbb_blk_t block)
  268. {
  269. RT_ASSERT(block);
  270. return block->buf;
  271. }
  272. RTM_EXPORT(rt_rbb_blk_buf);
  273. /**
  274. * free the block
  275. *
  276. * @param rbb ring block buffer object
  277. * @param block the block
  278. */
  279. void rt_rbb_blk_free(rt_rbb_t rbb, rt_rbb_blk_t block)
  280. {
  281. rt_base_t level;
  282. RT_ASSERT(rbb);
  283. RT_ASSERT(block);
  284. RT_ASSERT(block->status != RT_RBB_BLK_UNUSED);
  285. level = rt_hw_interrupt_disable();
  286. /* remove it on rbb block list */
  287. rt_slist_remove(&rbb->blk_list, &block->list);
  288. block->status = RT_RBB_BLK_UNUSED;
  289. rt_hw_interrupt_enable(level);
  290. }
  291. RTM_EXPORT(rt_rbb_blk_free);
  292. /**
  293. * get a continuous block to queue by given size
  294. *
  295. * tail head
  296. * +------------------+---------------+--------+----------+--------+
  297. * | block3 | empty1 | block1 | block2 |fragment|
  298. * +------------------+------------------------+----------+--------+
  299. * |<-- return_size -->| |
  300. * |<--- queue_data_len --->|
  301. *
  302. * tail head
  303. * +------------------+---------------+--------+----------+--------+
  304. * | block3 | empty1 | block1 | block2 |fragment|
  305. * +------------------+------------------------+----------+--------+
  306. * |<-- return_size -->| out of len(b1+b2+b3) |
  307. * |<-------------------- queue_data_len -------------------->|
  308. *
  309. * @param rbb ring block buffer object
  310. * @param queue_data_len The max queue data size, and the return size must less then it.
  311. * @param queue continuous block queue
  312. *
  313. * @return the block queue data total size
  314. */
  315. rt_size_t rt_rbb_blk_queue_get(rt_rbb_t rbb, rt_size_t queue_data_len, rt_rbb_blk_queue_t blk_queue)
  316. {
  317. rt_base_t level;
  318. rt_size_t data_total_size = 0;
  319. rt_slist_t *node;
  320. rt_rbb_blk_t last_block = NULL, block;
  321. RT_ASSERT(rbb);
  322. RT_ASSERT(blk_queue);
  323. if (rt_slist_isempty(&rbb->blk_list))
  324. return 0;
  325. level = rt_hw_interrupt_disable();
  326. for (node = rt_slist_first(&rbb->blk_list); node; node = rt_slist_next(node))
  327. {
  328. if (!last_block)
  329. {
  330. last_block = rt_slist_entry(node, struct rt_rbb_blk, list);
  331. if (last_block->status == RT_RBB_BLK_PUT)
  332. {
  333. /* save the first put status block to queue */
  334. blk_queue->blocks = last_block;
  335. blk_queue->blk_num = 0;
  336. }
  337. else
  338. {
  339. /* the first block must be put status */
  340. last_block = NULL;
  341. continue;
  342. }
  343. }
  344. else
  345. {
  346. block = rt_slist_entry(node, struct rt_rbb_blk, list);
  347. /*
  348. * these following conditions will break the loop:
  349. * 1. the current block is not put status
  350. * 2. the last block and current block is not continuous
  351. * 3. the data_total_size will out of range
  352. */
  353. if (block->status != RT_RBB_BLK_PUT ||
  354. last_block->buf > block->buf ||
  355. data_total_size + block->size > queue_data_len)
  356. {
  357. break;
  358. }
  359. /* backup last block */
  360. last_block = block;
  361. }
  362. /* remove current block */
  363. rt_slist_remove(&rbb->blk_list, &last_block->list);
  364. data_total_size += last_block->size;
  365. last_block->status = RT_RBB_BLK_GET;
  366. blk_queue->blk_num++;
  367. }
  368. rt_hw_interrupt_enable(level);
  369. return data_total_size;
  370. }
  371. RTM_EXPORT(rt_rbb_blk_queue_get);
  372. /**
  373. * get all block length on block queue
  374. *
  375. * @param blk_queue the block queue
  376. *
  377. * @return total length
  378. */
  379. rt_size_t rt_rbb_blk_queue_len(rt_rbb_blk_queue_t blk_queue)
  380. {
  381. rt_size_t i, data_total_size = 0;
  382. RT_ASSERT(blk_queue);
  383. for (i = 0; i < blk_queue->blk_num; i++)
  384. {
  385. data_total_size += blk_queue->blocks[i].size;
  386. }
  387. return data_total_size;
  388. }
  389. RTM_EXPORT(rt_rbb_blk_queue_len);
  390. /**
  391. * return the block queue buffer
  392. *
  393. * @param blk_queue the block queue
  394. *
  395. * @return block queue buffer
  396. */
  397. rt_uint8_t *rt_rbb_blk_queue_buf(rt_rbb_blk_queue_t blk_queue)
  398. {
  399. RT_ASSERT(blk_queue);
  400. return blk_queue->blocks[0].buf;
  401. }
  402. RTM_EXPORT(rt_rbb_blk_queue_buf);
  403. /**
  404. * free the block queue
  405. *
  406. * @param rbb ring block buffer object
  407. * @param blk_queue the block queue
  408. */
  409. void rt_rbb_blk_queue_free(rt_rbb_t rbb, rt_rbb_blk_queue_t blk_queue)
  410. {
  411. rt_size_t i;
  412. RT_ASSERT(rbb);
  413. RT_ASSERT(blk_queue);
  414. for (i = 0; i < blk_queue->blk_num; i++)
  415. {
  416. rt_rbb_blk_free(rbb, &blk_queue->blocks[i]);
  417. }
  418. }
  419. RTM_EXPORT(rt_rbb_blk_queue_free);
  420. /**
  421. * The put status and buffer continuous blocks can be make a block queue.
  422. * This function will return the length which from next can be make block queue.
  423. *
  424. * @param rbb ring block buffer object
  425. *
  426. * @return the next can be make block queue's length
  427. */
  428. rt_size_t rt_rbb_next_blk_queue_len(rt_rbb_t rbb)
  429. {
  430. rt_base_t level;
  431. rt_size_t data_len = 0;
  432. rt_slist_t *node;
  433. rt_rbb_blk_t last_block = NULL, block;
  434. RT_ASSERT(rbb);
  435. if (rt_slist_isempty(&rbb->blk_list))
  436. return 0;
  437. level = rt_hw_interrupt_disable();
  438. for (node = rt_slist_first(&rbb->blk_list); node; node = rt_slist_next(node))
  439. {
  440. if (!last_block)
  441. {
  442. last_block = rt_slist_entry(node, struct rt_rbb_blk, list);
  443. if (last_block->status != RT_RBB_BLK_PUT)
  444. {
  445. /* the first block must be put status */
  446. last_block = NULL;
  447. continue;
  448. }
  449. }
  450. else
  451. {
  452. block = rt_slist_entry(node, struct rt_rbb_blk, list);
  453. /*
  454. * these following conditions will break the loop:
  455. * 1. the current block is not put status
  456. * 2. the last block and current block is not continuous
  457. */
  458. if (block->status != RT_RBB_BLK_PUT || last_block->buf > block->buf)
  459. {
  460. break;
  461. }
  462. /* backup last block */
  463. last_block = block;
  464. }
  465. data_len += last_block->size;
  466. }
  467. rt_hw_interrupt_enable(level);
  468. return data_len;
  469. }
  470. RTM_EXPORT(rt_rbb_next_blk_queue_len);
  471. /**
  472. * get the ring block buffer object buffer size
  473. *
  474. * @param rbb ring block buffer object
  475. *
  476. * @return buffer size
  477. */
  478. rt_size_t rt_rbb_get_buf_size(rt_rbb_t rbb)
  479. {
  480. RT_ASSERT(rbb);
  481. return rbb->buf_size;
  482. }
  483. RTM_EXPORT(rt_rbb_get_buf_size);