buf.c

/*
 * SPDX-FileCopyrightText: 2015 Intel Corporation
 * SPDX-FileContributor: 2018-2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <string.h>
#include "mesh/common.h"
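
/* Index of a buffer within its pool, derived from its position in the
 * pool's static buffer array.
 */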
int net_buf_id(struct net_buf *buf)
{
    struct net_buf_pool *pool = buf->pool;

    return buf - pool->__bufs;
}
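
/* Map an uninit_count value to a slot in the pool's buffer array
 * (index buf_count - uninit_count) and bind that slot to the pool.
 */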
static inline struct net_buf *pool_get_uninit(struct net_buf_pool *pool,
                                              uint16_t uninit_count)
{
    struct net_buf *buf = NULL;

    buf = &pool->__bufs[pool->buf_count - uninit_count];
    buf->pool = pool;

    return buf;
}

void net_buf_simple_clone(const struct net_buf_simple *original,
                          struct net_buf_simple *clone)
{
    memcpy(clone, original, sizeof(struct net_buf_simple));
}
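
/* Append operations: net_buf_simple_add() reserves "len" bytes at the
 * tail of the buffer and returns a pointer to them; the typed helpers
 * below write fixed-width values there in little- or big-endian order.
 */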
void *net_buf_simple_add(struct net_buf_simple *buf, size_t len)
{
    uint8_t *tail = net_buf_simple_tail(buf);

    NET_BUF_SIMPLE_DBG("buf %p len %u", buf, len);
    NET_BUF_SIMPLE_ASSERT(net_buf_simple_tailroom(buf) >= len);

    buf->len += len;
    return tail;
}

void *net_buf_simple_add_mem(struct net_buf_simple *buf, const void *mem,
                             size_t len)
{
    NET_BUF_SIMPLE_DBG("buf %p len %u", buf, len);
    return memcpy(net_buf_simple_add(buf, len), mem, len);
}

uint8_t *net_buf_simple_add_u8(struct net_buf_simple *buf, uint8_t val)
{
    uint8_t *u8 = NULL;

    NET_BUF_SIMPLE_DBG("buf %p val 0x%02x", buf, val);

    u8 = net_buf_simple_add(buf, 1);
    *u8 = val;

    return u8;
}

void net_buf_simple_add_le16(struct net_buf_simple *buf, uint16_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);
    sys_put_le16(val, net_buf_simple_add(buf, sizeof(val)));
}

void net_buf_simple_add_be16(struct net_buf_simple *buf, uint16_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);
    sys_put_be16(val, net_buf_simple_add(buf, sizeof(val)));
}

void net_buf_simple_add_le24(struct net_buf_simple *buf, uint32_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);
    sys_put_le24(val, net_buf_simple_add(buf, 3));
}

void net_buf_simple_add_be24(struct net_buf_simple *buf, uint32_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);
    sys_put_be24(val, net_buf_simple_add(buf, 3));
}

void net_buf_simple_add_le32(struct net_buf_simple *buf, uint32_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);
    sys_put_le32(val, net_buf_simple_add(buf, sizeof(val)));
}

void net_buf_simple_add_be32(struct net_buf_simple *buf, uint32_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);
    sys_put_be32(val, net_buf_simple_add(buf, sizeof(val)));
}

void net_buf_simple_add_le48(struct net_buf_simple *buf, uint64_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %" PRIu64, buf, val);
    sys_put_le48(val, net_buf_simple_add(buf, 6));
}

void net_buf_simple_add_be48(struct net_buf_simple *buf, uint64_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %" PRIu64, buf, val);
    sys_put_be48(val, net_buf_simple_add(buf, 6));
}

void net_buf_simple_add_le64(struct net_buf_simple *buf, uint64_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %" PRIu64, buf, val);
    sys_put_le64(val, net_buf_simple_add(buf, sizeof(val)));
}

void net_buf_simple_add_be64(struct net_buf_simple *buf, uint64_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %" PRIu64, buf, val);
    sys_put_be64(val, net_buf_simple_add(buf, sizeof(val)));
}
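
/* Prepend operations: net_buf_simple_push() claims "len" bytes of
 * headroom in front of the current data pointer; the typed helpers
 * below store fixed-width values there in the requested byte order.
 */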
void *net_buf_simple_push(struct net_buf_simple *buf, size_t len)
{
    NET_BUF_SIMPLE_DBG("buf %p len %u", buf, len);
    NET_BUF_SIMPLE_ASSERT(net_buf_simple_headroom(buf) >= len);

    buf->data -= len;
    buf->len += len;
    return buf->data;
}

void net_buf_simple_push_le16(struct net_buf_simple *buf, uint16_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);
    sys_put_le16(val, net_buf_simple_push(buf, sizeof(val)));
}

void net_buf_simple_push_be16(struct net_buf_simple *buf, uint16_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);
    sys_put_be16(val, net_buf_simple_push(buf, sizeof(val)));
}

void net_buf_simple_push_u8(struct net_buf_simple *buf, uint8_t val)
{
    uint8_t *data = net_buf_simple_push(buf, 1);

    *data = val;
}

void net_buf_simple_push_le24(struct net_buf_simple *buf, uint32_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);
    sys_put_le24(val, net_buf_simple_push(buf, 3));
}

void net_buf_simple_push_be24(struct net_buf_simple *buf, uint32_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);
    sys_put_be24(val, net_buf_simple_push(buf, 3));
}

void net_buf_simple_push_le32(struct net_buf_simple *buf, uint32_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);
    sys_put_le32(val, net_buf_simple_push(buf, sizeof(val)));
}

void net_buf_simple_push_be32(struct net_buf_simple *buf, uint32_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);
    sys_put_be32(val, net_buf_simple_push(buf, sizeof(val)));
}

void net_buf_simple_push_le48(struct net_buf_simple *buf, uint64_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %" PRIu64, buf, val);
    sys_put_le48(val, net_buf_simple_push(buf, 6));
}

void net_buf_simple_push_be48(struct net_buf_simple *buf, uint64_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %" PRIu64, buf, val);
    sys_put_be48(val, net_buf_simple_push(buf, 6));
}

void net_buf_simple_push_le64(struct net_buf_simple *buf, uint64_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %" PRIu64, buf, val);
    sys_put_le64(val, net_buf_simple_push(buf, sizeof(val)));
}

void net_buf_simple_push_be64(struct net_buf_simple *buf, uint64_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %" PRIu64, buf, val);
    sys_put_be64(val, net_buf_simple_push(buf, sizeof(val)));
}
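
/* Consume operations: net_buf_simple_pull() advances the data pointer
 * past "len" bytes; the typed helpers below read fixed-width values
 * from the head of the buffer and convert them to host byte order.
 */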
void *net_buf_simple_pull(struct net_buf_simple *buf, size_t len)
{
    NET_BUF_SIMPLE_DBG("buf %p len %u", buf, len);
    NET_BUF_SIMPLE_ASSERT(buf->len >= len);

    buf->len -= len;
    return buf->data += len;
}

void *net_buf_simple_pull_mem(struct net_buf_simple *buf, size_t len)
{
    void *data = buf->data;

    NET_BUF_SIMPLE_DBG("buf %p len %zu", buf, len);
    NET_BUF_SIMPLE_ASSERT(buf->len >= len);

    buf->len -= len;
    buf->data += len;

    return data;
}

uint8_t net_buf_simple_pull_u8(struct net_buf_simple *buf)
{
    uint8_t val = 0U;

    val = buf->data[0];
    net_buf_simple_pull(buf, 1);

    return val;
}

uint16_t net_buf_simple_pull_le16(struct net_buf_simple *buf)
{
    uint16_t val = 0U;

    val = UNALIGNED_GET((uint16_t *)buf->data);
    net_buf_simple_pull(buf, sizeof(val));

    return sys_le16_to_cpu(val);
}

uint16_t net_buf_simple_pull_be16(struct net_buf_simple *buf)
{
    uint16_t val = 0U;

    val = UNALIGNED_GET((uint16_t *)buf->data);
    net_buf_simple_pull(buf, sizeof(val));

    return sys_be16_to_cpu(val);
}

uint32_t net_buf_simple_pull_le24(struct net_buf_simple *buf)
{
    struct uint24 {
        uint32_t u24:24;
    } __attribute__((packed)) val;

    val = UNALIGNED_GET((struct uint24 *)buf->data);
    net_buf_simple_pull(buf, sizeof(val));

    return sys_le24_to_cpu(val.u24);
}

uint32_t net_buf_simple_pull_be24(struct net_buf_simple *buf)
{
    struct uint24 {
        uint32_t u24:24;
    } __attribute__((packed)) val;

    val = UNALIGNED_GET((struct uint24 *)buf->data);
    net_buf_simple_pull(buf, sizeof(val));

    return sys_be24_to_cpu(val.u24);
}

uint32_t net_buf_simple_pull_le32(struct net_buf_simple *buf)
{
    uint32_t val = 0U;

    val = UNALIGNED_GET((uint32_t *)buf->data);
    net_buf_simple_pull(buf, sizeof(val));

    return sys_le32_to_cpu(val);
}

uint32_t net_buf_simple_pull_be32(struct net_buf_simple *buf)
{
    uint32_t val = 0U;

    val = UNALIGNED_GET((uint32_t *)buf->data);
    net_buf_simple_pull(buf, sizeof(val));

    return sys_be32_to_cpu(val);
}

uint64_t net_buf_simple_pull_le48(struct net_buf_simple *buf)
{
    struct uint48 {
        uint64_t u48:48;
    } __attribute__((packed)) val;

    val = UNALIGNED_GET((struct uint48 *)buf->data);
    net_buf_simple_pull(buf, sizeof(val));

    return sys_le48_to_cpu(val.u48);
}

uint64_t net_buf_simple_pull_be48(struct net_buf_simple *buf)
{
    struct uint48 {
        uint64_t u48:48;
    } __attribute__((packed)) val;

    val = UNALIGNED_GET((struct uint48 *)buf->data);
    net_buf_simple_pull(buf, sizeof(val));

    return sys_be48_to_cpu(val.u48);
}

uint64_t net_buf_simple_pull_le64(struct net_buf_simple *buf)
{
    uint64_t val;

    val = UNALIGNED_GET((uint64_t *)buf->data);
    net_buf_simple_pull(buf, sizeof(val));

    return sys_le64_to_cpu(val);
}

uint64_t net_buf_simple_pull_be64(struct net_buf_simple *buf)
{
    uint64_t val;

    val = UNALIGNED_GET((uint64_t *)buf->data);
    net_buf_simple_pull(buf, sizeof(val));

    return sys_be64_to_cpu(val);
}
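
/* Headroom is the space between the start of the backing storage and
 * the current data pointer; tailroom is what remains after the data.
 */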
size_t net_buf_simple_headroom(struct net_buf_simple *buf)
{
    return buf->data - buf->__buf;
}

size_t net_buf_simple_tailroom(struct net_buf_simple *buf)
{
    return buf->size - net_buf_simple_headroom(buf) - buf->len;
}

void net_buf_reset(struct net_buf *buf)
{
    NET_BUF_ASSERT(buf->flags == 0);
    NET_BUF_ASSERT(buf->frags == NULL);

    net_buf_simple_reset(&buf->b);
}
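
/* Wrap an externally provided data block: the buffer starts out "full",
 * i.e. data == __buf and len == size. net_buf_simple_reserve() instead
 * sets aside headroom in an empty buffer before any data is added.
 */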
void net_buf_simple_init_with_data(struct net_buf_simple *buf,
                                   void *data, size_t size)
{
    buf->__buf = data;
    buf->data = data;
    buf->size = size;
    buf->len = size;
}

void net_buf_simple_reserve(struct net_buf_simple *buf, size_t reserve)
{
    NET_BUF_ASSERT(buf);
    NET_BUF_ASSERT(buf->len == 0U);
    NET_BUF_DBG("buf %p reserve %zu", buf, reserve);

    buf->data = buf->__buf + reserve;
}
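
/* Queue a buffer (and its fragment chain) on a singly linked list.
 * Each buffer in the chain that is followed by a fragment is tagged
 * with NET_BUF_FRAGS so that net_buf_slist_get() knows to pull the
 * next list node as part of the same buffer.
 */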
void net_buf_slist_put(sys_slist_t *list, struct net_buf *buf)
{
    struct net_buf *tail = NULL;

    NET_BUF_ASSERT(list);
    NET_BUF_ASSERT(buf);

    for (tail = buf; tail->frags; tail = tail->frags) {
        tail->flags |= NET_BUF_FRAGS;
    }

    bt_mesh_list_lock();
    sys_slist_append_list(list, &buf->node, &tail->node);
    bt_mesh_list_unlock();
}
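
/* Dequeue a buffer from the list and re-link any fragments that were
 * flattened into the list by net_buf_slist_put().
 */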
struct net_buf *net_buf_slist_get(sys_slist_t *list)
{
    struct net_buf *buf = NULL, *frag = NULL;

    NET_BUF_ASSERT(list);

    bt_mesh_list_lock();
    buf = (void *)sys_slist_get(list);
    bt_mesh_list_unlock();

    if (!buf) {
        return NULL;
    }

    /* Get any fragments belonging to this buffer */
    for (frag = buf; (frag->flags & NET_BUF_FRAGS); frag = frag->frags) {
        bt_mesh_list_lock();
        frag->frags = (void *)sys_slist_get(list);
        bt_mesh_list_unlock();

        NET_BUF_ASSERT(frag->frags);

        /* The fragments flag is only for list-internal usage */
        frag->flags &= ~NET_BUF_FRAGS;
    }

    /* Mark the end of the fragment list */
    frag->frags = NULL;

    return buf;
}
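
/* Take an additional reference on a buffer and return it. */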
struct net_buf *net_buf_ref(struct net_buf *buf)
{
    NET_BUF_ASSERT(buf);

    NET_BUF_DBG("buf %p (old) ref %u pool %p", buf, buf->ref, buf->pool);
    buf->ref++;
    return buf;
}
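
/* Drop a reference on a buffer. When the count reaches zero, the buffer
 * is returned to its pool, the pool's destroy callback (if any) runs,
 * and the loop continues with the buffer's fragment chain. The
 * Espressif change guards against decrementing a count already at zero.
 */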
#if CONFIG_BLE_MESH_NET_BUF_LOG
void net_buf_unref_debug(struct net_buf *buf, const char *func, int line)
#else
void net_buf_unref(struct net_buf *buf)
#endif
{
    NET_BUF_ASSERT(buf);

    while (buf) {
        struct net_buf *frags = buf->frags;
        struct net_buf_pool *pool = NULL;

#if CONFIG_BLE_MESH_NET_BUF_LOG
        if (!buf->ref) {
            NET_BUF_ERR("%s():%d: buf %p double free", func, line,
                        buf);
            return;
        }
#endif
        NET_BUF_DBG("buf %p ref %u pool %p frags %p", buf, buf->ref,
                    buf->pool, buf->frags);

        /* Changed by Espressif. Add !buf->ref to avoid minus 0 */
        if (!buf->ref || --buf->ref > 0) {
            return;
        }

        buf->frags = NULL;

        pool = buf->pool;
        pool->uninit_count++;
#if CONFIG_BLE_MESH_NET_BUF_POOL_USAGE
        pool->avail_count++;
        NET_BUF_DBG("Unref, pool %p, avail_count %d, uninit_count %d",
                    pool, pool->avail_count, pool->uninit_count);
        NET_BUF_ASSERT(pool->avail_count <= pool->buf_count);
#endif

        if (pool->destroy) {
            pool->destroy(buf);
        }

        buf = frags;
    }
}
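
/* Data allocators. A fixed pool carves one equally sized data block per
 * buffer out of a static array, so allocation is an index lookup (with
 * the requested size clamped to the pool's data_size) and freeing is a
 * no-op. data_alloc() dispatches to the pool's configured allocator.
 */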
static uint8_t *fixed_data_alloc(struct net_buf *buf, size_t *size, int32_t timeout)
{
    struct net_buf_pool *pool = buf->pool;
    const struct net_buf_pool_fixed *fixed = pool->alloc->alloc_data;

    *size = MIN(fixed->data_size, *size);

    return fixed->data_pool + fixed->data_size * net_buf_id(buf);
}

static void fixed_data_unref(struct net_buf *buf, uint8_t *data)
{
    /* Nothing needed for fixed-size data pools */
}

const struct net_buf_data_cb net_buf_fixed_cb = {
    .alloc = fixed_data_alloc,
    .unref = fixed_data_unref,
};

static uint8_t *data_alloc(struct net_buf *buf, size_t *size, int32_t timeout)
{
    struct net_buf_pool *pool = buf->pool;

    return pool->alloc->cb->alloc(buf, size, timeout);
}
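
/* Allocate a buffer from a pool. The pool is scanned for a slot whose
 * reference count is zero (an Espressif modification noted in the code),
 * the data area is allocated, and the buffer is returned with ref == 1
 * and empty contents.
 */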
#if CONFIG_BLE_MESH_NET_BUF_LOG
struct net_buf *net_buf_alloc_len_debug(struct net_buf_pool *pool, size_t size,
                                        int32_t timeout, const char *func, int line)
#else
struct net_buf *net_buf_alloc_len(struct net_buf_pool *pool, size_t size,
                                  int32_t timeout)
#endif
{
    struct net_buf *buf = NULL;
    int i;

    NET_BUF_ASSERT(pool);

    NET_BUF_DBG("Alloc, pool %p, uninit_count %d, buf_count %d",
                pool, pool->uninit_count, pool->buf_count);

    /* We need to lock interrupts temporarily to prevent race conditions
     * when accessing pool->uninit_count.
     */
    bt_mesh_buf_lock();

    /* If there are uninitialized buffers we're guaranteed to succeed
     * with the allocation one way or another.
     */
    if (pool->uninit_count) {
        /* Changed by Espressif. Use buf when buf->ref is 0 */
        for (i = pool->buf_count; i > 0; i--) {
            buf = pool_get_uninit(pool, i);
            if (!buf->ref) {
                bt_mesh_buf_unlock();
                goto success;
            }
        }
    }

    bt_mesh_buf_unlock();

    NET_BUF_ERR("Out of free buffer, pool %p", pool);
    return NULL;

success:
    NET_BUF_DBG("allocated buf %p", buf);

    if (size) {
        buf->__buf = data_alloc(buf, &size, timeout);
        if (!buf->__buf) {
            NET_BUF_ERR("Out of data, buf %p", buf);
            return NULL;
        }
    } else {
        NET_BUF_WARN("Zero data size, buf %p", buf);
        buf->__buf = NULL;
    }

    buf->ref = 1;
    buf->flags = 0;
    buf->frags = NULL;
    buf->size = size;
    net_buf_reset(buf);

    pool->uninit_count--;
#if CONFIG_BLE_MESH_NET_BUF_POOL_USAGE
    pool->avail_count--;
    NET_BUF_ASSERT(pool->avail_count >= 0);
#endif

    return buf;
}

#if CONFIG_BLE_MESH_NET_BUF_LOG
struct net_buf *net_buf_alloc_fixed_debug(struct net_buf_pool *pool,
                                          int32_t timeout, const char *func,
                                          int line)
{
    const struct net_buf_pool_fixed *fixed = pool->alloc->alloc_data;

    return net_buf_alloc_len_debug(pool, fixed->data_size, timeout, func, line);
}
#else
struct net_buf *net_buf_alloc_fixed(struct net_buf_pool *pool, int32_t timeout)
{
    const struct net_buf_pool_fixed *fixed = pool->alloc->alloc_data;

    return net_buf_alloc_len(pool, fixed->data_size, timeout);
}
#endif
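
/* Fragment chain helpers: buffers are linked through their "frags"
 * pointer. These functions find the last fragment, splice a fragment
 * (or chain) into a parent, append to a chain, and detach a fragment
 * while dropping the chain's reference to it.
 */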
struct net_buf *net_buf_frag_last(struct net_buf *buf)
{
    NET_BUF_ASSERT(buf);

    while (buf->frags) {
        buf = buf->frags;
    }

    return buf;
}

void net_buf_frag_insert(struct net_buf *parent, struct net_buf *frag)
{
    NET_BUF_ASSERT(parent);
    NET_BUF_ASSERT(frag);

    if (parent->frags) {
        net_buf_frag_last(frag)->frags = parent->frags;
    }
    /* Take ownership of the fragment reference */
    parent->frags = frag;
}

struct net_buf *net_buf_frag_add(struct net_buf *head, struct net_buf *frag)
{
    NET_BUF_ASSERT(frag);

    if (!head) {
        return net_buf_ref(frag);
    }

    net_buf_frag_insert(net_buf_frag_last(head), frag);

    return head;
}

#if CONFIG_BLE_MESH_NET_BUF_LOG
struct net_buf *net_buf_frag_del_debug(struct net_buf *parent,
                                       struct net_buf *frag,
                                       const char *func, int line)
#else
struct net_buf *net_buf_frag_del(struct net_buf *parent, struct net_buf *frag)
#endif
{
    struct net_buf *next_frag = NULL;

    NET_BUF_ASSERT(frag);

    if (parent) {
        NET_BUF_ASSERT(parent->frags);
        NET_BUF_ASSERT(parent->frags == frag);

        parent->frags = frag->frags;
    }

    next_frag = frag->frags;

    frag->frags = NULL;

#if CONFIG_BLE_MESH_NET_BUF_LOG
    net_buf_unref_debug(frag, func, line);
#else
    net_buf_unref(frag);
#endif

    return next_frag;
}
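
/* Copy up to "len" bytes starting at "offset" out of a fragment chain
 * into a contiguous destination buffer, walking fragments as needed.
 * Returns the number of bytes actually copied.
 */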
size_t net_buf_linearize(void *dst, size_t dst_len, struct net_buf *src,
                         size_t offset, size_t len)
{
    struct net_buf *frag = NULL;
    size_t to_copy = 0U;
    size_t copied = 0U;

    len = MIN(len, dst_len);

    frag = src;

    /* find the right fragment to start copying from */
    while (frag && offset >= frag->len) {
        offset -= frag->len;
        frag = frag->frags;
    }

    /* traverse the fragment chain until len bytes are copied */
    copied = 0;
    while (frag && len > 0) {
        to_copy = MIN(len, frag->len - offset);
        memcpy((uint8_t *)dst + copied, frag->data + offset, to_copy);

        copied += to_copy;

        /* to_copy is always <= len */
        len -= to_copy;
        frag = frag->frags;

        /* after the first iteration, this value will be 0 */
        offset = 0;
    }

    return copied;
}

/* Helper routine that appends multiple bytes to a buffer. If there is no
 * room left in the current fragment, a new fragment is allocated via
 * allocate_cb and added to the buffer. It assumes that the buffer has at
 * least one fragment.
 */
size_t net_buf_append_bytes(struct net_buf *buf, size_t len,
                            const void *value, int32_t timeout,
                            net_buf_allocator_cb allocate_cb, void *user_data)
{
    struct net_buf *frag = net_buf_frag_last(buf);
    size_t added_len = 0U;
    const uint8_t *value8 = value;

    do {
        uint16_t count = MIN(len, net_buf_tailroom(frag));

        net_buf_add_mem(frag, value8, count);
        len -= count;
        added_len += count;
        value8 += count;

        if (len == 0) {
            return added_len;
        }

        frag = allocate_cb(timeout, user_data);
        if (!frag) {
            return added_len;
        }

        net_buf_frag_add(buf, frag);
    } while (1);

    /* Unreachable */
    return 0;
}